xref: /dpdk/app/test-crypto-perf/main.c (revision 200bc52e5aa0d72e70464c9cd22b55cf536ed13c)
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2016-2017 Intel Corporation
 */

#include <stdio.h>
#include <unistd.h>

#include <rte_malloc.h>
#include <rte_random.h>
#include <rte_eal.h>
#include <rte_cryptodev.h>
#ifdef RTE_LIBRTE_PMD_CRYPTO_SCHEDULER
#include <rte_cryptodev_scheduler.h>
#endif

#include "cperf.h"
#include "cperf_options.h"
#include "cperf_test_vector_parsing.h"
#include "cperf_test_throughput.h"
#include "cperf_test_latency.h"
#include "cperf_test_verify.h"
#include "cperf_test_pmd_cyclecount.h"

static struct {
	struct rte_mempool *sess_mp;
	struct rte_mempool *priv_mp;
} session_pool_socket[RTE_MAX_NUMA_NODES];

const char *cperf_test_type_strs[] = {
	[CPERF_TEST_TYPE_THROUGHPUT] = "throughput",
	[CPERF_TEST_TYPE_LATENCY] = "latency",
	[CPERF_TEST_TYPE_VERIFY] = "verify",
	[CPERF_TEST_TYPE_PMDCC] = "pmd-cyclecount"
};

const char *cperf_op_type_strs[] = {
	[CPERF_CIPHER_ONLY] = "cipher-only",
	[CPERF_AUTH_ONLY] = "auth-only",
	[CPERF_CIPHER_THEN_AUTH] = "cipher-then-auth",
	[CPERF_AUTH_THEN_CIPHER] = "auth-then-cipher",
	[CPERF_AEAD] = "aead"
};

const struct cperf_test cperf_testmap[] = {
		[CPERF_TEST_TYPE_THROUGHPUT] = {
				cperf_throughput_test_constructor,
				cperf_throughput_test_runner,
				cperf_throughput_test_destructor
		},
		[CPERF_TEST_TYPE_LATENCY] = {
				cperf_latency_test_constructor,
				cperf_latency_test_runner,
				cperf_latency_test_destructor
		},
		[CPERF_TEST_TYPE_VERIFY] = {
				cperf_verify_test_constructor,
				cperf_verify_test_runner,
				cperf_verify_test_destructor
		},
		[CPERF_TEST_TYPE_PMDCC] = {
				cperf_pmd_cyclecount_test_constructor,
				cperf_pmd_cyclecount_test_runner,
				cperf_pmd_cyclecount_test_destructor
		}
};

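/*
 * Lazily create the per-socket session mempools shared by all devices on
 * a NUMA node: one pool for the generic session headers and one for the
 * PMD private session data. Each pool is created at most once per socket.
 */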
static int
fill_session_pool_socket(int32_t socket_id, uint32_t session_priv_size,
		uint32_t nb_sessions)
{
	char mp_name[RTE_MEMPOOL_NAMESIZE];
	struct rte_mempool *sess_mp;

	if (session_pool_socket[socket_id].priv_mp == NULL) {
		snprintf(mp_name, RTE_MEMPOOL_NAMESIZE,
			"priv_sess_mp_%d", socket_id);

		sess_mp = rte_mempool_create(mp_name,
					nb_sessions,
					session_priv_size,
					0, 0, NULL, NULL, NULL,
					NULL, socket_id,
					0);

		if (sess_mp == NULL) {
			printf("Cannot create pool \"%s\" on socket %d\n",
				mp_name, socket_id);
			return -ENOMEM;
		}

		printf("Allocated pool \"%s\" on socket %d\n",
			mp_name, socket_id);
		session_pool_socket[socket_id].priv_mp = sess_mp;
	}

	if (session_pool_socket[socket_id].sess_mp == NULL) {

		snprintf(mp_name, RTE_MEMPOOL_NAMESIZE,
			"sess_mp_%d", socket_id);

		sess_mp = rte_cryptodev_sym_session_pool_create(mp_name,
					nb_sessions, 0, 0, 0, socket_id);

		if (sess_mp == NULL) {
			printf("Cannot create pool \"%s\" on socket %d\n",
				mp_name, socket_id);
			return -ENOMEM;
		}

		printf("Allocated pool \"%s\" on socket %d\n",
			mp_name, socket_id);
		session_pool_socket[socket_id].sess_mp = sess_mp;
	}

	return 0;
}

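/*
 * Discover the crypto devices of the requested type, size the shared
 * session pools, then configure and start each device with one queue
 * pair per worker lcore assigned to it. Returns the number of devices
 * put into use, or a negative errno on failure.
 */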
static int
cperf_initialize_cryptodev(struct cperf_options *opts, uint8_t *enabled_cdevs)
{
	uint8_t enabled_cdev_count = 0, nb_lcores, cdev_id;
	uint32_t sessions_needed = 0;
	unsigned int i, j;
	int ret;

	enabled_cdev_count = rte_cryptodev_devices_get(opts->device_type,
			enabled_cdevs, RTE_CRYPTO_MAX_DEVS);
	if (enabled_cdev_count == 0) {
		printf("No crypto devices of type %s available\n",
				opts->device_type);
		return -EINVAL;
	}

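	/* The master lcore does not run a test worker, so discount it */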
	nb_lcores = rte_lcore_count() - 1;

	if (nb_lcores < 1) {
		RTE_LOG(ERR, USER1,
			"Number of enabled cores needs to be higher than 1\n");
		return -EINVAL;
	}

	/*
	 * Use fewer devices if there are
	 * more available than cores.
	 */
	if (enabled_cdev_count > nb_lcores)
		enabled_cdev_count = nb_lcores;

	/* Create a mempool shared by all the devices */
	uint32_t max_sess_size = 0, sess_size;

	for (cdev_id = 0; cdev_id < rte_cryptodev_count(); cdev_id++) {
		sess_size = rte_cryptodev_sym_get_private_session_size(cdev_id);
		if (sess_size > max_sess_size)
			max_sess_size = sess_size;
	}

	/*
	 * Calculate the number of queue pairs needed, based on the number
	 * of available logical cores and crypto devices. For instance,
	 * if there are 4 cores and 2 crypto devices, 2 queue pairs will
	 * be set up per device.
	 */
	opts->nb_qps = (nb_lcores % enabled_cdev_count) ?
				(nb_lcores / enabled_cdev_count) + 1 :
				nb_lcores / enabled_cdev_count;

	for (i = 0; i < enabled_cdev_count &&
			i < RTE_CRYPTO_MAX_DEVS; i++) {
		cdev_id = enabled_cdevs[i];
#ifdef RTE_LIBRTE_PMD_CRYPTO_SCHEDULER
		/*
		 * If multi-core scheduler is used, limit the number
		 * of queue pairs to 1, as there is no way to know
		 * how many cores are being used by the PMD, and
		 * how many will be available for the application.
		 */
		if (!strcmp((const char *)opts->device_type, "crypto_scheduler") &&
				rte_cryptodev_scheduler_mode_get(cdev_id) ==
				CDEV_SCHED_MODE_MULTICORE)
			opts->nb_qps = 1;
#endif

		struct rte_cryptodev_info cdev_info;
		uint8_t socket_id = rte_cryptodev_socket_id(cdev_id);
		/* range check the socket_id - negative values become big
		 * positive ones due to use of unsigned value
		 */
		if (socket_id >= RTE_MAX_NUMA_NODES)
			socket_id = 0;

		rte_cryptodev_info_get(cdev_id, &cdev_info);
		if (opts->nb_qps > cdev_info.max_nb_queue_pairs) {
			printf("Number of needed queue pairs is higher "
				"than the maximum number of queue pairs "
				"per device.\n");
			printf("Lower the number of cores or increase "
				"the number of crypto devices\n");
			return -EINVAL;
		}
		struct rte_cryptodev_config conf = {
			.nb_queue_pairs = opts->nb_qps,
			.socket_id = socket_id
		};

		struct rte_cryptodev_qp_conf qp_conf = {
			.nb_descriptors = opts->nb_descriptors
		};

		/*
		 * Device info specifies the min headroom and tailroom
		 * requirement for the crypto PMD. This needs to be honoured
		 * by the application while creating mbufs.
		 */
		if (opts->headroom_sz < cdev_info.min_mbuf_headroom_req) {
			/* Update headroom */
			opts->headroom_sz = cdev_info.min_mbuf_headroom_req;
		}
		if (opts->tailroom_sz < cdev_info.min_mbuf_tailroom_req) {
			/* Update tailroom */
			opts->tailroom_sz = cdev_info.min_mbuf_tailroom_req;
		}

		/* Update segment size to include headroom & tailroom */
		opts->segment_sz += (opts->headroom_sz + opts->tailroom_sz);

		uint32_t dev_max_nb_sess = cdev_info.sym.max_nb_sessions;
		/*
		 * Two session objects are required for each session
		 * (one for the header, one for the private data)
		 */
		if (!strcmp((const char *)opts->device_type,
					"crypto_scheduler")) {
#ifdef RTE_LIBRTE_PMD_CRYPTO_SCHEDULER
			uint32_t nb_slaves =
				rte_cryptodev_scheduler_slaves_get(cdev_id,
								NULL);

			sessions_needed = enabled_cdev_count *
				opts->nb_qps * nb_slaves;
#endif
		} else
			sessions_needed = enabled_cdev_count *
						opts->nb_qps;

		/*
		 * A single session is required per queue pair
		 * in each device
		 */
		if (dev_max_nb_sess != 0 && dev_max_nb_sess < opts->nb_qps) {
			RTE_LOG(ERR, USER1,
				"Device does not support at least "
				"%u sessions\n", opts->nb_qps);
			return -ENOTSUP;
		}

		ret = fill_session_pool_socket(socket_id, max_sess_size,
				sessions_needed);
		if (ret < 0)
			return ret;

		qp_conf.mp_session = session_pool_socket[socket_id].sess_mp;
		qp_conf.mp_session_private =
				session_pool_socket[socket_id].priv_mp;

		ret = rte_cryptodev_configure(cdev_id, &conf);
		if (ret < 0) {
			printf("Failed to configure cryptodev %u\n", cdev_id);
			return -EINVAL;
		}

		for (j = 0; j < opts->nb_qps; j++) {
			ret = rte_cryptodev_queue_pair_setup(cdev_id, j,
				&qp_conf, socket_id);
			if (ret < 0) {
				printf("Failed to setup queue pair %u on "
					"cryptodev %u\n", j, cdev_id);
				return -EINVAL;
			}
		}

		ret = rte_cryptodev_start(cdev_id);
		if (ret < 0) {
			printf("Failed to start device %u: error %d\n",
					cdev_id, ret);
			return -EPERM;
		}
	}

	return enabled_cdev_count;
}

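/*
 * Verify that every enabled device advertises support for the requested
 * cipher/auth/AEAD algorithm with the configured key, IV, digest and AAD
 * sizes.
 */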
static int
cperf_verify_devices_capabilities(struct cperf_options *opts,
		uint8_t *enabled_cdevs, uint8_t nb_cryptodevs)
{
	struct rte_cryptodev_sym_capability_idx cap_idx;
	const struct rte_cryptodev_symmetric_capability *capability;

	uint8_t i, cdev_id;
	int ret;

	for (i = 0; i < nb_cryptodevs; i++) {

		cdev_id = enabled_cdevs[i];

		if (opts->op_type == CPERF_AUTH_ONLY ||
				opts->op_type == CPERF_CIPHER_THEN_AUTH ||
				opts->op_type == CPERF_AUTH_THEN_CIPHER) {

			cap_idx.type = RTE_CRYPTO_SYM_XFORM_AUTH;
			cap_idx.algo.auth = opts->auth_algo;

			capability = rte_cryptodev_sym_capability_get(cdev_id,
					&cap_idx);
			if (capability == NULL)
				return -1;

			ret = rte_cryptodev_sym_capability_check_auth(
					capability,
					opts->auth_key_sz,
					opts->digest_sz,
					opts->auth_iv_sz);
			if (ret != 0)
				return ret;
		}

		if (opts->op_type == CPERF_CIPHER_ONLY ||
				opts->op_type == CPERF_CIPHER_THEN_AUTH ||
				opts->op_type == CPERF_AUTH_THEN_CIPHER) {

			cap_idx.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
			cap_idx.algo.cipher = opts->cipher_algo;

			capability = rte_cryptodev_sym_capability_get(cdev_id,
					&cap_idx);
			if (capability == NULL)
				return -1;

			ret = rte_cryptodev_sym_capability_check_cipher(
					capability,
					opts->cipher_key_sz,
					opts->cipher_iv_sz);
			if (ret != 0)
				return ret;
		}

		if (opts->op_type == CPERF_AEAD) {

			cap_idx.type = RTE_CRYPTO_SYM_XFORM_AEAD;
			cap_idx.algo.aead = opts->aead_algo;

			capability = rte_cryptodev_sym_capability_get(cdev_id,
					&cap_idx);
			if (capability == NULL)
				return -1;

			ret = rte_cryptodev_sym_capability_check_aead(
					capability,
					opts->aead_key_sz,
					opts->digest_sz,
					opts->aead_aad_sz,
					opts->aead_iv_sz);
			if (ret != 0)
				return ret;
		}
	}

	return 0;
}

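/*
 * Sanity-check a test vector loaded from file against the selected
 * operation type: every field the test needs must be present and sized
 * consistently with the command-line options.
 */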
static int
cperf_check_test_vector(struct cperf_options *opts,
		struct cperf_test_vector *test_vec)
{
	if (opts->op_type == CPERF_CIPHER_ONLY) {
		if (opts->cipher_algo == RTE_CRYPTO_CIPHER_NULL) {
			if (test_vec->plaintext.data == NULL)
				return -1;
		} else {
			if (test_vec->plaintext.data == NULL)
				return -1;
			if (test_vec->plaintext.length < opts->max_buffer_size)
				return -1;
			if (test_vec->ciphertext.data == NULL)
				return -1;
			if (test_vec->ciphertext.length < opts->max_buffer_size)
				return -1;
			/* Cipher IV is only required for some algorithms */
			if (opts->cipher_iv_sz &&
					test_vec->cipher_iv.data == NULL)
				return -1;
			if (test_vec->cipher_iv.length != opts->cipher_iv_sz)
				return -1;
			if (test_vec->cipher_key.data == NULL)
				return -1;
			if (test_vec->cipher_key.length != opts->cipher_key_sz)
				return -1;
		}
	} else if (opts->op_type == CPERF_AUTH_ONLY) {
		if (opts->auth_algo != RTE_CRYPTO_AUTH_NULL) {
			if (test_vec->plaintext.data == NULL)
				return -1;
			if (test_vec->plaintext.length < opts->max_buffer_size)
				return -1;
			/* Auth key is only required for some algorithms */
			if (opts->auth_key_sz &&
					test_vec->auth_key.data == NULL)
				return -1;
			if (test_vec->auth_key.length != opts->auth_key_sz)
				return -1;
			if (test_vec->auth_iv.length != opts->auth_iv_sz)
				return -1;
			/* Auth IV is only required for some algorithms */
			if (opts->auth_iv_sz && test_vec->auth_iv.data == NULL)
				return -1;
			if (test_vec->digest.data == NULL)
				return -1;
			if (test_vec->digest.length < opts->digest_sz)
				return -1;
		}

	} else if (opts->op_type == CPERF_CIPHER_THEN_AUTH ||
			opts->op_type == CPERF_AUTH_THEN_CIPHER) {
		if (opts->cipher_algo == RTE_CRYPTO_CIPHER_NULL) {
			if (test_vec->plaintext.data == NULL)
				return -1;
			if (test_vec->plaintext.length < opts->max_buffer_size)
				return -1;
		} else {
			if (test_vec->plaintext.data == NULL)
				return -1;
			if (test_vec->plaintext.length < opts->max_buffer_size)
				return -1;
			if (test_vec->ciphertext.data == NULL)
				return -1;
			if (test_vec->ciphertext.length < opts->max_buffer_size)
				return -1;
			if (test_vec->cipher_iv.data == NULL)
				return -1;
			if (test_vec->cipher_iv.length != opts->cipher_iv_sz)
				return -1;
			if (test_vec->cipher_key.data == NULL)
				return -1;
			if (test_vec->cipher_key.length != opts->cipher_key_sz)
				return -1;
		}
		if (opts->auth_algo != RTE_CRYPTO_AUTH_NULL) {
			if (test_vec->auth_key.data == NULL)
				return -1;
			if (test_vec->auth_key.length != opts->auth_key_sz)
				return -1;
			if (test_vec->auth_iv.length != opts->auth_iv_sz)
				return -1;
			/* Auth IV is only required for some algorithms */
			if (opts->auth_iv_sz && test_vec->auth_iv.data == NULL)
				return -1;
			if (test_vec->digest.data == NULL)
				return -1;
			if (test_vec->digest.length < opts->digest_sz)
				return -1;
		}
	} else if (opts->op_type == CPERF_AEAD) {
		if (test_vec->plaintext.data == NULL)
			return -1;
		if (test_vec->plaintext.length < opts->max_buffer_size)
			return -1;
		if (test_vec->ciphertext.data == NULL)
			return -1;
		if (test_vec->ciphertext.length < opts->max_buffer_size)
			return -1;
		if (test_vec->aead_key.data == NULL)
			return -1;
		if (test_vec->aead_key.length != opts->aead_key_sz)
			return -1;
		if (test_vec->aead_iv.data == NULL)
			return -1;
		if (test_vec->aead_iv.length != opts->aead_iv_sz)
			return -1;
		if (test_vec->aad.data == NULL)
			return -1;
		if (test_vec->aad.length != opts->aead_aad_sz)
			return -1;
		if (test_vec->digest.data == NULL)
			return -1;
		if (test_vec->digest.length < opts->digest_sz)
			return -1;
	}
	return 0;
}

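/*
 * Entry point: initialise EAL, parse and validate the test options,
 * bring up the crypto devices, then launch one test worker per queue
 * pair on the slave lcores and collect the results.
 */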
int
main(int argc, char **argv)
{
	struct cperf_options opts = {0};
	struct cperf_test_vector *t_vec = NULL;
	struct cperf_op_fns op_fns;
	void *ctx[RTE_MAX_LCORE] = { };
	int nb_cryptodevs = 0;
	uint16_t total_nb_qps = 0;
	uint8_t cdev_id, i;
	uint8_t enabled_cdevs[RTE_CRYPTO_MAX_DEVS] = { 0 };

	uint8_t buffer_size_idx = 0;

	int ret;
	uint32_t lcore_id;

	/* Initialise DPDK EAL */
	ret = rte_eal_init(argc, argv);
	if (ret < 0)
		rte_exit(EXIT_FAILURE, "Invalid EAL arguments!\n");
	argc -= ret;
	argv += ret;

	cperf_options_default(&opts);

	ret = cperf_options_parse(&opts, argc, argv);
	if (ret) {
		RTE_LOG(ERR, USER1, "Parsing one or more user options failed\n");
		goto err;
	}

	ret = cperf_options_check(&opts);
	if (ret) {
		RTE_LOG(ERR, USER1,
				"Checking one or more user options failed\n");
		goto err;
	}

	nb_cryptodevs = cperf_initialize_cryptodev(&opts, enabled_cdevs);

	if (!opts.silent)
		cperf_options_dump(&opts);

	if (nb_cryptodevs < 1) {
		RTE_LOG(ERR, USER1, "Failed to initialise requested crypto "
				"device type\n");
		nb_cryptodevs = 0;
		goto err;
	}

	ret = cperf_verify_devices_capabilities(&opts, enabled_cdevs,
			nb_cryptodevs);
	if (ret) {
		RTE_LOG(ERR, USER1, "Crypto device type does not support "
				"capabilities requested\n");
		goto err;
	}

	if (opts.test_file != NULL) {
		t_vec = cperf_test_vector_get_from_file(&opts);
		if (t_vec == NULL) {
			RTE_LOG(ERR, USER1,
					"Failed to create test vector for"
					" specified file\n");
			goto err;
		}

		if (cperf_check_test_vector(&opts, t_vec)) {
			RTE_LOG(ERR, USER1, "Test vector is incomplete\n");
			goto err;
		}
	} else {
		t_vec = cperf_test_vector_get_dummy(&opts);
		if (t_vec == NULL) {
			RTE_LOG(ERR, USER1,
					"Failed to create test vector for"
					" specified algorithms\n");
			goto err;
		}
	}

	ret = cperf_get_op_functions(&opts, &op_fns);
	if (ret) {
		RTE_LOG(ERR, USER1, "Failed to find function ops set for "
				"specified algorithm combination\n");
		goto err;
	}

	if (!opts.silent)
		show_test_vector(t_vec);

	total_nb_qps = nb_cryptodevs * opts.nb_qps;

	i = 0;
	uint8_t qp_id = 0, cdev_index = 0;
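	/*
	 * Assign one (device, queue pair) pair to each slave lcore, using
	 * all queue pairs of a device before moving on to the next one.
	 */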
	RTE_LCORE_FOREACH_SLAVE(lcore_id) {

		if (i == total_nb_qps)
			break;

		cdev_id = enabled_cdevs[cdev_index];

		uint8_t socket_id = rte_cryptodev_socket_id(cdev_id);
		/* range check the socket_id, as above */
		if (socket_id >= RTE_MAX_NUMA_NODES)
			socket_id = 0;

		ctx[i] = cperf_testmap[opts.test].constructor(
				session_pool_socket[socket_id].sess_mp,
				session_pool_socket[socket_id].priv_mp,
				cdev_id, qp_id,
				&opts, t_vec, &op_fns);
		if (ctx[i] == NULL) {
			RTE_LOG(ERR, USER1, "Test run constructor failed\n");
			goto err;
		}
		qp_id = (qp_id + 1) % opts.nb_qps;
		if (qp_id == 0)
			cdev_index++;
		i++;
	}

	if (opts.imix_distribution_count != 0) {
		uint8_t buffer_size_count = opts.buffer_size_count;
		uint16_t distribution_total[buffer_size_count];
		uint32_t op_idx;
		uint32_t test_average_size = 0;
		const uint32_t *buffer_size_list = opts.buffer_size_list;
		const uint32_t *imix_distribution_list = opts.imix_distribution_list;

		opts.imix_buffer_sizes = rte_malloc(NULL,
					sizeof(uint32_t) * opts.pool_sz,
					0);
		if (opts.imix_buffer_sizes == NULL) {
			RTE_LOG(ERR, USER1,
				"Failed to allocate IMIX buffer size list\n");
			goto err;
		}
		/*
		 * Calculate accumulated distribution of
		 * probabilities per packet size
		 */
		distribution_total[0] = imix_distribution_list[0];
		for (i = 1; i < buffer_size_count; i++)
			distribution_total[i] = imix_distribution_list[i] +
				distribution_total[i-1];

		/* Calculate a random sequence of packet sizes, based on distribution */
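		/*
		 * Inverse-CDF sampling: draw a uniform value below the total
		 * accumulated weight and pick the first bucket whose
		 * accumulated weight exceeds it.
		 */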
		for (op_idx = 0; op_idx < opts.pool_sz; op_idx++) {
			uint16_t random_number = rte_rand() %
				distribution_total[buffer_size_count - 1];
			for (i = 0; i < buffer_size_count; i++)
				if (random_number < distribution_total[i])
					break;

			opts.imix_buffer_sizes[op_idx] = buffer_size_list[i];
		}

		/* Calculate average buffer size for the IMIX distribution */
		for (i = 0; i < buffer_size_count; i++)
			test_average_size += buffer_size_list[i] *
				imix_distribution_list[i];

		opts.test_buffer_size = test_average_size /
				distribution_total[buffer_size_count - 1];

		i = 0;
		RTE_LCORE_FOREACH_SLAVE(lcore_id) {

			if (i == total_nb_qps)
				break;

			rte_eal_remote_launch(cperf_testmap[opts.test].runner,
				ctx[i], lcore_id);
			i++;
		}
		i = 0;
		RTE_LCORE_FOREACH_SLAVE(lcore_id) {

			if (i == total_nb_qps)
				break;
			rte_eal_wait_lcore(lcore_id);
			i++;
		}
	} else {

		/* Get the first size from the range or list */
		if (opts.inc_buffer_size != 0)
			opts.test_buffer_size = opts.min_buffer_size;
		else
			opts.test_buffer_size = opts.buffer_size_list[0];

		while (opts.test_buffer_size <= opts.max_buffer_size) {
			i = 0;
			RTE_LCORE_FOREACH_SLAVE(lcore_id) {

				if (i == total_nb_qps)
					break;

				rte_eal_remote_launch(cperf_testmap[opts.test].runner,
					ctx[i], lcore_id);
				i++;
			}
			i = 0;
			RTE_LCORE_FOREACH_SLAVE(lcore_id) {

				if (i == total_nb_qps)
					break;
				rte_eal_wait_lcore(lcore_id);
				i++;
			}

			/* Get the next size from the range or list */
			if (opts.inc_buffer_size != 0)
				opts.test_buffer_size += opts.inc_buffer_size;
			else {
				if (++buffer_size_idx == opts.buffer_size_count)
					break;
				opts.test_buffer_size =
					opts.buffer_size_list[buffer_size_idx];
			}
		}
	}

	i = 0;
	RTE_LCORE_FOREACH_SLAVE(lcore_id) {

		if (i == total_nb_qps)
			break;

		cperf_testmap[opts.test].destructor(ctx[i]);
		i++;
	}

	for (i = 0; i < nb_cryptodevs &&
			i < RTE_CRYPTO_MAX_DEVS; i++)
		rte_cryptodev_stop(enabled_cdevs[i]);

	rte_free(opts.imix_buffer_sizes);
	free_test_vector(t_vec, &opts);

	printf("\n");
	return EXIT_SUCCESS;

err:
	i = 0;
	RTE_LCORE_FOREACH_SLAVE(lcore_id) {
		if (i == total_nb_qps)
			break;

		if (ctx[i] && cperf_testmap[opts.test].destructor)
			cperf_testmap[opts.test].destructor(ctx[i]);
		i++;
	}

	for (i = 0; i < nb_cryptodevs &&
			i < RTE_CRYPTO_MAX_DEVS; i++)
		rte_cryptodev_stop(enabled_cdevs[i]);
	rte_free(opts.imix_buffer_sizes);
	free_test_vector(t_vec, &opts);

	printf("\n");
	return EXIT_FAILURE;
}
749