xref: /dpdk/app/test-crypto-perf/main.c (revision c7f5dba7d4bb7971fac51755aad09b71b10cef90)
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2016-2017 Intel Corporation
 */

#include <stdio.h>
#include <unistd.h>

#include <rte_malloc.h>
#include <rte_random.h>
#include <rte_eal.h>
#include <rte_cryptodev.h>
#ifdef RTE_LIBRTE_PMD_CRYPTO_SCHEDULER
#include <rte_cryptodev_scheduler.h>
#endif

#include "cperf.h"
#include "cperf_options.h"
#include "cperf_test_vector_parsing.h"
#include "cperf_test_throughput.h"
#include "cperf_test_latency.h"
#include "cperf_test_verify.h"
#include "cperf_test_pmd_cyclecount.h"

const char *cperf_test_type_strs[] = {
	[CPERF_TEST_TYPE_THROUGHPUT] = "throughput",
	[CPERF_TEST_TYPE_LATENCY] = "latency",
	[CPERF_TEST_TYPE_VERIFY] = "verify",
	[CPERF_TEST_TYPE_PMDCC] = "pmd-cyclecount"
};

const char *cperf_op_type_strs[] = {
	[CPERF_CIPHER_ONLY] = "cipher-only",
	[CPERF_AUTH_ONLY] = "auth-only",
	[CPERF_CIPHER_THEN_AUTH] = "cipher-then-auth",
	[CPERF_AUTH_THEN_CIPHER] = "auth-then-cipher",
	[CPERF_AEAD] = "aead"
};

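/*
 * Dispatch table: each test type supplies a constructor, a runner and a
 * destructor, invoked through cperf_testmap[opts.test] in main().
 */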
const struct cperf_test cperf_testmap[] = {
		[CPERF_TEST_TYPE_THROUGHPUT] = {
				cperf_throughput_test_constructor,
				cperf_throughput_test_runner,
				cperf_throughput_test_destructor
		},
		[CPERF_TEST_TYPE_LATENCY] = {
				cperf_latency_test_constructor,
				cperf_latency_test_runner,
				cperf_latency_test_destructor
		},
		[CPERF_TEST_TYPE_VERIFY] = {
				cperf_verify_test_constructor,
				cperf_verify_test_runner,
				cperf_verify_test_destructor
		},
		[CPERF_TEST_TYPE_PMDCC] = {
				cperf_pmd_cyclecount_test_constructor,
				cperf_pmd_cyclecount_test_runner,
				cperf_pmd_cyclecount_test_destructor
		}
};

static int
cperf_initialize_cryptodev(struct cperf_options *opts, uint8_t *enabled_cdevs,
			struct rte_mempool *session_pool_socket[])
{
	uint8_t enabled_cdev_count = 0, nb_lcores, cdev_id;
	uint32_t sessions_needed = 0;
	unsigned int i, j;
	int ret;

	enabled_cdev_count = rte_cryptodev_devices_get(opts->device_type,
			enabled_cdevs, RTE_CRYPTO_MAX_DEVS);
	if (enabled_cdev_count == 0) {
		printf("No crypto devices of type %s available\n",
				opts->device_type);
		return -EINVAL;
	}

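	/*
	 * The master lcore only coordinates the run, so it is excluded
	 * here; only worker (slave) lcores drive crypto queue pairs.
	 */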
	nb_lcores = rte_lcore_count() - 1;

	if (nb_lcores < 1) {
		RTE_LOG(ERR, USER1,
			"Number of enabled cores needs to be higher than 1\n");
		return -EINVAL;
	}

	/* Use fewer devices if more are available than cores. */
	if (enabled_cdev_count > nb_lcores)
		enabled_cdev_count = nb_lcores;

	/* Create a mempool shared by all the devices */
	uint32_t max_sess_size = 0, sess_size;

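	/*
	 * The pool is shared, so its element size must fit the largest
	 * private session size reported by any device.
	 */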
	for (cdev_id = 0; cdev_id < rte_cryptodev_count(); cdev_id++) {
		sess_size = rte_cryptodev_sym_get_private_session_size(cdev_id);
		if (sess_size > max_sess_size)
			max_sess_size = sess_size;
	}

	/*
	 * Calculate the number of queue pairs needed, based on the number
	 * of available logical cores and crypto devices. For instance,
	 * with 4 cores and 2 crypto devices, 2 queue pairs are set up per
	 * device.
	 */
	opts->nb_qps = (nb_lcores % enabled_cdev_count) ?
				(nb_lcores / enabled_cdev_count) + 1 :
				nb_lcores / enabled_cdev_count;
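	/*
	 * The division rounds up, e.g. 5 worker cores on 2 devices give
	 * 3 queue pairs per device; any surplus queue pairs stay idle.
	 */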

	for (i = 0; i < enabled_cdev_count &&
			i < RTE_CRYPTO_MAX_DEVS; i++) {
		cdev_id = enabled_cdevs[i];
#ifdef RTE_LIBRTE_PMD_CRYPTO_SCHEDULER
		/*
		 * If the multi-core scheduler is used, limit the number
		 * of queue pairs to 1, as there is no way to know
		 * how many cores are being used by the PMD, and
		 * how many will be available for the application.
		 */
		if (!strcmp((const char *)opts->device_type, "crypto_scheduler") &&
				rte_cryptodev_scheduler_mode_get(cdev_id) ==
				CDEV_SCHED_MODE_MULTICORE)
			opts->nb_qps = 1;
#endif

		struct rte_cryptodev_info cdev_info;
		uint8_t socket_id = rte_cryptodev_socket_id(cdev_id);

		rte_cryptodev_info_get(cdev_id, &cdev_info);
		if (opts->nb_qps > cdev_info.max_nb_queue_pairs) {
			printf("Number of needed queue pairs is higher "
				"than the maximum number of queue pairs "
				"per device.\n");
			printf("Lower the number of cores or increase "
				"the number of crypto devices\n");
			return -EINVAL;
		}
		struct rte_cryptodev_config conf = {
			.nb_queue_pairs = opts->nb_qps,
			.socket_id = socket_id
		};

		struct rte_cryptodev_qp_conf qp_conf = {
			.nb_descriptors = opts->nb_descriptors
		};

		/*
		 * Device info specifies the minimum headroom and tailroom
		 * requirements of the crypto PMD. These need to be honoured
		 * by the application when creating mbufs.
		 */
		if (opts->headroom_sz < cdev_info.min_mbuf_headroom_req) {
			/* Update headroom */
			opts->headroom_sz = cdev_info.min_mbuf_headroom_req;
		}
		if (opts->tailroom_sz < cdev_info.min_mbuf_tailroom_req) {
			/* Update tailroom */
			opts->tailroom_sz = cdev_info.min_mbuf_tailroom_req;
		}

		/* Update segment size to include headroom & tailroom */
		opts->segment_sz += (opts->headroom_sz + opts->tailroom_sz);

		uint32_t dev_max_nb_sess = cdev_info.sym.max_nb_sessions;
		/*
		 * Two session objects are required for each session
		 * (one for the header, one for the private data).
		 */
		if (!strcmp((const char *)opts->device_type,
					"crypto_scheduler")) {
#ifdef RTE_LIBRTE_PMD_CRYPTO_SCHEDULER
			uint32_t nb_slaves =
				rte_cryptodev_scheduler_slaves_get(cdev_id,
								NULL);

			sessions_needed = 2 * enabled_cdev_count *
				opts->nb_qps * nb_slaves;
#endif
		} else
			sessions_needed = 2 * enabled_cdev_count *
						opts->nb_qps;
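		/*
		 * e.g. 2 devices with 3 queue pairs each need
		 * 2 * 2 * 3 = 12 session objects.
		 */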

		/*
		 * A single session is required per queue pair
		 * in each device.
		 */
		if (dev_max_nb_sess != 0 && dev_max_nb_sess < opts->nb_qps) {
			RTE_LOG(ERR, USER1,
				"Device does not support at least "
				"%u sessions\n", opts->nb_qps);
			return -ENOTSUP;
		}
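		/* Create one session pool per socket, shared by all devices on it. */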
		if (session_pool_socket[socket_id] == NULL) {
			char mp_name[RTE_MEMPOOL_NAMESIZE];
			struct rte_mempool *sess_mp;

			snprintf(mp_name, RTE_MEMPOOL_NAMESIZE,
				"sess_mp_%u", socket_id);
			sess_mp = rte_mempool_create(mp_name,
						sessions_needed,
						max_sess_size,
						0,
						0, NULL, NULL, NULL,
						NULL, socket_id,
						0);

			if (sess_mp == NULL) {
				printf("Cannot create session pool on socket %d\n",
					socket_id);
				return -ENOMEM;
			}

			printf("Allocated session pool on socket %d\n", socket_id);
			session_pool_socket[socket_id] = sess_mp;
		}

		ret = rte_cryptodev_configure(cdev_id, &conf);
		if (ret < 0) {
			printf("Failed to configure cryptodev %u\n", cdev_id);
			return -EINVAL;
		}

		for (j = 0; j < opts->nb_qps; j++) {
			ret = rte_cryptodev_queue_pair_setup(cdev_id, j,
				&qp_conf, socket_id,
				session_pool_socket[socket_id]);
			if (ret < 0) {
				printf("Failed to setup queue pair %u on "
					"cryptodev %u\n", j, cdev_id);
				return -EINVAL;
			}
		}

		ret = rte_cryptodev_start(cdev_id);
		if (ret < 0) {
			printf("Failed to start device %u: error %d\n",
					cdev_id, ret);
			return -EPERM;
		}
	}

	return enabled_cdev_count;
}

static int
cperf_verify_devices_capabilities(struct cperf_options *opts,
		uint8_t *enabled_cdevs, uint8_t nb_cryptodevs)
{
	struct rte_cryptodev_sym_capability_idx cap_idx;
	const struct rte_cryptodev_symmetric_capability *capability;

	uint8_t i, cdev_id;
	int ret;

	for (i = 0; i < nb_cryptodevs; i++) {

		cdev_id = enabled_cdevs[i];

		if (opts->op_type == CPERF_AUTH_ONLY ||
				opts->op_type == CPERF_CIPHER_THEN_AUTH ||
				opts->op_type == CPERF_AUTH_THEN_CIPHER) {

			cap_idx.type = RTE_CRYPTO_SYM_XFORM_AUTH;
			cap_idx.algo.auth = opts->auth_algo;

			capability = rte_cryptodev_sym_capability_get(cdev_id,
					&cap_idx);
			if (capability == NULL)
				return -1;

			ret = rte_cryptodev_sym_capability_check_auth(
					capability,
					opts->auth_key_sz,
					opts->digest_sz,
					opts->auth_iv_sz);
			if (ret != 0)
				return ret;
		}

		if (opts->op_type == CPERF_CIPHER_ONLY ||
				opts->op_type == CPERF_CIPHER_THEN_AUTH ||
				opts->op_type == CPERF_AUTH_THEN_CIPHER) {

			cap_idx.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
			cap_idx.algo.cipher = opts->cipher_algo;

			capability = rte_cryptodev_sym_capability_get(cdev_id,
					&cap_idx);
			if (capability == NULL)
				return -1;

			ret = rte_cryptodev_sym_capability_check_cipher(
					capability,
					opts->cipher_key_sz,
					opts->cipher_iv_sz);
			if (ret != 0)
				return ret;
		}

		if (opts->op_type == CPERF_AEAD) {

			cap_idx.type = RTE_CRYPTO_SYM_XFORM_AEAD;
			cap_idx.algo.aead = opts->aead_algo;

			capability = rte_cryptodev_sym_capability_get(cdev_id,
					&cap_idx);
			if (capability == NULL)
				return -1;

			ret = rte_cryptodev_sym_capability_check_aead(
					capability,
					opts->aead_key_sz,
					opts->digest_sz,
					opts->aead_aad_sz,
					opts->aead_iv_sz);
			if (ret != 0)
				return ret;
		}
	}

	return 0;
}

static int
cperf_check_test_vector(struct cperf_options *opts,
		struct cperf_test_vector *test_vec)
{
	if (opts->op_type == CPERF_CIPHER_ONLY) {
		if (opts->cipher_algo == RTE_CRYPTO_CIPHER_NULL) {
			if (test_vec->plaintext.data == NULL)
				return -1;
		} else {
			if (test_vec->plaintext.data == NULL)
				return -1;
			if (test_vec->plaintext.length < opts->max_buffer_size)
				return -1;
			if (test_vec->ciphertext.data == NULL)
				return -1;
			if (test_vec->ciphertext.length < opts->max_buffer_size)
				return -1;
			/* Cipher IV is only required for some algorithms */
			if (opts->cipher_iv_sz &&
					test_vec->cipher_iv.data == NULL)
				return -1;
			if (test_vec->cipher_iv.length != opts->cipher_iv_sz)
				return -1;
			if (test_vec->cipher_key.data == NULL)
				return -1;
			if (test_vec->cipher_key.length != opts->cipher_key_sz)
				return -1;
		}
	} else if (opts->op_type == CPERF_AUTH_ONLY) {
		if (opts->auth_algo != RTE_CRYPTO_AUTH_NULL) {
			if (test_vec->plaintext.data == NULL)
				return -1;
			if (test_vec->plaintext.length < opts->max_buffer_size)
				return -1;
			/* Auth key is only required for some algorithms */
			if (opts->auth_key_sz &&
					test_vec->auth_key.data == NULL)
				return -1;
			if (test_vec->auth_key.length != opts->auth_key_sz)
				return -1;
			if (test_vec->auth_iv.length != opts->auth_iv_sz)
				return -1;
			/* Auth IV is only required for some algorithms */
			if (opts->auth_iv_sz && test_vec->auth_iv.data == NULL)
				return -1;
			if (test_vec->digest.data == NULL)
				return -1;
			if (test_vec->digest.length < opts->digest_sz)
				return -1;
		}

	} else if (opts->op_type == CPERF_CIPHER_THEN_AUTH ||
			opts->op_type == CPERF_AUTH_THEN_CIPHER) {
		if (opts->cipher_algo == RTE_CRYPTO_CIPHER_NULL) {
			if (test_vec->plaintext.data == NULL)
				return -1;
			if (test_vec->plaintext.length < opts->max_buffer_size)
				return -1;
		} else {
			if (test_vec->plaintext.data == NULL)
				return -1;
			if (test_vec->plaintext.length < opts->max_buffer_size)
				return -1;
			if (test_vec->ciphertext.data == NULL)
				return -1;
			if (test_vec->ciphertext.length < opts->max_buffer_size)
				return -1;
			if (test_vec->cipher_iv.data == NULL)
				return -1;
			if (test_vec->cipher_iv.length != opts->cipher_iv_sz)
				return -1;
			if (test_vec->cipher_key.data == NULL)
				return -1;
			if (test_vec->cipher_key.length != opts->cipher_key_sz)
				return -1;
		}
		if (opts->auth_algo != RTE_CRYPTO_AUTH_NULL) {
			if (test_vec->auth_key.data == NULL)
				return -1;
			if (test_vec->auth_key.length != opts->auth_key_sz)
				return -1;
			if (test_vec->auth_iv.length != opts->auth_iv_sz)
				return -1;
			/* Auth IV is only required for some algorithms */
			if (opts->auth_iv_sz && test_vec->auth_iv.data == NULL)
				return -1;
			if (test_vec->digest.data == NULL)
				return -1;
			if (test_vec->digest.length < opts->digest_sz)
				return -1;
		}
	} else if (opts->op_type == CPERF_AEAD) {
		if (test_vec->plaintext.data == NULL)
			return -1;
		if (test_vec->plaintext.length < opts->max_buffer_size)
			return -1;
		if (test_vec->ciphertext.data == NULL)
			return -1;
		if (test_vec->ciphertext.length < opts->max_buffer_size)
			return -1;
		if (test_vec->aead_key.data == NULL)
			return -1;
		if (test_vec->aead_key.length != opts->aead_key_sz)
			return -1;
		if (test_vec->aead_iv.data == NULL)
			return -1;
		if (test_vec->aead_iv.length != opts->aead_iv_sz)
			return -1;
		if (test_vec->aad.data == NULL)
			return -1;
		if (test_vec->aad.length != opts->aead_aad_sz)
			return -1;
		if (test_vec->digest.data == NULL)
			return -1;
		if (test_vec->digest.length < opts->digest_sz)
			return -1;
	}
	return 0;
}

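/*
 * Example invocation (a sketch only; option names and the required EAL
 * arguments can vary between DPDK versions):
 *
 *   ./dpdk-test-crypto-perf -l 0-3 --vdev crypto_aesni_mb -- \
 *	--ptest throughput --devtype crypto_aesni_mb \
 *	--optype cipher-then-auth --cipher-algo aes-cbc \
 *	--auth-algo sha1-hmac --buffer-sz 64,256,1024
 */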
int
main(int argc, char **argv)
{
	struct cperf_options opts = {0};
	struct cperf_test_vector *t_vec = NULL;
	struct cperf_op_fns op_fns;

	void *ctx[RTE_MAX_LCORE] = { };
	struct rte_mempool *session_pool_socket[RTE_MAX_NUMA_NODES] = { 0 };

	int nb_cryptodevs = 0;
	uint16_t total_nb_qps = 0;
	uint8_t cdev_id, i;
	uint8_t enabled_cdevs[RTE_CRYPTO_MAX_DEVS] = { 0 };

	uint8_t buffer_size_idx = 0;

	int ret;
	uint32_t lcore_id;

	/* Initialise DPDK EAL */
	ret = rte_eal_init(argc, argv);
	if (ret < 0)
		rte_exit(EXIT_FAILURE, "Invalid EAL arguments!\n");
	argc -= ret;
	argv += ret;

	cperf_options_default(&opts);

	ret = cperf_options_parse(&opts, argc, argv);
	if (ret) {
		RTE_LOG(ERR, USER1, "Parsing one or more user options failed\n");
		goto err;
	}

	ret = cperf_options_check(&opts);
	if (ret) {
		RTE_LOG(ERR, USER1,
				"Checking one or more user options failed\n");
		goto err;
	}

	nb_cryptodevs = cperf_initialize_cryptodev(&opts, enabled_cdevs,
			session_pool_socket);

	if (!opts.silent)
		cperf_options_dump(&opts);

	if (nb_cryptodevs < 1) {
		RTE_LOG(ERR, USER1, "Failed to initialise requested crypto "
				"device type\n");
		nb_cryptodevs = 0;
		goto err;
	}

	ret = cperf_verify_devices_capabilities(&opts, enabled_cdevs,
			nb_cryptodevs);
	if (ret) {
		RTE_LOG(ERR, USER1, "Crypto device type does not support "
				"the requested capabilities\n");
		goto err;
	}

	if (opts.test_file != NULL) {
		t_vec = cperf_test_vector_get_from_file(&opts);
		if (t_vec == NULL) {
			RTE_LOG(ERR, USER1,
					"Failed to create test vector for"
					" specified file\n");
			goto err;
		}

		if (cperf_check_test_vector(&opts, t_vec)) {
			RTE_LOG(ERR, USER1,
					"Test vector is missing required fields\n");
			goto err;
		}
	} else {
		t_vec = cperf_test_vector_get_dummy(&opts);
		if (t_vec == NULL) {
			RTE_LOG(ERR, USER1,
					"Failed to create test vector for"
					" specified algorithms\n");
			goto err;
		}
	}

	ret = cperf_get_op_functions(&opts, &op_fns);
	if (ret) {
		RTE_LOG(ERR, USER1, "Failed to find an op function set for "
				"the specified algorithm combination\n");
		goto err;
	}

	if (!opts.silent)
		show_test_vector(t_vec);

	total_nb_qps = nb_cryptodevs * opts.nb_qps;

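	/*
	 * Assign one (device, queue pair) slot per worker lcore: the queue
	 * pair index wraps modulo nb_qps, advancing to the next device on
	 * each wrap.
	 */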
	i = 0;
	uint8_t qp_id = 0, cdev_index = 0;
	RTE_LCORE_FOREACH_SLAVE(lcore_id) {

		if (i == total_nb_qps)
			break;

		cdev_id = enabled_cdevs[cdev_index];

		uint8_t socket_id = rte_cryptodev_socket_id(cdev_id);

		ctx[i] = cperf_testmap[opts.test].constructor(
				session_pool_socket[socket_id], cdev_id, qp_id,
				&opts, t_vec, &op_fns);
		if (ctx[i] == NULL) {
			RTE_LOG(ERR, USER1, "Test run constructor failed\n");
			goto err;
		}
		qp_id = (qp_id + 1) % opts.nb_qps;
		if (qp_id == 0)
			cdev_index++;
		i++;
	}

	if (opts.imix_distribution_count != 0) {
		uint8_t buffer_size_count = opts.buffer_size_count;
		uint16_t distribution_total[buffer_size_count];
		uint32_t op_idx;
		uint32_t test_average_size = 0;
		const uint32_t *buffer_size_list = opts.buffer_size_list;
		const uint32_t *imix_distribution_list = opts.imix_distribution_list;

		opts.imix_buffer_sizes = rte_malloc(NULL,
					sizeof(uint32_t) * opts.pool_sz,
					0);
		if (opts.imix_buffer_sizes == NULL) {
			RTE_LOG(ERR, USER1,
				"Failed to allocate IMIX buffer size array\n");
			goto err;
		}
		/*
		 * Calculate accumulated distribution of
		 * probabilities per packet size
		 */
		distribution_total[0] = imix_distribution_list[0];
		for (i = 1; i < buffer_size_count; i++)
			distribution_total[i] = imix_distribution_list[i] +
				distribution_total[i-1];
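		/*
		 * e.g. weights {40, 20, 40} accumulate to {40, 60, 100}; a
		 * random draw in [0, 100) then selects the first bucket
		 * whose cumulative weight exceeds it.
		 */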

		/* Calculate a random sequence of packet sizes, based on distribution */
		for (op_idx = 0; op_idx < opts.pool_sz; op_idx++) {
			uint16_t random_number = rte_rand() %
				distribution_total[buffer_size_count - 1];
			for (i = 0; i < buffer_size_count; i++)
				if (random_number < distribution_total[i])
					break;

			opts.imix_buffer_sizes[op_idx] = buffer_size_list[i];
		}

		/* Calculate average buffer size for the IMIX distribution */
		for (i = 0; i < buffer_size_count; i++)
			test_average_size += buffer_size_list[i] *
				imix_distribution_list[i];

		opts.test_buffer_size = test_average_size /
				distribution_total[buffer_size_count - 1];
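		/*
		 * e.g. sizes {64, 512, 1518} with the weights above:
		 * (64*40 + 512*20 + 1518*40) / 100 = 735 bytes on average.
		 */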

		i = 0;
		RTE_LCORE_FOREACH_SLAVE(lcore_id) {

			if (i == total_nb_qps)
				break;

			rte_eal_remote_launch(cperf_testmap[opts.test].runner,
				ctx[i], lcore_id);
			i++;
		}
		i = 0;
		RTE_LCORE_FOREACH_SLAVE(lcore_id) {

			if (i == total_nb_qps)
				break;
			rte_eal_wait_lcore(lcore_id);
			i++;
		}
	} else {

		/* Get the first size from the range or list */
		if (opts.inc_buffer_size != 0)
			opts.test_buffer_size = opts.min_buffer_size;
		else
			opts.test_buffer_size = opts.buffer_size_list[0];

		while (opts.test_buffer_size <= opts.max_buffer_size) {
			i = 0;
			RTE_LCORE_FOREACH_SLAVE(lcore_id) {

				if (i == total_nb_qps)
					break;

				rte_eal_remote_launch(cperf_testmap[opts.test].runner,
					ctx[i], lcore_id);
				i++;
			}
			i = 0;
			RTE_LCORE_FOREACH_SLAVE(lcore_id) {

				if (i == total_nb_qps)
					break;
				rte_eal_wait_lcore(lcore_id);
				i++;
			}

			/* Get the next size from the range or list */
			if (opts.inc_buffer_size != 0)
				opts.test_buffer_size += opts.inc_buffer_size;
			else {
				if (++buffer_size_idx == opts.buffer_size_count)
					break;
				opts.test_buffer_size =
					opts.buffer_size_list[buffer_size_idx];
			}
		}
	}

	i = 0;
	RTE_LCORE_FOREACH_SLAVE(lcore_id) {

		if (i == total_nb_qps)
			break;

		cperf_testmap[opts.test].destructor(ctx[i]);
		i++;
	}

	for (i = 0; i < nb_cryptodevs &&
			i < RTE_CRYPTO_MAX_DEVS; i++)
		rte_cryptodev_stop(enabled_cdevs[i]);

	free_test_vector(t_vec, &opts);

	printf("\n");
	return EXIT_SUCCESS;

err:
	i = 0;
	RTE_LCORE_FOREACH_SLAVE(lcore_id) {
		if (i == total_nb_qps)
			break;

		if (ctx[i] && cperf_testmap[opts.test].destructor)
			cperf_testmap[opts.test].destructor(ctx[i]);
		i++;
	}

	for (i = 0; i < nb_cryptodevs &&
			i < RTE_CRYPTO_MAX_DEVS; i++)
		rte_cryptodev_stop(enabled_cdevs[i]);
	rte_free(opts.imix_buffer_sizes);
	free_test_vector(t_vec, &opts);

	printf("\n");
	return EXIT_FAILURE;
}