/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2016-2017 Intel Corporation
 */

#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

#include <rte_malloc.h>
#include <rte_random.h>
#include <rte_eal.h>
#include <rte_errno.h>
#include <rte_cryptodev.h>
#ifdef RTE_CRYPTO_SCHEDULER
#include <rte_cryptodev_scheduler.h>
#endif

#include "cperf.h"
#include "cperf_options.h"
#include "cperf_test_vector_parsing.h"
#include "cperf_test_throughput.h"
#include "cperf_test_latency.h"
#include "cperf_test_verify.h"
#include "cperf_test_pmd_cyclecount.h"

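/*
 * One session mempool per NUMA socket, shared by all crypto devices
 * resident on that socket. Entries are created lazily by the pool
 * helpers below.
 */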
static struct {
	struct rte_mempool *sess_mp;
} session_pool_socket[RTE_MAX_NUMA_NODES];

const char *cperf_test_type_strs[] = {
	[CPERF_TEST_TYPE_THROUGHPUT] = "throughput",
	[CPERF_TEST_TYPE_LATENCY] = "latency",
	[CPERF_TEST_TYPE_VERIFY] = "verify",
	[CPERF_TEST_TYPE_PMDCC] = "pmd-cyclecount"
};

const char *cperf_op_type_strs[] = {
	[CPERF_CIPHER_ONLY] = "cipher-only",
	[CPERF_AUTH_ONLY] = "auth-only",
	[CPERF_CIPHER_THEN_AUTH] = "cipher-then-auth",
	[CPERF_AUTH_THEN_CIPHER] = "auth-then-cipher",
	[CPERF_AEAD] = "aead",
	[CPERF_PDCP] = "pdcp",
	[CPERF_DOCSIS] = "docsis",
	[CPERF_IPSEC] = "ipsec",
	[CPERF_ASYM_MODEX] = "modex",
	[CPERF_TLS] = "tls-record"
};

const struct cperf_test cperf_testmap[] = {
		[CPERF_TEST_TYPE_THROUGHPUT] = {
				cperf_throughput_test_constructor,
				cperf_throughput_test_runner,
				cperf_throughput_test_destructor
		},
		[CPERF_TEST_TYPE_LATENCY] = {
				cperf_latency_test_constructor,
				cperf_latency_test_runner,
				cperf_latency_test_destructor
		},
		[CPERF_TEST_TYPE_VERIFY] = {
				cperf_verify_test_constructor,
				cperf_verify_test_runner,
				cperf_verify_test_destructor
		},
		[CPERF_TEST_TYPE_PMDCC] = {
				cperf_pmd_cyclecount_test_constructor,
				cperf_pmd_cyclecount_test_runner,
				cperf_pmd_cyclecount_test_destructor
		}
};

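/*
 * Create the asymmetric session mempool for the given socket, unless one
 * already exists. Returns 0 on success or -ENOMEM on allocation failure.
 */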
static int
create_asym_op_pool_socket(int32_t socket_id, uint32_t nb_sessions)
{
	char mp_name[RTE_MEMPOOL_NAMESIZE];
	struct rte_mempool *mpool = NULL;

	if (session_pool_socket[socket_id].sess_mp == NULL) {
		snprintf(mp_name, RTE_MEMPOOL_NAMESIZE, "perf_asym_sess_pool%d",
			 socket_id);
		mpool = rte_cryptodev_asym_session_pool_create(mp_name,
				nb_sessions, 0, 0, socket_id);
		if (mpool == NULL) {
			printf("Cannot create pool \"%s\" on socket %d\n",
			       mp_name, socket_id);
			return -ENOMEM;
		}
		session_pool_socket[socket_id].sess_mp = mpool;
	}
	return 0;
}

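/*
 * Create the symmetric session mempool for the given socket, unless one
 * already exists. The caller passes the largest private session size
 * reported by any device, so a single pool can serve all of them.
 */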
static int
fill_session_pool_socket(int32_t socket_id, uint32_t session_priv_size,
		uint32_t nb_sessions)
{
	char mp_name[RTE_MEMPOOL_NAMESIZE];
	struct rte_mempool *sess_mp;

	if (session_pool_socket[socket_id].sess_mp == NULL) {
		snprintf(mp_name, RTE_MEMPOOL_NAMESIZE,
			"sess_mp_%d", socket_id);

		sess_mp = rte_cryptodev_sym_session_pool_create(mp_name,
					nb_sessions, session_priv_size, 0, 0,
					socket_id);

		if (sess_mp == NULL) {
			printf("Cannot create pool \"%s\" on socket %d\n",
				mp_name, socket_id);
			return -ENOMEM;
		}

		printf("Allocated pool \"%s\" on socket %d\n",
			mp_name, socket_id);
		session_pool_socket[socket_id].sess_mp = sess_mp;
	}

	return 0;
}

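/*
 * Probe all crypto devices of the requested type, spread the worker
 * lcores across them as queue pairs, create the per-socket session
 * pools, and configure and start each device. Returns the number of
 * devices enabled, or a negative errno value on failure.
 */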
static int
cperf_initialize_cryptodev(struct cperf_options *opts, uint8_t *enabled_cdevs)
{
	uint8_t enabled_cdev_count = 0, nb_lcores, cdev_id;
	uint32_t sessions_needed = 0;
	unsigned int i, j;
	int ret;

	enabled_cdev_count = rte_cryptodev_devices_get(opts->device_type,
			enabled_cdevs, RTE_CRYPTO_MAX_DEVS);
	if (enabled_cdev_count == 0) {
		printf("No crypto devices of type %s available\n",
				opts->device_type);
		return -EINVAL;
	}

	nb_lcores = rte_lcore_count() - 1;

	if (nb_lcores < 1) {
		RTE_LOG(ERR, USER1,
			"Number of enabled cores needs to be higher than 1\n");
		return -EINVAL;
	}

	/* Use fewer devices if more are available than worker cores. */
	if (enabled_cdev_count > nb_lcores)
		enabled_cdev_count = nb_lcores;

	/* Find the largest session size, so one mempool can be shared by all devices */
	uint32_t max_sess_size = 0, sess_size;

	for (cdev_id = 0; cdev_id < rte_cryptodev_count(); cdev_id++) {
		sess_size = rte_cryptodev_sym_get_private_session_size(cdev_id);
		if (sess_size > max_sess_size)
			max_sess_size = sess_size;
	}
#ifdef RTE_LIB_SECURITY
	for (cdev_id = 0; cdev_id < rte_cryptodev_count(); cdev_id++) {
		sess_size = rte_security_session_get_size(
				rte_cryptodev_get_sec_ctx(cdev_id));
		if (sess_size > max_sess_size)
			max_sess_size = sess_size;
	}
#endif
	/*
	 * Calculate the number of queue pairs needed, based on the number
	 * of available logical cores and crypto devices. For instance,
	 * if there are 4 cores and 2 crypto devices, 2 queue pairs will
	 * be set up per device. The division rounds up.
	 */
	opts->nb_qps = (nb_lcores % enabled_cdev_count) ?
				(nb_lcores / enabled_cdev_count) + 1 :
				nb_lcores / enabled_cdev_count;

	for (i = 0; i < enabled_cdev_count &&
			i < RTE_CRYPTO_MAX_DEVS; i++) {
		cdev_id = enabled_cdevs[i];
#ifdef RTE_CRYPTO_SCHEDULER
		/*
		 * If the multi-core scheduler is used, limit the number
		 * of queue pairs to 1, as there is no way to know
		 * how many cores are being used by the PMD, and
		 * how many will be available for the application.
		 */
		if (!strcmp((const char *)opts->device_type, "crypto_scheduler") &&
				rte_cryptodev_scheduler_mode_get(cdev_id) ==
				CDEV_SCHED_MODE_MULTICORE)
			opts->nb_qps = 1;
#endif

		struct rte_cryptodev_info cdev_info;
		int socket_id = rte_cryptodev_socket_id(cdev_id);

		/* Use the first socket if SOCKET_ID_ANY is returned. */
		if (socket_id == SOCKET_ID_ANY)
			socket_id = 0;

		rte_cryptodev_info_get(cdev_id, &cdev_info);

		if (opts->op_type == CPERF_ASYM_MODEX) {
			if ((cdev_info.feature_flags &
			     RTE_CRYPTODEV_FF_ASYMMETRIC_CRYPTO) == 0)
				continue;
		}

		if (opts->nb_qps > cdev_info.max_nb_queue_pairs) {
			printf("Number of needed queue pairs is higher "
				"than the maximum number of queue pairs "
				"per device.\n");
			printf("Lower the number of cores or increase "
				"the number of crypto devices\n");
			return -EINVAL;
		}
		struct rte_cryptodev_config conf = {
			.nb_queue_pairs = opts->nb_qps,
			.socket_id = socket_id,
		};

		switch (opts->op_type) {
		case CPERF_ASYM_MODEX:
			conf.ff_disable |= (RTE_CRYPTODEV_FF_SECURITY |
					    RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO);
			break;
		case CPERF_CIPHER_ONLY:
		case CPERF_AUTH_ONLY:
		case CPERF_CIPHER_THEN_AUTH:
		case CPERF_AUTH_THEN_CIPHER:
		case CPERF_AEAD:
			conf.ff_disable |= RTE_CRYPTODEV_FF_SECURITY;
			/* Fall through */
		case CPERF_PDCP:
		case CPERF_DOCSIS:
		case CPERF_IPSEC:
		case CPERF_TLS:
			/* Fall through */
		default:
			conf.ff_disable |= RTE_CRYPTODEV_FF_ASYMMETRIC_CRYPTO;
		}

		struct rte_cryptodev_qp_conf qp_conf = {
			.nb_descriptors = opts->nb_descriptors
		};

		/*
		 * Device info specifies the minimum headroom and tailroom
		 * required by the crypto PMD. This needs to be honoured by
		 * the application when creating mbufs.
		 */
		if (opts->headroom_sz < cdev_info.min_mbuf_headroom_req) {
			/* Update headroom */
			opts->headroom_sz = cdev_info.min_mbuf_headroom_req;
		}
		if (opts->tailroom_sz < cdev_info.min_mbuf_tailroom_req) {
			/* Update tailroom */
			opts->tailroom_sz = cdev_info.min_mbuf_tailroom_req;
		}

		/* Update segment size to include headroom & tailroom */
		opts->segment_sz += (opts->headroom_sz + opts->tailroom_sz);
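		/*
		 * opts->segment_sz is now the full mbuf data room: the
		 * requested payload size plus the PMD-required headroom
		 * and tailroom.
		 */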

		uint32_t dev_max_nb_sess = cdev_info.sym.max_nb_sessions;
		if (!strcmp((const char *)opts->device_type,
					"crypto_scheduler")) {
#ifdef RTE_CRYPTO_SCHEDULER
			uint32_t nb_workers =
				rte_cryptodev_scheduler_workers_get(cdev_id,
								NULL);
			/* Scheduler session header per lcore + 1 session per worker qp */
			sessions_needed = nb_lcores + enabled_cdev_count *
				opts->nb_qps * nb_workers;
#endif
		} else
			sessions_needed = enabled_cdev_count * opts->nb_qps;

		/*
		 * A single session is required per queue pair
		 * in each device.
		 */
		if (dev_max_nb_sess != 0 && dev_max_nb_sess < opts->nb_qps) {
			RTE_LOG(ERR, USER1,
				"Device does not support at least "
				"%u sessions\n", opts->nb_qps);
			return -ENOTSUP;
		}

		if (opts->op_type == CPERF_ASYM_MODEX)
			ret = create_asym_op_pool_socket(socket_id,
							 sessions_needed);
		else
			ret = fill_session_pool_socket(socket_id, max_sess_size,
						       sessions_needed);
		if (ret < 0)
			return ret;

		qp_conf.mp_session = session_pool_socket[socket_id].sess_mp;

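		/*
		 * Asymmetric tests draw their sessions from the dedicated
		 * pool created above, which reaches the test constructor
		 * through session_pool_socket[]; the queue pair itself
		 * needs no session mempool.
		 */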
		if (opts->op_type == CPERF_ASYM_MODEX)
			qp_conf.mp_session = NULL;

		ret = rte_cryptodev_configure(cdev_id, &conf);
		if (ret < 0) {
			printf("Failed to configure cryptodev %u\n", cdev_id);
			return -EINVAL;
		}

		for (j = 0; j < opts->nb_qps; j++) {
			ret = rte_cryptodev_queue_pair_setup(cdev_id, j,
				&qp_conf, socket_id);
			if (ret < 0) {
				printf("Failed to setup queue pair %u on "
					"cryptodev %u\n", j, cdev_id);
				return -EINVAL;
			}
		}

		ret = rte_cryptodev_start(cdev_id);
		if (ret < 0) {
			printf("Failed to start device %u: error %d\n",
					cdev_id, ret);
			return -EPERM;
		}
	}

	return enabled_cdev_count;
}

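/*
 * Check that every enabled device advertises support for the requested
 * algorithms and for the configured key, digest, IV and AAD sizes.
 */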
static int
cperf_verify_devices_capabilities(struct cperf_options *opts,
		uint8_t *enabled_cdevs, uint8_t nb_cryptodevs)
{
	struct rte_cryptodev_sym_capability_idx cap_idx;
	const struct rte_cryptodev_symmetric_capability *capability;
	struct rte_cryptodev_asym_capability_idx asym_cap_idx;
	const struct rte_cryptodev_asymmetric_xform_capability *asym_capability;

	uint8_t i, cdev_id;
	int ret;

	for (i = 0; i < nb_cryptodevs; i++) {

		cdev_id = enabled_cdevs[i];

		if (opts->op_type == CPERF_ASYM_MODEX) {
			asym_cap_idx.type = RTE_CRYPTO_ASYM_XFORM_MODEX;
			asym_capability = rte_cryptodev_asym_capability_get(
				cdev_id, &asym_cap_idx);
			if (asym_capability == NULL)
				return -1;

			ret = rte_cryptodev_asym_xform_capability_check_modlen(
				asym_capability, opts->modex_data->modulus.len);
			if (ret != 0)
				return ret;
		}

		if (opts->op_type == CPERF_AUTH_ONLY ||
				opts->op_type == CPERF_CIPHER_THEN_AUTH ||
				opts->op_type == CPERF_AUTH_THEN_CIPHER) {

			cap_idx.type = RTE_CRYPTO_SYM_XFORM_AUTH;
			cap_idx.algo.auth = opts->auth_algo;

			capability = rte_cryptodev_sym_capability_get(cdev_id,
					&cap_idx);
			if (capability == NULL)
				return -1;

			ret = rte_cryptodev_sym_capability_check_auth(
					capability,
					opts->auth_key_sz,
					opts->digest_sz,
					opts->auth_iv_sz);
			if (ret != 0)
				return ret;
		}

		if (opts->op_type == CPERF_CIPHER_ONLY ||
				opts->op_type == CPERF_CIPHER_THEN_AUTH ||
				opts->op_type == CPERF_AUTH_THEN_CIPHER) {

			cap_idx.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
			cap_idx.algo.cipher = opts->cipher_algo;

			capability = rte_cryptodev_sym_capability_get(cdev_id,
					&cap_idx);
			if (capability == NULL)
				return -1;

			ret = rte_cryptodev_sym_capability_check_cipher(
					capability,
					opts->cipher_key_sz,
					opts->cipher_iv_sz);
			if (ret != 0)
				return ret;
		}

		if (opts->op_type == CPERF_AEAD) {

			cap_idx.type = RTE_CRYPTO_SYM_XFORM_AEAD;
			cap_idx.algo.aead = opts->aead_algo;

			capability = rte_cryptodev_sym_capability_get(cdev_id,
					&cap_idx);
			if (capability == NULL)
				return -1;

			ret = rte_cryptodev_sym_capability_check_aead(
					capability,
					opts->aead_key_sz,
					opts->digest_sz,
					opts->aead_aad_sz,
					opts->aead_iv_sz);
			if (ret != 0)
				return ret;
		}
	}

	return 0;
}

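/*
 * Validate a user-supplied test vector against the selected operation
 * type: every field the operation consumes must be present and must
 * match the sizes given on the command line.
 */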
static int
cperf_check_test_vector(struct cperf_options *opts,
		struct cperf_test_vector *test_vec)
{
	if (opts->op_type == CPERF_CIPHER_ONLY) {
		if (opts->cipher_algo == RTE_CRYPTO_CIPHER_NULL) {
			if (test_vec->plaintext.data == NULL)
				return -1;
		} else {
			if (test_vec->plaintext.data == NULL)
				return -1;
			if (test_vec->plaintext.length < opts->max_buffer_size)
				return -1;
			if (test_vec->ciphertext.data == NULL)
				return -1;
			if (test_vec->ciphertext.length < opts->max_buffer_size)
				return -1;
			/* Cipher IV is only required for some algorithms */
			if (opts->cipher_iv_sz &&
					test_vec->cipher_iv.data == NULL)
				return -1;
			if (test_vec->cipher_iv.length != opts->cipher_iv_sz)
				return -1;
			if (test_vec->cipher_key.data == NULL)
				return -1;
			if (test_vec->cipher_key.length != opts->cipher_key_sz)
				return -1;
		}
	} else if (opts->op_type == CPERF_AUTH_ONLY) {
		if (opts->auth_algo != RTE_CRYPTO_AUTH_NULL) {
			if (test_vec->plaintext.data == NULL)
				return -1;
			if (test_vec->plaintext.length < opts->max_buffer_size)
				return -1;
			/* Auth key is only required for some algorithms */
			if (opts->auth_key_sz &&
					test_vec->auth_key.data == NULL)
				return -1;
			if (test_vec->auth_key.length != opts->auth_key_sz)
				return -1;
			if (test_vec->auth_iv.length != opts->auth_iv_sz)
				return -1;
			/* Auth IV is only required for some algorithms */
			if (opts->auth_iv_sz && test_vec->auth_iv.data == NULL)
				return -1;
			if (test_vec->digest.data == NULL)
				return -1;
			if (test_vec->digest.length < opts->digest_sz)
				return -1;
		}
	} else if (opts->op_type == CPERF_CIPHER_THEN_AUTH ||
			opts->op_type == CPERF_AUTH_THEN_CIPHER) {
		if (opts->cipher_algo == RTE_CRYPTO_CIPHER_NULL) {
			if (test_vec->plaintext.data == NULL)
				return -1;
			if (test_vec->plaintext.length < opts->max_buffer_size)
				return -1;
		} else {
			if (test_vec->plaintext.data == NULL)
				return -1;
			if (test_vec->plaintext.length < opts->max_buffer_size)
				return -1;
			if (test_vec->ciphertext.data == NULL)
				return -1;
			if (test_vec->ciphertext.length < opts->max_buffer_size)
				return -1;
			if (test_vec->cipher_iv.data == NULL)
				return -1;
			if (test_vec->cipher_iv.length != opts->cipher_iv_sz)
				return -1;
			if (test_vec->cipher_key.data == NULL)
				return -1;
			if (test_vec->cipher_key.length != opts->cipher_key_sz)
				return -1;
		}
		if (opts->auth_algo != RTE_CRYPTO_AUTH_NULL) {
			if (test_vec->auth_key.data == NULL)
				return -1;
			if (test_vec->auth_key.length != opts->auth_key_sz)
				return -1;
			if (test_vec->auth_iv.length != opts->auth_iv_sz)
				return -1;
			/* Auth IV is only required for some algorithms */
			if (opts->auth_iv_sz && test_vec->auth_iv.data == NULL)
				return -1;
			if (test_vec->digest.data == NULL)
				return -1;
			if (test_vec->digest.length < opts->digest_sz)
				return -1;
		}
	} else if (opts->op_type == CPERF_AEAD) {
		if (test_vec->plaintext.data == NULL)
			return -1;
		if (test_vec->plaintext.length < opts->max_buffer_size)
			return -1;
		if (test_vec->ciphertext.data == NULL)
			return -1;
		if (test_vec->ciphertext.length < opts->max_buffer_size)
			return -1;
		if (test_vec->aead_key.data == NULL)
			return -1;
		if (test_vec->aead_key.length != opts->aead_key_sz)
			return -1;
		if (test_vec->aead_iv.data == NULL)
			return -1;
		if (test_vec->aead_iv.length != opts->aead_iv_sz)
			return -1;
		if (test_vec->aad.data == NULL)
			return -1;
		if (test_vec->aad.length != opts->aead_aad_sz)
			return -1;
		if (test_vec->digest.data == NULL)
			return -1;
		if (test_vec->digest.length < opts->digest_sz)
			return -1;
	}
	return 0;
}

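/*
 * Example invocation (illustrative only; the core list, vdev and
 * --devtype values depend on the platform and on which PMDs are built):
 *
 *   dpdk-test-crypto-perf -l 0-3 --vdev crypto_aesni_mb -- \
 *       --ptest throughput --devtype crypto_aesni_mb \
 *       --optype cipher-then-auth --cipher-algo aes-cbc \
 *       --cipher-key-sz 16 --auth-algo sha1-hmac \
 *       --buffer-sz 64,256,1024 --burst-sz 32 --total-ops 1000000
 *
 * One lcore runs the main thread; each remaining worker lcore drives
 * one (device, queue pair) tuple.
 */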
int
main(int argc, char **argv)
{
	struct cperf_options opts = {0};
	struct cperf_test_vector *t_vec = NULL;
	struct cperf_op_fns op_fns;
	void *ctx[RTE_MAX_LCORE] = { };
	int nb_cryptodevs = 0;
	uint16_t total_nb_qps = 0;
	uint8_t cdev_id, i;
	uint8_t enabled_cdevs[RTE_CRYPTO_MAX_DEVS] = { 0 };

	uint8_t buffer_size_idx = 0;

	int ret;
	uint32_t lcore_id;
	bool cap_unsupported = false;

	/* Initialise DPDK EAL */
	ret = rte_eal_init(argc, argv);
	if (ret < 0)
		rte_exit(EXIT_FAILURE, "Invalid EAL arguments!\n");
	argc -= ret;
	argv += ret;

	cperf_options_default(&opts);

	ret = cperf_options_parse(&opts, argc, argv);
	if (ret) {
		RTE_LOG(ERR, USER1, "Parsing one or more user options failed\n");
		goto err;
	}

	ret = cperf_options_check(&opts);
	if (ret) {
		RTE_LOG(ERR, USER1,
				"Checking one or more user options failed\n");
		goto err;
	}

	nb_cryptodevs = cperf_initialize_cryptodev(&opts, enabled_cdevs);

	if (!opts.silent)
		cperf_options_dump(&opts);

	if (nb_cryptodevs < 1) {
		RTE_LOG(ERR, USER1, "Failed to initialise requested crypto "
				"device type\n");
		nb_cryptodevs = 0;
		goto err;
	}

	ret = cperf_verify_devices_capabilities(&opts, enabled_cdevs,
			nb_cryptodevs);
	if (ret) {
		RTE_LOG(ERR, USER1, "Crypto device type does not support "
				"capabilities requested\n");
		cap_unsupported = true;
		goto err;
	}

	if (opts.test_file != NULL) {
		t_vec = cperf_test_vector_get_from_file(&opts);
		if (t_vec == NULL) {
			RTE_LOG(ERR, USER1,
					"Failed to create test vector for"
					" specified file\n");
			goto err;
		}

		if (cperf_check_test_vector(&opts, t_vec)) {
			RTE_LOG(ERR, USER1,
					"Test vector is missing required fields\n");
			goto err;
		}
	} else {
		t_vec = cperf_test_vector_get_dummy(&opts);
		if (t_vec == NULL) {
			RTE_LOG(ERR, USER1,
					"Failed to create test vector for"
					" specified algorithms\n");
			goto err;
		}
	}

	ret = cperf_get_op_functions(&opts, &op_fns);
	if (ret) {
		RTE_LOG(ERR, USER1, "Failed to find function ops set for "
				"specified algorithms combination\n");
		goto err;
	}

	if (!opts.silent && opts.test != CPERF_TEST_TYPE_THROUGHPUT &&
			opts.test != CPERF_TEST_TYPE_LATENCY)
		show_test_vector(t_vec);

	total_nb_qps = nb_cryptodevs * opts.nb_qps;

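	/*
	 * Map one worker lcore to each (device, queue pair) tuple: the
	 * queue pair index advances first, and the device index advances
	 * once all of a device's queue pairs have been taken.
	 */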
	i = 0;
	uint8_t qp_id = 0, cdev_index = 0;
	RTE_LCORE_FOREACH_WORKER(lcore_id) {

		if (i == total_nb_qps)
			break;

		cdev_id = enabled_cdevs[cdev_index];

		int socket_id = rte_cryptodev_socket_id(cdev_id);

		/* Use the first socket if SOCKET_ID_ANY is returned. */
		if (socket_id == SOCKET_ID_ANY)
			socket_id = 0;

		ctx[i] = cperf_testmap[opts.test].constructor(
				session_pool_socket[socket_id].sess_mp,
				cdev_id, qp_id,
				&opts, t_vec, &op_fns);
		if (ctx[i] == NULL) {
			RTE_LOG(ERR, USER1, "Test run constructor failed\n");
			goto err;
		}
		qp_id = (qp_id + 1) % opts.nb_qps;
		if (qp_id == 0)
			cdev_index++;
		i++;
	}

	if (opts.imix_distribution_count != 0) {
		uint8_t buffer_size_count = opts.buffer_size_count;
		uint16_t distribution_total[buffer_size_count];
		uint32_t op_idx;
		uint32_t test_average_size = 0;
		const uint32_t *buffer_size_list = opts.buffer_size_list;
		const uint32_t *imix_distribution_list = opts.imix_distribution_list;

		opts.imix_buffer_sizes = rte_malloc(NULL,
					sizeof(uint32_t) * opts.pool_sz,
					0);
		if (opts.imix_buffer_sizes == NULL) {
			RTE_LOG(ERR, USER1,
					"Failed to allocate IMIX buffer size array\n");
			goto err;
		}
		/*
		 * Calculate the accumulated distribution of
		 * probabilities per packet size.
		 */
		distribution_total[0] = imix_distribution_list[0];
		for (i = 1; i < buffer_size_count; i++)
			distribution_total[i] = imix_distribution_list[i] +
				distribution_total[i-1];
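		/*
		 * For example, buffer sizes {64, 1024, 1500} with weights
		 * {20, 30, 50} accumulate to {20, 50, 100}; a uniform draw
		 * in [0, 100) then selects size i with probability
		 * weight[i] / 100.
		 */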

		/* Calculate a random sequence of packet sizes, based on distribution */
		for (op_idx = 0; op_idx < opts.pool_sz; op_idx++) {
			uint16_t random_number = rte_rand() %
				distribution_total[buffer_size_count - 1];
			for (i = 0; i < buffer_size_count; i++)
				if (random_number < distribution_total[i])
					break;

			opts.imix_buffer_sizes[op_idx] = buffer_size_list[i];
		}

		/* Calculate the average buffer size for the IMIX distribution */
		for (i = 0; i < buffer_size_count; i++)
			test_average_size += buffer_size_list[i] *
				imix_distribution_list[i];

		opts.test_buffer_size = test_average_size /
				distribution_total[buffer_size_count - 1];

		i = 0;
		RTE_LCORE_FOREACH_WORKER(lcore_id) {

			if (i == total_nb_qps)
				break;

			rte_eal_remote_launch(cperf_testmap[opts.test].runner,
				ctx[i], lcore_id);
			i++;
		}
		i = 0;
		RTE_LCORE_FOREACH_WORKER(lcore_id) {

			if (i == total_nb_qps)
				break;
			ret |= rte_eal_wait_lcore(lcore_id);
			i++;
		}

		if (ret != EXIT_SUCCESS)
			goto err;
	} else {

		/* Get next size from range or list */
		if (opts.inc_buffer_size != 0)
			opts.test_buffer_size = opts.min_buffer_size;
		else
			opts.test_buffer_size = opts.buffer_size_list[0];

		while (opts.test_buffer_size <= opts.max_buffer_size) {
			i = 0;
			RTE_LCORE_FOREACH_WORKER(lcore_id) {

				if (i == total_nb_qps)
					break;

				rte_eal_remote_launch(cperf_testmap[opts.test].runner,
					ctx[i], lcore_id);
				i++;
			}
			i = 0;
			RTE_LCORE_FOREACH_WORKER(lcore_id) {

				if (i == total_nb_qps)
					break;
				ret |= rte_eal_wait_lcore(lcore_id);
				i++;
			}

			if (ret != EXIT_SUCCESS)
				goto err;

			/* Get next size from range or list */
			if (opts.inc_buffer_size != 0)
				opts.test_buffer_size += opts.inc_buffer_size;
			else {
				if (++buffer_size_idx == opts.buffer_size_count)
					break;
				opts.test_buffer_size =
					opts.buffer_size_list[buffer_size_idx];
			}
		}
	}

	i = 0;
	RTE_LCORE_FOREACH_WORKER(lcore_id) {

		if (i == total_nb_qps)
			break;

		cperf_testmap[opts.test].destructor(ctx[i]);
		i++;
	}

	for (i = 0; i < nb_cryptodevs &&
			i < RTE_CRYPTO_MAX_DEVS; i++) {
		rte_cryptodev_stop(enabled_cdevs[i]);
		ret = rte_cryptodev_close(enabled_cdevs[i]);
		if (ret)
			RTE_LOG(ERR, USER1,
					"Crypto device close error %d\n", ret);
	}

	free_test_vector(t_vec, &opts);

	printf("\n");
	return EXIT_SUCCESS;

err:
	i = 0;
	RTE_LCORE_FOREACH_WORKER(lcore_id) {
		if (i == total_nb_qps)
			break;

		if (ctx[i] && cperf_testmap[opts.test].destructor)
			cperf_testmap[opts.test].destructor(ctx[i]);
		i++;
	}

	for (i = 0; i < nb_cryptodevs &&
			i < RTE_CRYPTO_MAX_DEVS; i++) {
		rte_cryptodev_stop(enabled_cdevs[i]);
		ret = rte_cryptodev_close(enabled_cdevs[i]);
		if (ret)
			RTE_LOG(ERR, USER1,
					"Crypto device close error %d\n", ret);
	}
	rte_free(opts.imix_buffer_sizes);
	free_test_vector(t_vec, &opts);

	if (rte_errno == ENOTSUP || cap_unsupported) {
		RTE_LOG(ERR, USER1, "Unsupported case: errno: %d\n", rte_errno);
		return -ENOTSUP;
	}
	printf("\n");
	return EXIT_FAILURE;
}