/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2016-2017 Intel Corporation
 */

#include <stdio.h>
#include <unistd.h>

#include <rte_malloc.h>
#include <rte_random.h>
#include <rte_eal.h>
#include <rte_cryptodev.h>
#ifdef RTE_LIBRTE_PMD_CRYPTO_SCHEDULER
#include <rte_cryptodev_scheduler.h>
#endif

#include "cperf.h"
#include "cperf_options.h"
#include "cperf_test_vector_parsing.h"
#include "cperf_test_throughput.h"
#include "cperf_test_latency.h"
#include "cperf_test_verify.h"
#include "cperf_test_pmd_cyclecount.h"

static struct {
	struct rte_mempool *sess_mp;
	struct rte_mempool *priv_mp;
} session_pool_socket[RTE_MAX_NUMA_NODES];

const char *cperf_test_type_strs[] = {
	[CPERF_TEST_TYPE_THROUGHPUT] = "throughput",
	[CPERF_TEST_TYPE_LATENCY] = "latency",
	[CPERF_TEST_TYPE_VERIFY] = "verify",
	[CPERF_TEST_TYPE_PMDCC] = "pmd-cyclecount"
};

const char *cperf_op_type_strs[] = {
	[CPERF_CIPHER_ONLY] = "cipher-only",
	[CPERF_AUTH_ONLY] = "auth-only",
	[CPERF_CIPHER_THEN_AUTH] = "cipher-then-auth",
	[CPERF_AUTH_THEN_CIPHER] = "auth-then-cipher",
	[CPERF_AEAD] = "aead"
};

const struct cperf_test cperf_testmap[] = {
	[CPERF_TEST_TYPE_THROUGHPUT] = {
		cperf_throughput_test_constructor,
		cperf_throughput_test_runner,
		cperf_throughput_test_destructor
	},
	[CPERF_TEST_TYPE_LATENCY] = {
		cperf_latency_test_constructor,
		cperf_latency_test_runner,
		cperf_latency_test_destructor
	},
	[CPERF_TEST_TYPE_VERIFY] = {
		cperf_verify_test_constructor,
		cperf_verify_test_runner,
		cperf_verify_test_destructor
	},
	[CPERF_TEST_TYPE_PMDCC] = {
		cperf_pmd_cyclecount_test_constructor,
		cperf_pmd_cyclecount_test_runner,
		cperf_pmd_cyclecount_test_destructor
	}
};
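
/*
 * Each NUMA socket gets two session mempools: sess_mp holds the generic
 * rte_cryptodev_sym_session headers and priv_mp holds the PMD-specific
 * private session data. Both pools are created lazily, the first time a
 * device on that socket needs them.
 */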
static int
fill_session_pool_socket(int32_t socket_id, uint32_t session_priv_size,
		uint32_t nb_sessions)
{
	char mp_name[RTE_MEMPOOL_NAMESIZE];
	struct rte_mempool *sess_mp;

	if (session_pool_socket[socket_id].priv_mp == NULL) {
		snprintf(mp_name, RTE_MEMPOOL_NAMESIZE,
			"priv_sess_mp_%u", socket_id);

		sess_mp = rte_mempool_create(mp_name,
					nb_sessions,
					session_priv_size,
					0, 0, NULL, NULL, NULL,
					NULL, socket_id,
					0);

		if (sess_mp == NULL) {
			printf("Cannot create pool \"%s\" on socket %d\n",
				mp_name, socket_id);
			return -ENOMEM;
		}

		printf("Allocated pool \"%s\" on socket %d\n",
			mp_name, socket_id);
		session_pool_socket[socket_id].priv_mp = sess_mp;
	}

	if (session_pool_socket[socket_id].sess_mp == NULL) {

		snprintf(mp_name, RTE_MEMPOOL_NAMESIZE,
			"sess_mp_%u", socket_id);

		sess_mp = rte_cryptodev_sym_session_pool_create(mp_name,
					nb_sessions, 0, 0, 0, socket_id);

		if (sess_mp == NULL) {
			printf("Cannot create pool \"%s\" on socket %d\n",
				mp_name, socket_id);
			return -ENOMEM;
		}

		printf("Allocated pool \"%s\" on socket %d\n",
			mp_name, socket_id);
		session_pool_socket[socket_id].sess_mp = sess_mp;
	}

	return 0;
}
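
/*
 * Probe and set up all crypto devices of the requested type: size the
 * shared session pools, spread the available worker lcores across the
 * devices as queue pairs, then configure and start each device.
 * Returns the number of devices actually enabled, or a negative errno.
 */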
static int
cperf_initialize_cryptodev(struct cperf_options *opts, uint8_t *enabled_cdevs)
{
	uint8_t enabled_cdev_count = 0, nb_lcores, cdev_id;
	uint32_t sessions_needed = 0;
	unsigned int i, j;
	int ret;

	enabled_cdev_count = rte_cryptodev_devices_get(opts->device_type,
			enabled_cdevs, RTE_CRYPTO_MAX_DEVS);
	if (enabled_cdev_count == 0) {
		printf("No crypto devices of type %s available\n",
				opts->device_type);
		return -EINVAL;
	}

	nb_lcores = rte_lcore_count() - 1;

	if (nb_lcores < 1) {
		RTE_LOG(ERR, USER1,
			"Number of enabled cores needs to be higher than 1\n");
		return -EINVAL;
	}

	/*
	 * Use fewer devices if more are
	 * available than logical cores.
	 */
	if (enabled_cdev_count > nb_lcores)
		enabled_cdev_count = nb_lcores;

	/* Create a mempool shared by all the devices */
	uint32_t max_sess_size = 0, sess_size;

	for (cdev_id = 0; cdev_id < rte_cryptodev_count(); cdev_id++) {
		sess_size = rte_cryptodev_sym_get_private_session_size(cdev_id);
		if (sess_size > max_sess_size)
			max_sess_size = sess_size;
	}

	/*
	 * Calculate the number of queue pairs needed, based on the number
	 * of available logical cores and crypto devices. For instance,
	 * with 4 cores and 2 crypto devices, 2 queue pairs are set up
	 * per device.
	 */
	opts->nb_qps = (nb_lcores % enabled_cdev_count) ?
				(nb_lcores / enabled_cdev_count) + 1 :
				nb_lcores / enabled_cdev_count;

	for (i = 0; i < enabled_cdev_count &&
			i < RTE_CRYPTO_MAX_DEVS; i++) {
		cdev_id = enabled_cdevs[i];
#ifdef RTE_LIBRTE_PMD_CRYPTO_SCHEDULER
		/*
		 * If the multi-core scheduler is used, limit the number
		 * of queue pairs to 1, as there is no way to know
		 * how many cores are being used by the PMD, and
		 * how many will be available for the application.
		 */
		if (!strcmp((const char *)opts->device_type, "crypto_scheduler") &&
				rte_cryptodev_scheduler_mode_get(cdev_id) ==
				CDEV_SCHED_MODE_MULTICORE)
			opts->nb_qps = 1;
#endif

		struct rte_cryptodev_info cdev_info;
		uint8_t socket_id = rte_cryptodev_socket_id(cdev_id);
		/* range check the socket_id - negative values become big
		 * positive ones due to use of unsigned value
		 */
		if (socket_id >= RTE_MAX_NUMA_NODES)
			socket_id = 0;

		rte_cryptodev_info_get(cdev_id, &cdev_info);
		if (opts->nb_qps > cdev_info.max_nb_queue_pairs) {
			printf("Number of needed queue pairs is higher "
				"than the maximum number of queue pairs "
				"per device.\n");
			printf("Lower the number of cores or increase "
				"the number of crypto devices\n");
			return -EINVAL;
		}
		struct rte_cryptodev_config conf = {
			.nb_queue_pairs = opts->nb_qps,
			.socket_id = socket_id,
			.ff_disable = RTE_CRYPTODEV_FF_SECURITY |
				      RTE_CRYPTODEV_FF_ASYMMETRIC_CRYPTO,
		};

		struct rte_cryptodev_qp_conf qp_conf = {
			.nb_descriptors = opts->nb_descriptors
		};

		/**
		 * Device info specifies the min headroom and tailroom
		 * requirements for the crypto PMD. These need to be
		 * honoured by the application when creating mbufs.
		 */
		if (opts->headroom_sz < cdev_info.min_mbuf_headroom_req) {
			/* Update headroom */
			opts->headroom_sz = cdev_info.min_mbuf_headroom_req;
		}
		if (opts->tailroom_sz < cdev_info.min_mbuf_tailroom_req) {
			/* Update tailroom */
			opts->tailroom_sz = cdev_info.min_mbuf_tailroom_req;
		}

		/* Update segment size to include headroom & tailroom */
		opts->segment_sz += (opts->headroom_sz + opts->tailroom_sz);

		uint32_t dev_max_nb_sess = cdev_info.sym.max_nb_sessions;
		/*
		 * Two session objects are required for each session
		 * (one for the header, one for the private data)
		 */
		if (!strcmp((const char *)opts->device_type,
					"crypto_scheduler")) {
#ifdef RTE_LIBRTE_PMD_CRYPTO_SCHEDULER
			uint32_t nb_slaves =
				rte_cryptodev_scheduler_slaves_get(cdev_id,
								NULL);

			sessions_needed = enabled_cdev_count *
				opts->nb_qps * nb_slaves;
#endif
		} else
			sessions_needed = enabled_cdev_count *
						opts->nb_qps;

		/*
		 * A single session is required per queue pair
		 * in each device
		 */
		if (dev_max_nb_sess != 0 && dev_max_nb_sess < opts->nb_qps) {
			RTE_LOG(ERR, USER1,
				"Device does not support at least "
				"%u sessions\n", opts->nb_qps);
			return -ENOTSUP;
		}

		ret = fill_session_pool_socket(socket_id, max_sess_size,
				sessions_needed);
		if (ret < 0)
			return ret;

		qp_conf.mp_session = session_pool_socket[socket_id].sess_mp;
		qp_conf.mp_session_private =
				session_pool_socket[socket_id].priv_mp;

		ret = rte_cryptodev_configure(cdev_id, &conf);
		if (ret < 0) {
			printf("Failed to configure cryptodev %u\n", cdev_id);
			return -EINVAL;
		}

		for (j = 0; j < opts->nb_qps; j++) {
			ret = rte_cryptodev_queue_pair_setup(cdev_id, j,
				&qp_conf, socket_id);
			if (ret < 0) {
				printf("Failed to setup queue pair %u on "
					"cryptodev %u\n", j, cdev_id);
				return -EINVAL;
			}
		}

		ret = rte_cryptodev_start(cdev_id);
		if (ret < 0) {
			printf("Failed to start device %u: error %d\n",
					cdev_id, ret);
			return -EPERM;
		}
	}

	return enabled_cdev_count;
}
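
/*
 * Verify that every enabled device supports the requested algorithm(s)
 * with the configured key, digest, IV and AAD sizes.
 */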
static int
cperf_verify_devices_capabilities(struct cperf_options *opts,
		uint8_t *enabled_cdevs, uint8_t nb_cryptodevs)
{
	struct rte_cryptodev_sym_capability_idx cap_idx;
	const struct rte_cryptodev_symmetric_capability *capability;

	uint8_t i, cdev_id;
	int ret;

	for (i = 0; i < nb_cryptodevs; i++) {

		cdev_id = enabled_cdevs[i];

		if (opts->op_type == CPERF_AUTH_ONLY ||
				opts->op_type == CPERF_CIPHER_THEN_AUTH ||
				opts->op_type == CPERF_AUTH_THEN_CIPHER) {

			cap_idx.type = RTE_CRYPTO_SYM_XFORM_AUTH;
			cap_idx.algo.auth = opts->auth_algo;

			capability = rte_cryptodev_sym_capability_get(cdev_id,
					&cap_idx);
			if (capability == NULL)
				return -1;

			ret = rte_cryptodev_sym_capability_check_auth(
					capability,
					opts->auth_key_sz,
					opts->digest_sz,
					opts->auth_iv_sz);
			if (ret != 0)
				return ret;
		}

		if (opts->op_type == CPERF_CIPHER_ONLY ||
				opts->op_type == CPERF_CIPHER_THEN_AUTH ||
				opts->op_type == CPERF_AUTH_THEN_CIPHER) {

			cap_idx.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
			cap_idx.algo.cipher = opts->cipher_algo;

			capability = rte_cryptodev_sym_capability_get(cdev_id,
					&cap_idx);
			if (capability == NULL)
				return -1;

			ret = rte_cryptodev_sym_capability_check_cipher(
					capability,
					opts->cipher_key_sz,
					opts->cipher_iv_sz);
			if (ret != 0)
				return ret;
		}

		if (opts->op_type == CPERF_AEAD) {

			cap_idx.type = RTE_CRYPTO_SYM_XFORM_AEAD;
			cap_idx.algo.aead = opts->aead_algo;

			capability = rte_cryptodev_sym_capability_get(cdev_id,
					&cap_idx);
			if (capability == NULL)
				return -1;

			ret = rte_cryptodev_sym_capability_check_aead(
					capability,
					opts->aead_key_sz,
					opts->digest_sz,
					opts->aead_aad_sz,
					opts->aead_iv_sz);
			if (ret != 0)
				return ret;
		}
	}

	return 0;
}
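
/*
 * Validate a user-supplied test vector against the selected options:
 * every field the chosen operation type consumes must be present and
 * large enough for the configured sizes.
 */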
static int
cperf_check_test_vector(struct cperf_options *opts,
		struct cperf_test_vector *test_vec)
{
	if (opts->op_type == CPERF_CIPHER_ONLY) {
		if (opts->cipher_algo == RTE_CRYPTO_CIPHER_NULL) {
			if (test_vec->plaintext.data == NULL)
				return -1;
		} else if (opts->cipher_algo != RTE_CRYPTO_CIPHER_NULL) {
			if (test_vec->plaintext.data == NULL)
				return -1;
			if (test_vec->plaintext.length < opts->max_buffer_size)
				return -1;
			if (test_vec->ciphertext.data == NULL)
				return -1;
			if (test_vec->ciphertext.length < opts->max_buffer_size)
				return -1;
			/* Cipher IV is only required for some algorithms */
			if (opts->cipher_iv_sz &&
					test_vec->cipher_iv.data == NULL)
				return -1;
			if (test_vec->cipher_iv.length != opts->cipher_iv_sz)
				return -1;
			if (test_vec->cipher_key.data == NULL)
				return -1;
			if (test_vec->cipher_key.length != opts->cipher_key_sz)
				return -1;
		}
	} else if (opts->op_type == CPERF_AUTH_ONLY) {
		if (opts->auth_algo != RTE_CRYPTO_AUTH_NULL) {
			if (test_vec->plaintext.data == NULL)
				return -1;
			if (test_vec->plaintext.length < opts->max_buffer_size)
				return -1;
			/* Auth key is only required for some algorithms */
			if (opts->auth_key_sz &&
					test_vec->auth_key.data == NULL)
				return -1;
			if (test_vec->auth_key.length != opts->auth_key_sz)
				return -1;
			if (test_vec->auth_iv.length != opts->auth_iv_sz)
				return -1;
			/* Auth IV is only required for some algorithms */
			if (opts->auth_iv_sz && test_vec->auth_iv.data == NULL)
				return -1;
			if (test_vec->digest.data == NULL)
				return -1;
			if (test_vec->digest.length < opts->digest_sz)
				return -1;
		}

	} else if (opts->op_type == CPERF_CIPHER_THEN_AUTH ||
			opts->op_type == CPERF_AUTH_THEN_CIPHER) {
		if (opts->cipher_algo == RTE_CRYPTO_CIPHER_NULL) {
			if (test_vec->plaintext.data == NULL)
				return -1;
			if (test_vec->plaintext.length < opts->max_buffer_size)
				return -1;
		} else if (opts->cipher_algo != RTE_CRYPTO_CIPHER_NULL) {
			if (test_vec->plaintext.data == NULL)
				return -1;
			if (test_vec->plaintext.length < opts->max_buffer_size)
				return -1;
			if (test_vec->ciphertext.data == NULL)
				return -1;
			if (test_vec->ciphertext.length < opts->max_buffer_size)
				return -1;
			if (test_vec->cipher_iv.data == NULL)
				return -1;
			if (test_vec->cipher_iv.length != opts->cipher_iv_sz)
				return -1;
			if (test_vec->cipher_key.data == NULL)
				return -1;
			if (test_vec->cipher_key.length != opts->cipher_key_sz)
				return -1;
		}
		if (opts->auth_algo != RTE_CRYPTO_AUTH_NULL) {
			if (test_vec->auth_key.data == NULL)
				return -1;
			if (test_vec->auth_key.length != opts->auth_key_sz)
				return -1;
			if (test_vec->auth_iv.length != opts->auth_iv_sz)
				return -1;
			/* Auth IV is only required for some algorithms */
			if (opts->auth_iv_sz && test_vec->auth_iv.data == NULL)
				return -1;
			if (test_vec->digest.data == NULL)
				return -1;
			if (test_vec->digest.length < opts->digest_sz)
				return -1;
		}
	} else if (opts->op_type == CPERF_AEAD) {
		if (test_vec->plaintext.data == NULL)
			return -1;
		if (test_vec->plaintext.length < opts->max_buffer_size)
			return -1;
		if (test_vec->ciphertext.data == NULL)
			return -1;
		if (test_vec->ciphertext.length < opts->max_buffer_size)
			return -1;
		if (test_vec->aead_key.data == NULL)
			return -1;
		if (test_vec->aead_key.length != opts->aead_key_sz)
			return -1;
		if (test_vec->aead_iv.data == NULL)
			return -1;
		if (test_vec->aead_iv.length != opts->aead_iv_sz)
			return -1;
		if (test_vec->aad.data == NULL)
			return -1;
		if (test_vec->aad.length != opts->aead_aad_sz)
			return -1;
		if (test_vec->digest.data == NULL)
			return -1;
		if (test_vec->digest.length < opts->digest_sz)
			return -1;
	}
	return 0;
}
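
/*
 * Entry point: parse EAL and application options, set up the crypto
 * devices, build one test context per queue pair, and run the selected
 * test on the worker lcores. An illustrative invocation (binary name and
 * option names assumed from the string tables above and the option
 * parser; the exact set depends on the DPDK version) might be:
 *
 *   ./dpdk-test-crypto-perf -l 0-2 --vdev crypto_aesni_mb -- \
 *       --ptest throughput --devtype crypto_aesni_mb \
 *       --optype cipher-only --cipher-algo aes-cbc --cipher-key-sz 16
 */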
int
main(int argc, char **argv)
{
	struct cperf_options opts = {0};
	struct cperf_test_vector *t_vec = NULL;
	struct cperf_op_fns op_fns;
	void *ctx[RTE_MAX_LCORE] = { };
	int nb_cryptodevs = 0;
	uint16_t total_nb_qps = 0;
	uint8_t cdev_id, i;
	uint8_t enabled_cdevs[RTE_CRYPTO_MAX_DEVS] = { 0 };

	uint8_t buffer_size_idx = 0;

	int ret;
	uint32_t lcore_id;

	/* Initialise DPDK EAL */
	ret = rte_eal_init(argc, argv);
	if (ret < 0)
		rte_exit(EXIT_FAILURE, "Invalid EAL arguments!\n");
	argc -= ret;
	argv += ret;

	cperf_options_default(&opts);

	ret = cperf_options_parse(&opts, argc, argv);
	if (ret) {
		RTE_LOG(ERR, USER1,
			"Parsing one or more user options failed\n");
		goto err;
	}

	ret = cperf_options_check(&opts);
	if (ret) {
		RTE_LOG(ERR, USER1,
			"Checking one or more user options failed\n");
		goto err;
	}

	nb_cryptodevs = cperf_initialize_cryptodev(&opts, enabled_cdevs);

	if (!opts.silent)
		cperf_options_dump(&opts);

	if (nb_cryptodevs < 1) {
		RTE_LOG(ERR, USER1, "Failed to initialise requested crypto "
				"device type\n");
		nb_cryptodevs = 0;
		goto err;
	}

	ret = cperf_verify_devices_capabilities(&opts, enabled_cdevs,
			nb_cryptodevs);
	if (ret) {
		RTE_LOG(ERR, USER1, "Crypto device type does not support "
				"the requested capabilities\n");
		goto err;
	}

	if (opts.test_file != NULL) {
		t_vec = cperf_test_vector_get_from_file(&opts);
		if (t_vec == NULL) {
			RTE_LOG(ERR, USER1,
					"Failed to create test vector for"
					" the specified file\n");
			goto err;
		}

		if (cperf_check_test_vector(&opts, t_vec)) {
			RTE_LOG(ERR, USER1, "Necessary test vectors are "
					"incomplete\n");
			goto err;
		}
	} else {
		t_vec = cperf_test_vector_get_dummy(&opts);
		if (t_vec == NULL) {
			RTE_LOG(ERR, USER1,
					"Failed to create test vector for"
					" the specified algorithms\n");
			goto err;
		}
	}

	ret = cperf_get_op_functions(&opts, &op_fns);
	if (ret) {
		RTE_LOG(ERR, USER1, "Failed to find function ops set for "
				"the specified algorithm combination\n");
		goto err;
	}

	if (!opts.silent)
		show_test_vector(t_vec);

	total_nb_qps = nb_cryptodevs * opts.nb_qps;

	i = 0;
	uint8_t qp_id = 0, cdev_index = 0;
	RTE_LCORE_FOREACH_SLAVE(lcore_id) {

		if (i == total_nb_qps)
			break;

		cdev_id = enabled_cdevs[cdev_index];

		uint8_t socket_id = rte_cryptodev_socket_id(cdev_id);

		ctx[i] = cperf_testmap[opts.test].constructor(
				session_pool_socket[socket_id].sess_mp,
				session_pool_socket[socket_id].priv_mp,
				cdev_id, qp_id,
				&opts, t_vec, &op_fns);
		if (ctx[i] == NULL) {
			RTE_LOG(ERR, USER1, "Test run constructor failed\n");
			goto err;
		}
		qp_id = (qp_id + 1) % opts.nb_qps;
		if (qp_id == 0)
			cdev_index++;
		i++;
	}
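
	/*
	 * IMIX mode: build a cumulative distribution over the configured
	 * buffer sizes, then draw a random size per operation so that the
	 * mix of packet sizes matches the requested weights.
	 */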
	if (opts.imix_distribution_count != 0) {
		uint8_t buffer_size_count = opts.buffer_size_count;
		uint16_t distribution_total[buffer_size_count];
		uint32_t op_idx;
		uint32_t test_average_size = 0;
		const uint32_t *buffer_size_list = opts.buffer_size_list;
		const uint32_t *imix_distribution_list =
				opts.imix_distribution_list;

		opts.imix_buffer_sizes = rte_malloc(NULL,
					sizeof(uint32_t) * opts.pool_sz,
					0);
		/* Bail out if the allocation failed; the array is
		 * dereferenced below.
		 */
		if (opts.imix_buffer_sizes == NULL)
			goto err;

		/*
		 * Calculate the accumulated distribution of
		 * probabilities per packet size
		 */
		distribution_total[0] = imix_distribution_list[0];
		for (i = 1; i < buffer_size_count; i++)
			distribution_total[i] = imix_distribution_list[i] +
				distribution_total[i-1];

		/* Calculate a random sequence of packet sizes, based on distribution */
		for (op_idx = 0; op_idx < opts.pool_sz; op_idx++) {
			uint16_t random_number = rte_rand() %
				distribution_total[buffer_size_count - 1];
			for (i = 0; i < buffer_size_count; i++)
				if (random_number < distribution_total[i])
					break;

			opts.imix_buffer_sizes[op_idx] = buffer_size_list[i];
		}

		/* Calculate average buffer size for the IMIX distribution */
		for (i = 0; i < buffer_size_count; i++)
			test_average_size += buffer_size_list[i] *
				imix_distribution_list[i];

		opts.test_buffer_size = test_average_size /
				distribution_total[buffer_size_count - 1];

		i = 0;
		RTE_LCORE_FOREACH_SLAVE(lcore_id) {

			if (i == total_nb_qps)
				break;

			rte_eal_remote_launch(cperf_testmap[opts.test].runner,
				ctx[i], lcore_id);
			i++;
		}
		i = 0;
		RTE_LCORE_FOREACH_SLAVE(lcore_id) {

			if (i == total_nb_qps)
				break;
			ret |= rte_eal_wait_lcore(lcore_id);
			i++;
		}

		if (ret != EXIT_SUCCESS)
			goto err;
	} else {

		/* Get next size from range or list */
		if (opts.inc_buffer_size != 0)
			opts.test_buffer_size = opts.min_buffer_size;
		else
			opts.test_buffer_size = opts.buffer_size_list[0];

		while (opts.test_buffer_size <= opts.max_buffer_size) {
			i = 0;
			RTE_LCORE_FOREACH_SLAVE(lcore_id) {

				if (i == total_nb_qps)
					break;

				rte_eal_remote_launch(cperf_testmap[opts.test].runner,
					ctx[i], lcore_id);
				i++;
			}
			i = 0;
			RTE_LCORE_FOREACH_SLAVE(lcore_id) {

				if (i == total_nb_qps)
					break;
				ret |= rte_eal_wait_lcore(lcore_id);
				i++;
			}

			if (ret != EXIT_SUCCESS)
				goto err;

			/* Get next size from range or list */
			if (opts.inc_buffer_size != 0)
				opts.test_buffer_size += opts.inc_buffer_size;
			else {
				if (++buffer_size_idx == opts.buffer_size_count)
					break;
				opts.test_buffer_size =
					opts.buffer_size_list[buffer_size_idx];
			}
		}
	}

	i = 0;
	RTE_LCORE_FOREACH_SLAVE(lcore_id) {

		if (i == total_nb_qps)
			break;

		cperf_testmap[opts.test].destructor(ctx[i]);
		i++;
	}

	for (i = 0; i < nb_cryptodevs &&
			i < RTE_CRYPTO_MAX_DEVS; i++)
		rte_cryptodev_stop(enabled_cdevs[i]);

	free_test_vector(t_vec, &opts);

	printf("\n");
	return EXIT_SUCCESS;

err:
	i = 0;
	RTE_LCORE_FOREACH_SLAVE(lcore_id) {
		if (i == total_nb_qps)
			break;

		if (ctx[i] && cperf_testmap[opts.test].destructor)
			cperf_testmap[opts.test].destructor(ctx[i]);
		i++;
	}

	for (i = 0; i < nb_cryptodevs &&
			i < RTE_CRYPTO_MAX_DEVS; i++)
		rte_cryptodev_stop(enabled_cdevs[i]);
	rte_free(opts.imix_buffer_sizes);
	free_test_vector(t_vec, &opts);

	printf("\n");
	return EXIT_FAILURE;
}