/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2016-2017 Intel Corporation
 */

#include <stdio.h>
#include <unistd.h>

#include <rte_malloc.h>
#include <rte_random.h>
#include <rte_eal.h>
#include <rte_cryptodev.h>
#ifdef RTE_LIBRTE_PMD_CRYPTO_SCHEDULER
#include <rte_cryptodev_scheduler.h>
#endif

#include "cperf.h"
#include "cperf_options.h"
#include "cperf_test_vector_parsing.h"
#include "cperf_test_throughput.h"
#include "cperf_test_latency.h"
#include "cperf_test_verify.h"
#include "cperf_test_pmd_cyclecount.h"

static struct {
	struct rte_mempool *sess_mp;
	struct rte_mempool *priv_mp;
} session_pool_socket[RTE_MAX_NUMA_NODES];

const char *cperf_test_type_strs[] = {
	[CPERF_TEST_TYPE_THROUGHPUT] = "throughput",
	[CPERF_TEST_TYPE_LATENCY] = "latency",
	[CPERF_TEST_TYPE_VERIFY] = "verify",
	[CPERF_TEST_TYPE_PMDCC] = "pmd-cyclecount"
};

const char *cperf_op_type_strs[] = {
	[CPERF_CIPHER_ONLY] = "cipher-only",
	[CPERF_AUTH_ONLY] = "auth-only",
	[CPERF_CIPHER_THEN_AUTH] = "cipher-then-auth",
	[CPERF_AUTH_THEN_CIPHER] = "auth-then-cipher",
	[CPERF_AEAD] = "aead",
	[CPERF_PDCP] = "pdcp",
	[CPERF_DOCSIS] = "docsis"
};

const struct cperf_test cperf_testmap[] = {
	[CPERF_TEST_TYPE_THROUGHPUT] = {
		cperf_throughput_test_constructor,
		cperf_throughput_test_runner,
		cperf_throughput_test_destructor
	},
	[CPERF_TEST_TYPE_LATENCY] = {
		cperf_latency_test_constructor,
		cperf_latency_test_runner,
		cperf_latency_test_destructor
	},
	[CPERF_TEST_TYPE_VERIFY] = {
		cperf_verify_test_constructor,
		cperf_verify_test_runner,
		cperf_verify_test_destructor
	},
	[CPERF_TEST_TYPE_PMDCC] = {
		cperf_pmd_cyclecount_test_constructor,
		cperf_pmd_cyclecount_test_runner,
		cperf_pmd_cyclecount_test_destructor
	}
};

static int
fill_session_pool_socket(int32_t socket_id, uint32_t session_priv_size,
		uint32_t nb_sessions)
{
	char mp_name[RTE_MEMPOOL_NAMESIZE];
	struct rte_mempool *sess_mp;

	if (session_pool_socket[socket_id].priv_mp == NULL) {
		snprintf(mp_name, RTE_MEMPOOL_NAMESIZE,
				"priv_sess_mp_%d", socket_id);

		sess_mp = rte_mempool_create(mp_name,
				nb_sessions,
				session_priv_size,
				0, 0, NULL, NULL, NULL,
				NULL, socket_id,
				0);

		if (sess_mp == NULL) {
			printf("Cannot create pool \"%s\" on socket %d\n",
					mp_name, socket_id);
			return -ENOMEM;
		}

		printf("Allocated pool \"%s\" on socket %d\n",
				mp_name, socket_id);
		session_pool_socket[socket_id].priv_mp = sess_mp;
	}

	if (session_pool_socket[socket_id].sess_mp == NULL) {
		snprintf(mp_name, RTE_MEMPOOL_NAMESIZE,
				"sess_mp_%d", socket_id);

		sess_mp = rte_cryptodev_sym_session_pool_create(mp_name,
				nb_sessions, 0, 0, 0, socket_id);

		if (sess_mp == NULL) {
			printf("Cannot create pool \"%s\" on socket %d\n",
					mp_name, socket_id);
			return -ENOMEM;
		}

		printf("Allocated pool \"%s\" on socket %d\n",
				mp_name, socket_id);
		session_pool_socket[socket_id].sess_mp = sess_mp;
	}

	return 0;
}
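
/*
 * Illustrative sketch only (not called by this tool; the real session setup
 * lives in the per-test constructors): how the two pools filled above are
 * typically consumed with the symmetric-session API of this DPDK era. The
 * session header is allocated from sess_mp via
 * rte_cryptodev_sym_session_create(), and the device-private data is bound
 * from priv_mp via rte_cryptodev_sym_session_init(). The helper name and the
 * caller-prepared xform are assumptions for the example.
 */
static inline struct rte_cryptodev_sym_session *
example_session_create(uint8_t dev_id, int32_t socket_id,
		struct rte_crypto_sym_xform *xform)
{
	struct rte_cryptodev_sym_session *sess;

	/* Allocate the session header from the shared session pool */
	sess = rte_cryptodev_sym_session_create(
			session_pool_socket[socket_id].sess_mp);
	if (sess == NULL)
		return NULL;

	/* Bind the device-private session data from the private-data pool */
	if (rte_cryptodev_sym_session_init(dev_id, sess, xform,
			session_pool_socket[socket_id].priv_mp) < 0) {
		rte_cryptodev_sym_session_free(sess);
		return NULL;
	}

	return sess;
}
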
static int
cperf_initialize_cryptodev(struct cperf_options *opts, uint8_t *enabled_cdevs)
{
	uint8_t enabled_cdev_count = 0, nb_lcores, cdev_id;
	uint32_t sessions_needed = 0;
	unsigned int i, j;
	int ret;

	enabled_cdev_count = rte_cryptodev_devices_get(opts->device_type,
			enabled_cdevs, RTE_CRYPTO_MAX_DEVS);
	if (enabled_cdev_count == 0) {
		printf("No crypto devices of type %s available\n",
				opts->device_type);
		return -EINVAL;
	}

	nb_lcores = rte_lcore_count() - 1;

	if (nb_lcores < 1) {
		RTE_LOG(ERR, USER1,
			"Number of enabled cores needs to be higher than 1\n");
		return -EINVAL;
	}

	/*
	 * Use fewer devices,
	 * if there are more available than cores.
	 */
	if (enabled_cdev_count > nb_lcores)
		enabled_cdev_count = nb_lcores;

	/* Create a mempool shared by all the devices */
	uint32_t max_sess_size = 0, sess_size;

	for (cdev_id = 0; cdev_id < rte_cryptodev_count(); cdev_id++) {
		sess_size = rte_cryptodev_sym_get_private_session_size(cdev_id);
		if (sess_size > max_sess_size)
			max_sess_size = sess_size;
	}

	/*
	 * Calculate the number of queue pairs needed, based on the number
	 * of available logical cores and crypto devices. For instance,
	 * if there are 4 cores and 2 crypto devices, 2 queue pairs will be
	 * set up per device.
	 */
	opts->nb_qps = (nb_lcores % enabled_cdev_count) ?
				(nb_lcores / enabled_cdev_count) + 1 :
				nb_lcores / enabled_cdev_count;

	for (i = 0; i < enabled_cdev_count &&
			i < RTE_CRYPTO_MAX_DEVS; i++) {
		cdev_id = enabled_cdevs[i];
#ifdef RTE_LIBRTE_PMD_CRYPTO_SCHEDULER
		/*
		 * If the multi-core scheduler is used, limit the number
		 * of queue pairs to 1, as there is no way to know
		 * how many cores are being used by the PMD, and
		 * how many will be available for the application.
		 */
		if (!strcmp((const char *)opts->device_type, "crypto_scheduler") &&
				rte_cryptodev_scheduler_mode_get(cdev_id) ==
				CDEV_SCHED_MODE_MULTICORE)
			opts->nb_qps = 1;
#endif

		struct rte_cryptodev_info cdev_info;
		uint8_t socket_id = rte_cryptodev_socket_id(cdev_id);
		/* range check the socket_id - negative values become big
		 * positive ones due to use of unsigned value
		 */
		if (socket_id >= RTE_MAX_NUMA_NODES)
			socket_id = 0;

		rte_cryptodev_info_get(cdev_id, &cdev_info);
		if (opts->nb_qps > cdev_info.max_nb_queue_pairs) {
			printf("Number of needed queue pairs is higher "
				"than the maximum number of queue pairs "
				"per device.\n");
			printf("Lower the number of cores or increase "
				"the number of crypto devices\n");
			return -EINVAL;
		}
		struct rte_cryptodev_config conf = {
			.nb_queue_pairs = opts->nb_qps,
			.socket_id = socket_id,
			.ff_disable = RTE_CRYPTODEV_FF_ASYMMETRIC_CRYPTO,
		};

		if (opts->op_type != CPERF_PDCP &&
				opts->op_type != CPERF_DOCSIS)
			conf.ff_disable |= RTE_CRYPTODEV_FF_SECURITY;

		struct rte_cryptodev_qp_conf qp_conf = {
			.nb_descriptors = opts->nb_descriptors
		};

		/*
		 * Device info specifies the min headroom and tailroom
		 * requirement for the crypto PMD. This needs to be honoured
		 * by the application when creating mbufs.
		 */
		if (opts->headroom_sz < cdev_info.min_mbuf_headroom_req) {
			/* Update headroom */
			opts->headroom_sz = cdev_info.min_mbuf_headroom_req;
		}
		if (opts->tailroom_sz < cdev_info.min_mbuf_tailroom_req) {
			/* Update tailroom */
			opts->tailroom_sz = cdev_info.min_mbuf_tailroom_req;
		}

		/* Update segment size to include headroom & tailroom */
		opts->segment_sz += (opts->headroom_sz + opts->tailroom_sz);

		uint32_t dev_max_nb_sess = cdev_info.sym.max_nb_sessions;
		/*
		 * Two session objects are required for each session
		 * (one for the header, one for the private data)
		 */
		if (!strcmp((const char *)opts->device_type,
					"crypto_scheduler")) {
#ifdef RTE_LIBRTE_PMD_CRYPTO_SCHEDULER
			uint32_t nb_slaves =
				rte_cryptodev_scheduler_slaves_get(cdev_id,
								NULL);

			sessions_needed = enabled_cdev_count *
				opts->nb_qps * nb_slaves;
#endif
		} else
			sessions_needed = enabled_cdev_count *
				opts->nb_qps * 2;

		/*
		 * A single session is required per queue pair
		 * in each device
		 */
		if (dev_max_nb_sess != 0 && dev_max_nb_sess < opts->nb_qps) {
			RTE_LOG(ERR, USER1,
				"Device does not support at least "
				"%u sessions\n", opts->nb_qps);
			return -ENOTSUP;
		}

		ret = fill_session_pool_socket(socket_id, max_sess_size,
				sessions_needed);
		if (ret < 0)
			return ret;

		qp_conf.mp_session = session_pool_socket[socket_id].sess_mp;
		qp_conf.mp_session_private =
				session_pool_socket[socket_id].priv_mp;

		ret = rte_cryptodev_configure(cdev_id, &conf);
		if (ret < 0) {
			printf("Failed to configure cryptodev %u\n", cdev_id);
			return -EINVAL;
		}

		for (j = 0; j < opts->nb_qps; j++) {
			ret = rte_cryptodev_queue_pair_setup(cdev_id, j,
					&qp_conf, socket_id);
			if (ret < 0) {
				printf("Failed to setup queue pair %u on "
					"cryptodev %u\n", j, cdev_id);
				return -EINVAL;
			}
		}

		ret = rte_cryptodev_start(cdev_id);
		if (ret < 0) {
			printf("Failed to start device %u: error %d\n",
					cdev_id, ret);
			return -EPERM;
		}
	}

	return enabled_cdev_count;
}
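
/*
 * Illustrative sketch only (the real datapath lives in the individual test
 * runners, e.g. cperf_throughput_test_runner): each worker lcore owns one
 * (device, queue pair) configured above and drives it with the standard
 * cryptodev burst API. The helper name is hypothetical.
 */
static inline uint16_t
example_process_burst(uint8_t dev_id, uint16_t qp_id,
		struct rte_crypto_op **ops, uint16_t nb_ops)
{
	/* Submit a burst of crypto operations to the queue pair... */
	uint16_t nb_enq = rte_cryptodev_enqueue_burst(dev_id, qp_id,
			ops, nb_ops);

	/* ...and collect whatever has completed so far. */
	return rte_cryptodev_dequeue_burst(dev_id, qp_id, ops, nb_enq);
}
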
static int
cperf_verify_devices_capabilities(struct cperf_options *opts,
		uint8_t *enabled_cdevs, uint8_t nb_cryptodevs)
{
	struct rte_cryptodev_sym_capability_idx cap_idx;
	const struct rte_cryptodev_symmetric_capability *capability;

	uint8_t i, cdev_id;
	int ret;

	for (i = 0; i < nb_cryptodevs; i++) {

		cdev_id = enabled_cdevs[i];

		if (opts->op_type == CPERF_AUTH_ONLY ||
				opts->op_type == CPERF_CIPHER_THEN_AUTH ||
				opts->op_type == CPERF_AUTH_THEN_CIPHER) {

			cap_idx.type = RTE_CRYPTO_SYM_XFORM_AUTH;
			cap_idx.algo.auth = opts->auth_algo;

			capability = rte_cryptodev_sym_capability_get(cdev_id,
					&cap_idx);
			if (capability == NULL)
				return -1;

			ret = rte_cryptodev_sym_capability_check_auth(
					capability,
					opts->auth_key_sz,
					opts->digest_sz,
					opts->auth_iv_sz);
			if (ret != 0)
				return ret;
		}

		if (opts->op_type == CPERF_CIPHER_ONLY ||
				opts->op_type == CPERF_CIPHER_THEN_AUTH ||
				opts->op_type == CPERF_AUTH_THEN_CIPHER) {

			cap_idx.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
			cap_idx.algo.cipher = opts->cipher_algo;

			capability = rte_cryptodev_sym_capability_get(cdev_id,
					&cap_idx);
			if (capability == NULL)
				return -1;

			ret = rte_cryptodev_sym_capability_check_cipher(
					capability,
					opts->cipher_key_sz,
					opts->cipher_iv_sz);
			if (ret != 0)
				return ret;
		}

		if (opts->op_type == CPERF_AEAD) {

			cap_idx.type = RTE_CRYPTO_SYM_XFORM_AEAD;
			cap_idx.algo.aead = opts->aead_algo;

			capability = rte_cryptodev_sym_capability_get(cdev_id,
					&cap_idx);
			if (capability == NULL)
				return -1;

			ret = rte_cryptodev_sym_capability_check_aead(
					capability,
					opts->aead_key_sz,
					opts->digest_sz,
					opts->aead_aad_sz,
					opts->aead_iv_sz);
			if (ret != 0)
				return ret;
		}
	}

	return 0;
}
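
/*
 * Validate that a user-supplied test vector carries every field the chosen
 * operation type needs: data buffers must be at least max_buffer_size bytes,
 * keys, IVs and AAD must match the configured sizes exactly, and only the
 * digest may be longer than requested. Returns 0 on success, -1 if any
 * required field is missing or mis-sized.
 */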
static int
cperf_check_test_vector(struct cperf_options *opts,
		struct cperf_test_vector *test_vec)
{
	if (opts->op_type == CPERF_CIPHER_ONLY) {
		if (opts->cipher_algo == RTE_CRYPTO_CIPHER_NULL) {
			if (test_vec->plaintext.data == NULL)
				return -1;
		} else if (opts->cipher_algo != RTE_CRYPTO_CIPHER_NULL) {
			if (test_vec->plaintext.data == NULL)
				return -1;
			if (test_vec->plaintext.length < opts->max_buffer_size)
				return -1;
			if (test_vec->ciphertext.data == NULL)
				return -1;
			if (test_vec->ciphertext.length < opts->max_buffer_size)
				return -1;
			/* Cipher IV is only required for some algorithms */
			if (opts->cipher_iv_sz &&
					test_vec->cipher_iv.data == NULL)
				return -1;
			if (test_vec->cipher_iv.length != opts->cipher_iv_sz)
				return -1;
			if (test_vec->cipher_key.data == NULL)
				return -1;
			if (test_vec->cipher_key.length != opts->cipher_key_sz)
				return -1;
		}
	} else if (opts->op_type == CPERF_AUTH_ONLY) {
		if (opts->auth_algo != RTE_CRYPTO_AUTH_NULL) {
			if (test_vec->plaintext.data == NULL)
				return -1;
			if (test_vec->plaintext.length < opts->max_buffer_size)
				return -1;
			/* Auth key is only required for some algorithms */
			if (opts->auth_key_sz &&
					test_vec->auth_key.data == NULL)
				return -1;
			if (test_vec->auth_key.length != opts->auth_key_sz)
				return -1;
			if (test_vec->auth_iv.length != opts->auth_iv_sz)
				return -1;
			/* Auth IV is only required for some algorithms */
			if (opts->auth_iv_sz && test_vec->auth_iv.data == NULL)
				return -1;
			if (test_vec->digest.data == NULL)
				return -1;
			if (test_vec->digest.length < opts->digest_sz)
				return -1;
		}

	} else if (opts->op_type == CPERF_CIPHER_THEN_AUTH ||
			opts->op_type == CPERF_AUTH_THEN_CIPHER) {
		if (opts->cipher_algo == RTE_CRYPTO_CIPHER_NULL) {
			if (test_vec->plaintext.data == NULL)
				return -1;
			if (test_vec->plaintext.length < opts->max_buffer_size)
				return -1;
		} else if (opts->cipher_algo != RTE_CRYPTO_CIPHER_NULL) {
			if (test_vec->plaintext.data == NULL)
				return -1;
			if (test_vec->plaintext.length < opts->max_buffer_size)
				return -1;
			if (test_vec->ciphertext.data == NULL)
				return -1;
			if (test_vec->ciphertext.length < opts->max_buffer_size)
				return -1;
			if (test_vec->cipher_iv.data == NULL)
				return -1;
			if (test_vec->cipher_iv.length != opts->cipher_iv_sz)
				return -1;
			if (test_vec->cipher_key.data == NULL)
				return -1;
			if (test_vec->cipher_key.length != opts->cipher_key_sz)
				return -1;
		}
		if (opts->auth_algo != RTE_CRYPTO_AUTH_NULL) {
			if (test_vec->auth_key.data == NULL)
				return -1;
			if (test_vec->auth_key.length != opts->auth_key_sz)
				return -1;
			if (test_vec->auth_iv.length != opts->auth_iv_sz)
				return -1;
			/* Auth IV is only required for some algorithms */
			if (opts->auth_iv_sz && test_vec->auth_iv.data == NULL)
				return -1;
			if (test_vec->digest.data == NULL)
				return -1;
			if (test_vec->digest.length < opts->digest_sz)
				return -1;
		}
	} else if (opts->op_type == CPERF_AEAD) {
		if (test_vec->plaintext.data == NULL)
			return -1;
		if (test_vec->plaintext.length < opts->max_buffer_size)
			return -1;
		if (test_vec->ciphertext.data == NULL)
			return -1;
		if (test_vec->ciphertext.length < opts->max_buffer_size)
			return -1;
		if (test_vec->aead_key.data == NULL)
			return -1;
		if (test_vec->aead_key.length != opts->aead_key_sz)
			return -1;
		if (test_vec->aead_iv.data == NULL)
			return -1;
		if (test_vec->aead_iv.length != opts->aead_iv_sz)
			return -1;
		if (test_vec->aad.data == NULL)
			return -1;
		if (test_vec->aad.length != opts->aead_aad_sz)
			return -1;
		if (test_vec->digest.data == NULL)
			return -1;
		if (test_vec->digest.length < opts->digest_sz)
			return -1;
	}
	return 0;
}
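
/*
 * Entry point. The overall flow is: initialise the EAL, parse and check the
 * test options, set up the crypto devices and their session pools, verify
 * that every selected device supports the requested algorithms, obtain the
 * test vector (from a file or generated), construct one test context per
 * (device, queue pair), run the per-lcore test runners over every requested
 * buffer size (or the IMIX mix), then tear everything down.
 */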
int
main(int argc, char **argv)
{
	struct cperf_options opts = {0};
	struct cperf_test_vector *t_vec = NULL;
	struct cperf_op_fns op_fns;
	void *ctx[RTE_MAX_LCORE] = { };
	int nb_cryptodevs = 0;
	uint16_t total_nb_qps = 0;
	uint8_t cdev_id, i;
	uint8_t enabled_cdevs[RTE_CRYPTO_MAX_DEVS] = { 0 };

	uint8_t buffer_size_idx = 0;

	int ret;
	uint32_t lcore_id;

	/* Initialise DPDK EAL */
	ret = rte_eal_init(argc, argv);
	if (ret < 0)
		rte_exit(EXIT_FAILURE, "Invalid EAL arguments!\n");
	argc -= ret;
	argv += ret;

	cperf_options_default(&opts);

	ret = cperf_options_parse(&opts, argc, argv);
	if (ret) {
		RTE_LOG(ERR, USER1,
			"Parsing one or more user options failed\n");
		goto err;
	}

	ret = cperf_options_check(&opts);
	if (ret) {
		RTE_LOG(ERR, USER1,
			"Checking one or more user options failed\n");
		goto err;
	}

	nb_cryptodevs = cperf_initialize_cryptodev(&opts, enabled_cdevs);

	if (!opts.silent)
		cperf_options_dump(&opts);

	if (nb_cryptodevs < 1) {
		RTE_LOG(ERR, USER1, "Failed to initialise requested crypto "
				"device type\n");
		nb_cryptodevs = 0;
		goto err;
	}

	ret = cperf_verify_devices_capabilities(&opts, enabled_cdevs,
			nb_cryptodevs);
	if (ret) {
		RTE_LOG(ERR, USER1, "Crypto device type does not support "
				"capabilities requested\n");
		goto err;
	}

	if (opts.test_file != NULL) {
		t_vec = cperf_test_vector_get_from_file(&opts);
		if (t_vec == NULL) {
			RTE_LOG(ERR, USER1,
					"Failed to create test vector for"
					" specified file\n");
			goto err;
		}

		if (cperf_check_test_vector(&opts, t_vec)) {
			RTE_LOG(ERR, USER1, "Test vectors are incomplete\n");
			goto err;
		}
	} else {
		t_vec = cperf_test_vector_get_dummy(&opts);
		if (t_vec == NULL) {
			RTE_LOG(ERR, USER1,
					"Failed to create test vector for"
					" specified algorithms\n");
			goto err;
		}
	}

	ret = cperf_get_op_functions(&opts, &op_fns);
	if (ret) {
		RTE_LOG(ERR, USER1, "Failed to find function ops set for "
				"specified algorithms combination\n");
		goto err;
	}

	if (!opts.silent && opts.test != CPERF_TEST_TYPE_THROUGHPUT &&
			opts.test != CPERF_TEST_TYPE_LATENCY)
		show_test_vector(t_vec);

	total_nb_qps = nb_cryptodevs * opts.nb_qps;

	i = 0;
	uint8_t qp_id = 0, cdev_index = 0;
	RTE_LCORE_FOREACH_SLAVE(lcore_id) {

		if (i == total_nb_qps)
			break;

		cdev_id = enabled_cdevs[cdev_index];

		uint8_t socket_id = rte_cryptodev_socket_id(cdev_id);

		ctx[i] = cperf_testmap[opts.test].constructor(
				session_pool_socket[socket_id].sess_mp,
				session_pool_socket[socket_id].priv_mp,
				cdev_id, qp_id,
				&opts, t_vec, &op_fns);
		if (ctx[i] == NULL) {
			RTE_LOG(ERR, USER1, "Test run constructor failed\n");
			goto err;
		}
		qp_id = (qp_id + 1) % opts.nb_qps;
		if (qp_id == 0)
			cdev_index++;
		i++;
	}
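
	/*
	 * IMIX mode: the distribution list gives a relative weight for each
	 * entry of the buffer-size list. The block below turns the weights
	 * into a cumulative table and samples it with a uniform random draw,
	 * so each size is picked with probability weight/total. For example,
	 * sizes {64, 1024} with weights {3, 1} yield the cumulative table
	 * {3, 4}; a draw in [0, 3) selects 64 and a draw of 3 selects 1024.
	 */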
	if (opts.imix_distribution_count != 0) {
		uint8_t buffer_size_count = opts.buffer_size_count;
		uint16_t distribution_total[buffer_size_count];
		uint32_t op_idx;
		uint32_t test_average_size = 0;
		const uint32_t *buffer_size_list = opts.buffer_size_list;
		const uint32_t *imix_distribution_list =
				opts.imix_distribution_list;

		opts.imix_buffer_sizes = rte_malloc(NULL,
					sizeof(uint32_t) * opts.pool_sz,
					0);
		if (opts.imix_buffer_sizes == NULL) {
			RTE_LOG(ERR, USER1,
				"Failed to allocate IMIX buffer size list\n");
			goto err;
		}

		/*
		 * Calculate accumulated distribution of
		 * probabilities per packet size
		 */
		distribution_total[0] = imix_distribution_list[0];
		for (i = 1; i < buffer_size_count; i++)
			distribution_total[i] = imix_distribution_list[i] +
				distribution_total[i-1];

		/* Calculate a random sequence of packet sizes, based on distribution */
		for (op_idx = 0; op_idx < opts.pool_sz; op_idx++) {
			uint16_t random_number = rte_rand() %
				distribution_total[buffer_size_count - 1];
			for (i = 0; i < buffer_size_count; i++)
				if (random_number < distribution_total[i])
					break;

			opts.imix_buffer_sizes[op_idx] = buffer_size_list[i];
		}

		/* Calculate average buffer size for the IMIX distribution */
		for (i = 0; i < buffer_size_count; i++)
			test_average_size += buffer_size_list[i] *
				imix_distribution_list[i];

		opts.test_buffer_size = test_average_size /
				distribution_total[buffer_size_count - 1];

		i = 0;
		RTE_LCORE_FOREACH_SLAVE(lcore_id) {

			if (i == total_nb_qps)
				break;

			rte_eal_remote_launch(cperf_testmap[opts.test].runner,
				ctx[i], lcore_id);
			i++;
		}
		i = 0;
		RTE_LCORE_FOREACH_SLAVE(lcore_id) {

			if (i == total_nb_qps)
				break;
			ret |= rte_eal_wait_lcore(lcore_id);
			i++;
		}

		if (ret != EXIT_SUCCESS)
			goto err;
	} else {

		/* Get first size from range or list */
		if (opts.inc_buffer_size != 0)
			opts.test_buffer_size = opts.min_buffer_size;
		else
			opts.test_buffer_size = opts.buffer_size_list[0];

		while (opts.test_buffer_size <= opts.max_buffer_size) {
			i = 0;
			RTE_LCORE_FOREACH_SLAVE(lcore_id) {

				if (i == total_nb_qps)
					break;

				rte_eal_remote_launch(
					cperf_testmap[opts.test].runner,
					ctx[i], lcore_id);
				i++;
			}
			i = 0;
			RTE_LCORE_FOREACH_SLAVE(lcore_id) {

				if (i == total_nb_qps)
					break;
				ret |= rte_eal_wait_lcore(lcore_id);
				i++;
			}

			if (ret != EXIT_SUCCESS)
				goto err;

			/* Get next size from range or list */
			if (opts.inc_buffer_size != 0)
				opts.test_buffer_size += opts.inc_buffer_size;
			else {
				if (++buffer_size_idx == opts.buffer_size_count)
					break;
				opts.test_buffer_size =
					opts.buffer_size_list[buffer_size_idx];
			}
		}
	}
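
	/*
	 * All runners have finished: destroy the per-queue-pair test
	 * contexts, stop every enabled crypto device, then release the
	 * remaining resources.
	 */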
	i = 0;
	RTE_LCORE_FOREACH_SLAVE(lcore_id) {

		if (i == total_nb_qps)
			break;

		cperf_testmap[opts.test].destructor(ctx[i]);
		i++;
	}

	for (i = 0; i < nb_cryptodevs &&
			i < RTE_CRYPTO_MAX_DEVS; i++)
		rte_cryptodev_stop(enabled_cdevs[i]);

	rte_free(opts.imix_buffer_sizes);
	free_test_vector(t_vec, &opts);

	printf("\n");
	return EXIT_SUCCESS;

err:
	i = 0;
	RTE_LCORE_FOREACH_SLAVE(lcore_id) {
		if (i == total_nb_qps)
			break;

		if (ctx[i] && cperf_testmap[opts.test].destructor)
			cperf_testmap[opts.test].destructor(ctx[i]);
		i++;
	}

	for (i = 0; i < nb_cryptodevs &&
			i < RTE_CRYPTO_MAX_DEVS; i++)
		rte_cryptodev_stop(enabled_cdevs[i]);
	rte_free(opts.imix_buffer_sizes);
	free_test_vector(t_vec, &opts);

	printf("\n");
	return EXIT_FAILURE;
}
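
/*
 * Example invocation (illustrative only; see cperf_options.h and the
 * dpdk-test-crypto-perf documentation for the authoritative option list):
 *
 *   dpdk-test-crypto-perf -l 0-2 --vdev crypto_aesni_mb -- \
 *       --ptest throughput --devtype crypto_aesni_mb --optype cipher-only \
 *       --cipher-algo aes-cbc --buffer-sz 64,256,1024
 */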