/*-
 *   BSD LICENSE
 *
 *   Copyright (c) Intel Corporation.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "spdk/stdinc.h"

#include "spdk/bdev.h"
#include "spdk/accel_engine.h"
#include "spdk/env.h"
#include "spdk/log.h"
#include "spdk/thread.h"
#include "spdk/event.h"
#include "spdk/rpc.h"
#include "spdk/util.h"
#include "spdk/string.h"

#include "CUnit/Basic.h"

#define BUFFER_IOVS		1024
#define BUFFER_SIZE		(260 * 1024)
#define BDEV_TASK_ARRAY_SIZE	2048

pthread_mutex_t g_test_mutex;
pthread_cond_t g_test_cond;

static struct spdk_thread *g_thread_init;
static struct spdk_thread *g_thread_ut;
static struct spdk_thread *g_thread_io;
static bool g_wait_for_tests = false;
static int g_num_failures = 0;

struct io_target {
	struct spdk_bdev	*bdev;
	struct spdk_bdev_desc	*bdev_desc;
	struct spdk_io_channel	*ch;
	struct io_target	*next;
};

struct bdevio_request {
	char *buf;
	char *fused_buf;
	int data_len;
	uint64_t offset;
	struct iovec iov[BUFFER_IOVS];
	int iovcnt;
	struct iovec fused_iov[BUFFER_IOVS];
	int fused_iovcnt;
	struct io_target *target;
};

struct io_target *g_io_targets = NULL;
struct io_target *g_current_io_target = NULL;
static void rpc_perform_tests_cb(unsigned num_failures, struct spdk_jsonrpc_request *request);

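/*
 * Threading model: the CUnit tests run on g_thread_ut, while all bdev I/O is
 * issued from g_thread_io. execute_spdk_function() implements that hand-off:
 * it sends fn to the I/O thread and blocks the calling (test) thread on
 * g_test_cond until fn, or the completion callback it arms, calls
 * wake_ut_thread(). Every fn passed here must therefore guarantee exactly one
 * wake_ut_thread() call on every path, or the test thread will hang forever.
 */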
static void
execute_spdk_function(spdk_msg_fn fn, void *arg)
{
	pthread_mutex_lock(&g_test_mutex);
	spdk_thread_send_msg(g_thread_io, fn, arg);
	pthread_cond_wait(&g_test_cond, &g_test_mutex);
	pthread_mutex_unlock(&g_test_mutex);
}

static void
wake_ut_thread(void)
{
	pthread_mutex_lock(&g_test_mutex);
	pthread_cond_signal(&g_test_cond);
	pthread_mutex_unlock(&g_test_mutex);
}

static void
__get_io_channel(void *arg)
{
	struct io_target *target = arg;

	target->ch = spdk_bdev_get_io_channel(target->bdev_desc);
	assert(target->ch);
	wake_ut_thread();
}

static int
bdevio_construct_target(struct spdk_bdev *bdev)
{
	struct io_target *target;
	int rc;
	uint64_t num_blocks = spdk_bdev_get_num_blocks(bdev);
	uint32_t block_size = spdk_bdev_get_block_size(bdev);

	target = malloc(sizeof(struct io_target));
	if (target == NULL) {
		return -ENOMEM;
	}

	rc = spdk_bdev_open(bdev, true, NULL, NULL, &target->bdev_desc);
	if (rc != 0) {
		free(target);
		SPDK_ERRLOG("Could not open leaf bdev %s, error=%d\n", spdk_bdev_get_name(bdev), rc);
		return rc;
	}

	printf("  %s: %" PRIu64 " blocks of %" PRIu32 " bytes (%" PRIu64 " MiB)\n",
	       spdk_bdev_get_name(bdev),
	       num_blocks, block_size,
	       (num_blocks * block_size + 1024 * 1024 - 1) / (1024 * 1024));

	target->bdev = bdev;
	target->next = g_io_targets;
	execute_spdk_function(__get_io_channel, target);
	g_io_targets = target;

	return 0;
}

static int
bdevio_construct_targets(void)
{
	struct spdk_bdev *bdev;
	int rc;

	printf("I/O targets:\n");

	bdev = spdk_bdev_first_leaf();
	while (bdev != NULL) {
		rc = bdevio_construct_target(bdev);
		if (rc < 0) {
			SPDK_ERRLOG("Could not construct bdev %s, error=%d\n", spdk_bdev_get_name(bdev), rc);
			return rc;
		}
		bdev = spdk_bdev_next_leaf(bdev);
	}

	if (g_io_targets == NULL) {
		SPDK_ERRLOG("No bdevs to perform tests on\n");
		return -1;
	}

	return 0;
}

static void
__put_io_channel(void *arg)
{
	struct io_target *target = arg;

	spdk_put_io_channel(target->ch);
	wake_ut_thread();
}

static void
bdevio_cleanup_targets(void)
{
	struct io_target *target;

	target = g_io_targets;
	while (target != NULL) {
		execute_spdk_function(__put_io_channel, target);
		spdk_bdev_close(target->bdev_desc);
		g_io_targets = target->next;
		free(target);
		target = g_io_targets;
	}
}

static bool g_completion_success;

static void
initialize_buffer(char **buf, int pattern, int size)
{
	*buf = spdk_zmalloc(size, 0x1000, NULL, SPDK_ENV_LCORE_ID_ANY, SPDK_MALLOC_DMA);
	memset(*buf, pattern, size);
}

static void
quick_test_complete(struct spdk_bdev_io *bdev_io, bool success, void *arg)
{
	g_completion_success = success;
	spdk_bdev_free_io(bdev_io);
	wake_ut_thread();
}

static void
__blockdev_write(void *arg)
{
	struct bdevio_request *req = arg;
	struct io_target *target = req->target;
	int rc;

	if (req->iovcnt) {
		rc = spdk_bdev_writev(target->bdev_desc, target->ch, req->iov, req->iovcnt, req->offset,
				      req->data_len, quick_test_complete, NULL);
	} else {
		rc = spdk_bdev_write(target->bdev_desc, target->ch, req->buf, req->offset,
				     req->data_len, quick_test_complete, NULL);
	}

	if (rc) {
		g_completion_success = false;
		wake_ut_thread();
	}
}

static void
__blockdev_write_zeroes(void *arg)
{
	struct bdevio_request *req = arg;
	struct io_target *target = req->target;
	int rc;

	rc = spdk_bdev_write_zeroes(target->bdev_desc, target->ch, req->offset,
				    req->data_len, quick_test_complete, NULL);
	if (rc) {
		g_completion_success = false;
		wake_ut_thread();
	}
}

static void
__blockdev_compare_and_write(void *arg)
{
	struct bdevio_request *req = arg;
	struct io_target *target = req->target;
	int rc;

	rc = spdk_bdev_comparev_and_writev_blocks(target->bdev_desc, target->ch, req->iov, req->iovcnt,
			req->fused_iov, req->fused_iovcnt, req->offset, req->data_len,
			quick_test_complete, NULL);

	if (rc) {
		g_completion_success = false;
		wake_ut_thread();
	}
}

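/*
 * sgl_chop_buffer() splits the flat request buffer into up to BUFFER_IOVS
 * iovec entries of iov_len bytes each (the final entry takes whatever
 * remains). For example, a 4096-byte buffer chopped with iov_len = 1024
 * yields four iovecs. Passing iov_len = 0 leaves iovcnt at 0, which makes
 * the submit paths above fall back to the non-vectored read/write calls.
 */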
static void
sgl_chop_buffer(struct bdevio_request *req, int iov_len)
{
	int data_len = req->data_len;
	char *buf = req->buf;

	req->iovcnt = 0;
	if (!iov_len) {
		return;
	}

	for (; data_len > 0 && req->iovcnt < BUFFER_IOVS; req->iovcnt++) {
		if (data_len < iov_len) {
			iov_len = data_len;
		}

		req->iov[req->iovcnt].iov_base = buf;
		req->iov[req->iovcnt].iov_len = iov_len;

		buf += iov_len;
		data_len -= iov_len;
	}

	CU_ASSERT_EQUAL_FATAL(data_len, 0);
}

static void
sgl_chop_fused_buffer(struct bdevio_request *req, int iov_len)
{
	int data_len = req->data_len;
	char *buf = req->fused_buf;

	req->fused_iovcnt = 0;
	if (!iov_len) {
		return;
	}

	for (; data_len > 0 && req->fused_iovcnt < BUFFER_IOVS; req->fused_iovcnt++) {
		if (data_len < iov_len) {
			iov_len = data_len;
		}

		req->fused_iov[req->fused_iovcnt].iov_base = buf;
		req->fused_iov[req->fused_iovcnt].iov_len = iov_len;

		buf += iov_len;
		data_len -= iov_len;
	}

	CU_ASSERT_EQUAL_FATAL(data_len, 0);
}

static void
blockdev_write(struct io_target *target, char *tx_buf,
	       uint64_t offset, int data_len, int iov_len)
{
	struct bdevio_request req;

	req.target = target;
	req.buf = tx_buf;
	req.data_len = data_len;
	req.offset = offset;
	sgl_chop_buffer(&req, iov_len);

	g_completion_success = false;

	execute_spdk_function(__blockdev_write, &req);
}

static void
_blockdev_compare_and_write(struct io_target *target, char *cmp_buf, char *write_buf,
			    uint64_t offset, int data_len, int iov_len)
{
	struct bdevio_request req;

	req.target = target;
	req.buf = cmp_buf;
	req.fused_buf = write_buf;
	req.data_len = data_len;
	req.offset = offset;
	sgl_chop_buffer(&req, iov_len);
	sgl_chop_fused_buffer(&req, iov_len);

	g_completion_success = false;

	execute_spdk_function(__blockdev_compare_and_write, &req);
}

static void
blockdev_write_zeroes(struct io_target *target, char *tx_buf,
		      uint64_t offset, int data_len)
{
	struct bdevio_request req;

	req.target = target;
	req.buf = tx_buf;
	req.data_len = data_len;
	req.offset = offset;

	g_completion_success = false;

	execute_spdk_function(__blockdev_write_zeroes, &req);
}

static void
__blockdev_read(void *arg)
{
	struct bdevio_request *req = arg;
	struct io_target *target = req->target;
	int rc;

	if (req->iovcnt) {
		rc = spdk_bdev_readv(target->bdev_desc, target->ch, req->iov, req->iovcnt, req->offset,
				     req->data_len, quick_test_complete, NULL);
	} else {
		rc = spdk_bdev_read(target->bdev_desc, target->ch, req->buf, req->offset,
				    req->data_len, quick_test_complete, NULL);
	}

	if (rc) {
		g_completion_success = false;
		wake_ut_thread();
	}
}

static void
blockdev_read(struct io_target *target, char *rx_buf,
	      uint64_t offset, int data_len, int iov_len)
{
	struct bdevio_request req;

	req.target = target;
	req.buf = rx_buf;
	req.data_len = data_len;
	req.offset = offset;
	req.iovcnt = 0;
	sgl_chop_buffer(&req, iov_len);

	g_completion_success = false;

	execute_spdk_function(__blockdev_read, &req);
}

static int
blockdev_write_read_data_match(char *rx_buf, char *tx_buf, int data_length)
{
	int rc;
	rc = memcmp(rx_buf, tx_buf, data_length);

	spdk_free(rx_buf);
	spdk_free(tx_buf);

	return rc;
}

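/*
 * Core write-then-read harness: write data_length bytes of pattern at offset
 * (or issue write_zeroes when requested), read the range back, and compare.
 * expected_rc == 0 means both I/Os must complete successfully; any other
 * expected_rc marks the parameters as intentionally invalid, so both
 * submissions are expected to fail. Requests smaller than one block or
 * larger than the bdev are silently skipped for that target.
 */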
static void
blockdev_write_read(uint32_t data_length, uint32_t iov_len, int pattern, uint64_t offset,
		    int expected_rc, bool write_zeroes)
{
	struct io_target *target;
	char *tx_buf = NULL;
	char *rx_buf = NULL;
	int rc;

	target = g_current_io_target;

	if (data_length < spdk_bdev_get_block_size(target->bdev) ||
	    data_length / spdk_bdev_get_block_size(target->bdev) > spdk_bdev_get_num_blocks(target->bdev)) {
		return;
	}

	if (!write_zeroes) {
		initialize_buffer(&tx_buf, pattern, data_length);
		initialize_buffer(&rx_buf, 0, data_length);

		blockdev_write(target, tx_buf, offset, data_length, iov_len);
	} else {
		initialize_buffer(&tx_buf, 0, data_length);
		initialize_buffer(&rx_buf, pattern, data_length);

		blockdev_write_zeroes(target, tx_buf, offset, data_length);
	}

	if (expected_rc == 0) {
		CU_ASSERT_EQUAL(g_completion_success, true);
	} else {
		CU_ASSERT_EQUAL(g_completion_success, false);
	}
	blockdev_read(target, rx_buf, offset, data_length, iov_len);

	if (expected_rc == 0) {
		CU_ASSERT_EQUAL(g_completion_success, true);
	} else {
		CU_ASSERT_EQUAL(g_completion_success, false);
	}

	if (g_completion_success) {
		rc = blockdev_write_read_data_match(rx_buf, tx_buf, data_length);
		/* Assert the write by comparing it with values read
		 * from each blockdev */
		CU_ASSERT_EQUAL(rc, 0);
	}
}

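/*
 * Compare-and-write (fused) test sequence: seed the range with 0xAA, then
 * issue compare(0xAA)/write(0xBB), which must succeed. Repeating the same
 * fused command must fail, because the on-disk data is now 0xBB and no
 * longer matches the 0xAA compare buffer. A final read verifies that 0xBB
 * was written.
 */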
static void
blockdev_compare_and_write(uint32_t data_length, uint32_t iov_len, uint64_t offset)
{
	struct io_target *target;
	char *tx_buf = NULL;
	char *write_buf = NULL;
	char *rx_buf = NULL;
	int rc;

	target = g_current_io_target;

	if (data_length < spdk_bdev_get_block_size(target->bdev) ||
	    data_length / spdk_bdev_get_block_size(target->bdev) > spdk_bdev_get_num_blocks(target->bdev)) {
		return;
	}

	initialize_buffer(&tx_buf, 0xAA, data_length);
	initialize_buffer(&rx_buf, 0, data_length);
	initialize_buffer(&write_buf, 0xBB, data_length);

	blockdev_write(target, tx_buf, offset, data_length, iov_len);
	CU_ASSERT_EQUAL(g_completion_success, true);

	_blockdev_compare_and_write(target, tx_buf, write_buf, offset, data_length, iov_len);
	CU_ASSERT_EQUAL(g_completion_success, true);

	_blockdev_compare_and_write(target, tx_buf, write_buf, offset, data_length, iov_len);
	CU_ASSERT_EQUAL(g_completion_success, false);

	blockdev_read(target, rx_buf, offset, data_length, iov_len);
	CU_ASSERT_EQUAL(g_completion_success, true);
	rc = blockdev_write_read_data_match(rx_buf, write_buf, data_length);
	/* Assert the write by comparing it with values read
	 * from each blockdev */
	CU_ASSERT_EQUAL(rc, 0);
}

static void
blockdev_write_read_4k(void)
{
	uint32_t data_length;
	uint64_t offset;
	int pattern;
	int expected_rc;

	/* Data size = 4K */
	data_length = 4096;
	CU_ASSERT_TRUE(data_length < BUFFER_SIZE);
	offset = 0;
	pattern = 0xA3;
	/* Params are valid, hence the expected return value
	 * of write and read for all blockdevs is 0. */
	expected_rc = 0;

	blockdev_write_read(data_length, 0, pattern, offset, expected_rc, 0);
}

static void
blockdev_write_zeroes_read_4k(void)
{
	uint32_t data_length;
	uint64_t offset;
	int pattern;
	int expected_rc;

	/* Data size = 4K */
	data_length = 4096;
	offset = 0;
	pattern = 0xA3;
	/* Params are valid, hence the expected return value
	 * of write_zeroes and read for all blockdevs is 0. */
	expected_rc = 0;

	blockdev_write_read(data_length, 0, pattern, offset, expected_rc, 1);
}

/*
 * This i/o will not have to split at the bdev layer.
 */
static void
blockdev_write_zeroes_read_1m(void)
{
	uint32_t data_length;
	uint64_t offset;
	int pattern;
	int expected_rc;

	/* Data size = 1M */
	data_length = 1048576;
	offset = 0;
	pattern = 0xA3;
	/* Params are valid, hence the expected return value
	 * of write_zeroes and read for all blockdevs is 0. */
	expected_rc = 0;

	blockdev_write_read(data_length, 0, pattern, offset, expected_rc, 1);
}

/*
 * This i/o will have to split at the bdev layer if
 * write-zeroes is not supported by the bdev.
 */
static void
blockdev_write_zeroes_read_3m(void)
{
	uint32_t data_length;
	uint64_t offset;
	int pattern;
	int expected_rc;

	/* Data size = 3M */
	data_length = 3145728;
	offset = 0;
	pattern = 0xA3;
	/* Params are valid, hence the expected return value
	 * of write_zeroes and read for all blockdevs is 0. */
	expected_rc = 0;

	blockdev_write_read(data_length, 0, pattern, offset, expected_rc, 1);
}

/*
 * This i/o will have to split at the bdev layer if
 * write-zeroes is not supported by the bdev. It also
 * tests a write size that is not an even multiple of
 * the bdev layer zero buffer size.
 */
static void
blockdev_write_zeroes_read_3m_500k(void)
{
	uint32_t data_length;
	uint64_t offset;
	int pattern;
	int expected_rc;

	/* Data size = 3.5M */
	data_length = 3670016;
	offset = 0;
	pattern = 0xA3;
	/* Params are valid, hence the expected return value
	 * of write_zeroes and read for all blockdevs is 0. */
	expected_rc = 0;

	blockdev_write_read(data_length, 0, pattern, offset, expected_rc, 1);
}

static void
blockdev_writev_readv_4k(void)
{
	uint32_t data_length, iov_len;
	uint64_t offset;
	int pattern;
	int expected_rc;

	/* Data size = 4K */
	data_length = 4096;
	iov_len = 4096;
	CU_ASSERT_TRUE(data_length < BUFFER_SIZE);
	offset = 0;
	pattern = 0xA3;
	/* Params are valid, hence the expected return value
	 * of write and read for all blockdevs is 0. */
	expected_rc = 0;

	blockdev_write_read(data_length, iov_len, pattern, offset, expected_rc, 0);
}

static void
blockdev_comparev_and_writev(void)
{
	uint32_t data_length, iov_len;
	uint64_t offset;

	data_length = 1;
	iov_len = 1;
	CU_ASSERT_TRUE(data_length < BUFFER_SIZE);
	offset = 0;

	blockdev_compare_and_write(data_length, iov_len, offset);
}

static void
blockdev_writev_readv_30x4k(void)
{
	uint32_t data_length, iov_len;
	uint64_t offset;
	int pattern;
	int expected_rc;

	/* Data size = 30 x 4K */
	data_length = 4096 * 30;
	iov_len = 4096;
	CU_ASSERT_TRUE(data_length < BUFFER_SIZE);
	offset = 0;
	pattern = 0xA3;
	/* Params are valid, hence the expected return value
	 * of write and read for all blockdevs is 0. */
	expected_rc = 0;

	blockdev_write_read(data_length, iov_len, pattern, offset, expected_rc, 0);
}

static void
blockdev_write_read_512Bytes(void)
{
	uint32_t data_length;
	uint64_t offset;
	int pattern;
	int expected_rc;

	/* Data size = 512 */
	data_length = 512;
	CU_ASSERT_TRUE(data_length < BUFFER_SIZE);
	offset = 8192;
	pattern = 0xA3;
	/* Params are valid, hence the expected return value
	 * of write and read for all blockdevs is 0. */
	expected_rc = 0;

	blockdev_write_read(data_length, 0, pattern, offset, expected_rc, 0);
}

static void
blockdev_writev_readv_512Bytes(void)
{
	uint32_t data_length, iov_len;
	uint64_t offset;
	int pattern;
	int expected_rc;

	/* Data size = 512 */
	data_length = 512;
	iov_len = 512;
	CU_ASSERT_TRUE(data_length < BUFFER_SIZE);
	offset = 8192;
	pattern = 0xA3;
	/* Params are valid, hence the expected return value
	 * of write and read for all blockdevs is 0. */
	expected_rc = 0;

	blockdev_write_read(data_length, iov_len, pattern, offset, expected_rc, 0);
}

static void
blockdev_write_read_size_gt_128k(void)
{
	uint32_t data_length;
	uint64_t offset;
	int pattern;
	int expected_rc;

	/* Data size = 132K */
	data_length = 135168;
	CU_ASSERT_TRUE(data_length < BUFFER_SIZE);
	offset = 8192;
	pattern = 0xA3;
	/* Params are valid, hence the expected return value
	 * of write and read for all blockdevs is 0. */
	expected_rc = 0;

	blockdev_write_read(data_length, 0, pattern, offset, expected_rc, 0);
}

static void
blockdev_writev_readv_size_gt_128k(void)
{
	uint32_t data_length, iov_len;
	uint64_t offset;
	int pattern;
	int expected_rc;

	/* Data size = 132K */
	data_length = 135168;
	iov_len = 135168;
	CU_ASSERT_TRUE(data_length < BUFFER_SIZE);
	offset = 8192;
	pattern = 0xA3;
	/* Params are valid, hence the expected return value
	 * of write and read for all blockdevs is 0. */
	expected_rc = 0;

	blockdev_write_read(data_length, iov_len, pattern, offset, expected_rc, 0);
}

static void
blockdev_writev_readv_size_gt_128k_two_iov(void)
{
	uint32_t data_length, iov_len;
	uint64_t offset;
	int pattern;
	int expected_rc;

	/* Data size = 132K */
	data_length = 135168;
	iov_len = 128 * 1024;
	CU_ASSERT_TRUE(data_length < BUFFER_SIZE);
	offset = 8192;
	pattern = 0xA3;
	/* Params are valid, hence the expected return value
	 * of write and read for all blockdevs is 0. */
	expected_rc = 0;

	blockdev_write_read(data_length, iov_len, pattern, offset, expected_rc, 0);
}

static void
blockdev_write_read_invalid_size(void)
{
	uint32_t data_length;
	uint64_t offset;
	int pattern;
	int expected_rc;

	/* Data size is not a multiple of the block size */
	data_length = 0x1015;
	CU_ASSERT_TRUE(data_length < BUFFER_SIZE);
	offset = 8192;
	pattern = 0xA3;
	/* Params are invalid, hence the expected return value
	 * of write and read for all blockdevs is < 0 */
	expected_rc = -1;

	blockdev_write_read(data_length, 0, pattern, offset, expected_rc, 0);
}

static void
blockdev_write_read_offset_plus_nbytes_equals_bdev_size(void)
{
	struct io_target *target;
	struct spdk_bdev *bdev;
	char *tx_buf = NULL;
	char *rx_buf = NULL;
	uint64_t offset;
	uint32_t block_size;
	int rc;

	target = g_current_io_target;
	bdev = target->bdev;

	block_size = spdk_bdev_get_block_size(bdev);

	/* The start offset has been set to a marginal value
	 * such that offset + nbytes == Total size of
	 * blockdev. */
	offset = ((spdk_bdev_get_num_blocks(bdev) - 1) * block_size);

	initialize_buffer(&tx_buf, 0xA3, block_size);
	initialize_buffer(&rx_buf, 0, block_size);

	blockdev_write(target, tx_buf, offset, block_size, 0);
	CU_ASSERT_EQUAL(g_completion_success, true);

	blockdev_read(target, rx_buf, offset, block_size, 0);
	CU_ASSERT_EQUAL(g_completion_success, true);

	rc = blockdev_write_read_data_match(rx_buf, tx_buf, block_size);
	/* Assert the write by comparing it with values read
	 * from each blockdev */
	CU_ASSERT_EQUAL(rc, 0);
}

static void
blockdev_write_read_offset_plus_nbytes_gt_bdev_size(void)
{
	struct io_target *target;
	struct spdk_bdev *bdev;
	char *tx_buf = NULL;
	char *rx_buf = NULL;
	int data_length;
	uint64_t offset;
	int pattern;

	/* Tests the overflow condition of the blockdevs. */
	data_length = 4096;
	CU_ASSERT_TRUE(data_length < BUFFER_SIZE);
	pattern = 0xA3;

	target = g_current_io_target;
	bdev = target->bdev;

	/* The start offset has been set to a valid value
	 * but offset + nbytes is greater than the Total size
	 * of the blockdev. The test should fail. */
	offset = ((spdk_bdev_get_num_blocks(bdev) * spdk_bdev_get_block_size(bdev)) - 1024);

	initialize_buffer(&tx_buf, pattern, data_length);
	initialize_buffer(&rx_buf, 0, data_length);

	blockdev_write(target, tx_buf, offset, data_length, 0);
	CU_ASSERT_EQUAL(g_completion_success, false);

	blockdev_read(target, rx_buf, offset, data_length, 0);
	CU_ASSERT_EQUAL(g_completion_success, false);
}

static void
blockdev_write_read_max_offset(void)
{
	int data_length;
	uint64_t offset;
	int pattern;
	int expected_rc;

	data_length = 4096;
	CU_ASSERT_TRUE(data_length < BUFFER_SIZE);
	/* The start offset has been set to UINT64_MAX such that
	 * adding nbytes wraps around and points to an invalid address.
	 */
	offset = UINT64_MAX;
	pattern = 0xA3;
	/* Params are invalid, hence the expected return value
	 * of write and read for all blockdevs is < 0 */
	expected_rc = -1;

	blockdev_write_read(data_length, 0, pattern, offset, expected_rc, 0);
}

static void
blockdev_overlapped_write_read_8k(void)
{
	int data_length;
	uint64_t offset;
	int pattern;
	int expected_rc;

	/* Data size = 8K */
	data_length = 8192;
	CU_ASSERT_TRUE(data_length < BUFFER_SIZE);
	offset = 0;
	pattern = 0xA3;
	/* Params are valid, hence the expected return value
	 * of write and read for all blockdevs is 0. */
	expected_rc = 0;
	/* Assert the write by comparing it with values read
	 * from the same offset for each blockdev */
	blockdev_write_read(data_length, 0, pattern, offset, expected_rc, 0);

	/* Overwrite the pattern 0xBB of size 8K on an address offset overlapping
	 * with the address written above and assert the new value in
	 * the overlapped address range */
	/* Populate 8k with value 0xBB */
	pattern = 0xBB;
	/* Offset = 4096; overlap the range written above and write value 0xBB */
	offset = 4096;
	/* Assert the write by comparing it with values read
	 * from the overlapped offset for each blockdev */
	blockdev_write_read(data_length, 0, pattern, offset, expected_rc, 0);
}

static void
__blockdev_reset(void *arg)
{
	struct bdevio_request *req = arg;
	struct io_target *target = req->target;
	int rc;

	rc = spdk_bdev_reset(target->bdev_desc, target->ch, quick_test_complete, NULL);
	if (rc < 0) {
		g_completion_success = false;
		wake_ut_thread();
	}
}

static void
blockdev_test_reset(void)
{
	struct bdevio_request req;
	struct io_target *target;

	target = g_current_io_target;
	req.target = target;

	g_completion_success = false;

	execute_spdk_function(__blockdev_reset, &req);

	/* Workaround: NVMe-oF target doesn't support reset yet - so for now
	 * don't fail the test if it's an NVMe bdev.
	 */
	if (!spdk_bdev_io_type_supported(target->bdev, SPDK_BDEV_IO_TYPE_NVME_IO)) {
		CU_ASSERT_EQUAL(g_completion_success, true);
	}
}

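/*
 * NVMe passthrough plumbing. Note that __blockdev_nvme_passthru() below
 * wakes the test thread without filling in any status when submission
 * fails, so several of the I/O passthru tests seed sct/sc with sentinel
 * values a successful command would never return (e.g. vendor-specific
 * status). That lets the CU_ASSERTs distinguish a real completion, whose
 * cdw0/sct/sc are recorded by nvme_pt_test_complete(), from a request that
 * never reached the device.
 */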
struct bdevio_passthrough_request {
	struct spdk_nvme_cmd cmd;
	void *buf;
	uint32_t len;
	struct io_target *target;
	int sct;
	int sc;
	uint32_t cdw0;
};

static void
nvme_pt_test_complete(struct spdk_bdev_io *bdev_io, bool success, void *arg)
{
	struct bdevio_passthrough_request *pt_req = arg;

	spdk_bdev_io_get_nvme_status(bdev_io, &pt_req->cdw0, &pt_req->sct, &pt_req->sc);
	spdk_bdev_free_io(bdev_io);
	wake_ut_thread();
}

static void
__blockdev_nvme_passthru(void *arg)
{
	struct bdevio_passthrough_request *pt_req = arg;
	struct io_target *target = pt_req->target;
	int rc;

	rc = spdk_bdev_nvme_io_passthru(target->bdev_desc, target->ch,
					&pt_req->cmd, pt_req->buf, pt_req->len,
					nvme_pt_test_complete, pt_req);
	if (rc) {
		wake_ut_thread();
	}
}

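/*
 * Raw NVMe read/write round trip, skipped for bdevs that do not support
 * SPDK_BDEV_IO_TYPE_NVME_IO. The command is built by hand: cdw10/cdw11
 * together hold the starting LBA (written below as a single uint64_t),
 * and cdw12's low 16 bits hold the zero-based block count, so the 0 stored
 * there means one block.
 */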
static void
blockdev_test_nvme_passthru_rw(void)
{
	struct bdevio_passthrough_request pt_req;
	void *write_buf, *read_buf;
	struct io_target *target;

	target = g_current_io_target;

	if (!spdk_bdev_io_type_supported(target->bdev, SPDK_BDEV_IO_TYPE_NVME_IO)) {
		return;
	}

	memset(&pt_req, 0, sizeof(pt_req));
	pt_req.target = target;
	pt_req.cmd.opc = SPDK_NVME_OPC_WRITE;
	pt_req.cmd.nsid = 1;
	*(uint64_t *)&pt_req.cmd.cdw10 = 4;
	pt_req.cmd.cdw12 = 0;

	pt_req.len = spdk_bdev_get_block_size(target->bdev);
	write_buf = spdk_malloc(pt_req.len, 0, NULL, SPDK_ENV_LCORE_ID_ANY, SPDK_MALLOC_DMA);
	memset(write_buf, 0xA5, pt_req.len);
	pt_req.buf = write_buf;

	pt_req.sct = SPDK_NVME_SCT_VENDOR_SPECIFIC;
	pt_req.sc = SPDK_NVME_SC_INVALID_FIELD;
	execute_spdk_function(__blockdev_nvme_passthru, &pt_req);
	CU_ASSERT(pt_req.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(pt_req.sc == SPDK_NVME_SC_SUCCESS);

	pt_req.cmd.opc = SPDK_NVME_OPC_READ;
	read_buf = spdk_zmalloc(pt_req.len, 0, NULL, SPDK_ENV_LCORE_ID_ANY, SPDK_MALLOC_DMA);
	pt_req.buf = read_buf;

	pt_req.sct = SPDK_NVME_SCT_VENDOR_SPECIFIC;
	pt_req.sc = SPDK_NVME_SC_INVALID_FIELD;
	execute_spdk_function(__blockdev_nvme_passthru, &pt_req);
	CU_ASSERT(pt_req.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(pt_req.sc == SPDK_NVME_SC_SUCCESS);

	CU_ASSERT(!memcmp(read_buf, write_buf, pt_req.len));
	spdk_free(read_buf);
	spdk_free(write_buf);
}

static void
blockdev_test_nvme_passthru_vendor_specific(void)
{
	struct bdevio_passthrough_request pt_req;
	struct io_target *target;

	target = g_current_io_target;

	if (!spdk_bdev_io_type_supported(target->bdev, SPDK_BDEV_IO_TYPE_NVME_IO)) {
		return;
	}

	memset(&pt_req, 0, sizeof(pt_req));
	pt_req.target = target;
	pt_req.cmd.opc = 0x7F; /* choose known invalid opcode */
	pt_req.cmd.nsid = 1;

	pt_req.sct = SPDK_NVME_SCT_VENDOR_SPECIFIC;
	pt_req.sc = SPDK_NVME_SC_SUCCESS;
	pt_req.cdw0 = 0xbeef;
	execute_spdk_function(__blockdev_nvme_passthru, &pt_req);
	CU_ASSERT(pt_req.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(pt_req.sc == SPDK_NVME_SC_INVALID_OPCODE);
	CU_ASSERT(pt_req.cdw0 == 0x0);
}

static void
__blockdev_nvme_admin_passthru(void *arg)
{
	struct bdevio_passthrough_request *pt_req = arg;
	struct io_target *target = pt_req->target;
	int rc;

	rc = spdk_bdev_nvme_admin_passthru(target->bdev_desc, target->ch,
					   &pt_req->cmd, pt_req->buf, pt_req->len,
					   nvme_pt_test_complete, pt_req);
	if (rc) {
		wake_ut_thread();
	}
}

static void
blockdev_test_nvme_admin_passthru(void)
{
	struct io_target *target;
	struct bdevio_passthrough_request pt_req;

	target = g_current_io_target;

	if (!spdk_bdev_io_type_supported(target->bdev, SPDK_BDEV_IO_TYPE_NVME_ADMIN)) {
		return;
	}

	memset(&pt_req, 0, sizeof(pt_req));
	pt_req.target = target;
	pt_req.cmd.opc = SPDK_NVME_OPC_IDENTIFY;
	pt_req.cmd.nsid = 0;
	*(uint64_t *)&pt_req.cmd.cdw10 = SPDK_NVME_IDENTIFY_CTRLR;

	pt_req.len = sizeof(struct spdk_nvme_ctrlr_data);
	pt_req.buf = spdk_malloc(pt_req.len, 0, NULL, SPDK_ENV_LCORE_ID_ANY, SPDK_MALLOC_DMA);

	pt_req.sct = SPDK_NVME_SCT_GENERIC;
	pt_req.sc = SPDK_NVME_SC_SUCCESS;
	execute_spdk_function(__blockdev_nvme_admin_passthru, &pt_req);
	CU_ASSERT(pt_req.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(pt_req.sc == SPDK_NVME_SC_SUCCESS);
}

static void
__stop_init_thread(void *arg)
{
	unsigned num_failures = g_num_failures;
	struct spdk_jsonrpc_request *request = arg;

	g_num_failures = 0;

	bdevio_cleanup_targets();
	if (g_wait_for_tests) {
		/* Do not stop the app yet, wait for another RPC */
		rpc_perform_tests_cb(num_failures, request);
		return;
	}
	spdk_app_stop(num_failures);
}

static void
stop_init_thread(unsigned num_failures, struct spdk_jsonrpc_request *request)
{
	g_num_failures = num_failures;

	spdk_thread_send_msg(g_thread_init, __stop_init_thread, request);
}

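/*
 * One CUnit suite is registered per I/O target. suite_init()/suite_fini()
 * advance g_current_io_target through the target list, so each suite's
 * tests run against the next bdev in turn.
 */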
static int
suite_init(void)
{
	if (g_current_io_target == NULL) {
		g_current_io_target = g_io_targets;
	}
	return 0;
}

static int
suite_fini(void)
{
	g_current_io_target = g_current_io_target->next;
	return 0;
}

#define SUITE_NAME_MAX 64

static int
__setup_ut_on_single_target(struct io_target *target)
{
	unsigned rc = 0;
	CU_pSuite suite = NULL;
	char name[SUITE_NAME_MAX];

	snprintf(name, sizeof(name), "bdevio tests on: %s", spdk_bdev_get_name(target->bdev));
	suite = CU_add_suite(name, suite_init, suite_fini);
	if (suite == NULL) {
		CU_cleanup_registry();
		rc = CU_get_error();
		return -rc;
	}

	if (
		CU_add_test(suite, "blockdev write read 4k", blockdev_write_read_4k) == NULL
		|| CU_add_test(suite, "blockdev write zeroes read 4k", blockdev_write_zeroes_read_4k) == NULL
		|| CU_add_test(suite, "blockdev write zeroes read 1m", blockdev_write_zeroes_read_1m) == NULL
		|| CU_add_test(suite, "blockdev write zeroes read 3m", blockdev_write_zeroes_read_3m) == NULL
		|| CU_add_test(suite, "blockdev write zeroes read 3.5m", blockdev_write_zeroes_read_3m_500k) == NULL
		|| CU_add_test(suite, "blockdev reset", blockdev_test_reset) == NULL
		|| CU_add_test(suite, "blockdev write read 512 bytes",
			       blockdev_write_read_512Bytes) == NULL
		|| CU_add_test(suite, "blockdev write read size > 128k",
			       blockdev_write_read_size_gt_128k) == NULL
		|| CU_add_test(suite, "blockdev write read invalid size",
			       blockdev_write_read_invalid_size) == NULL
		|| CU_add_test(suite, "blockdev write read offset + nbytes == size of blockdev",
			       blockdev_write_read_offset_plus_nbytes_equals_bdev_size) == NULL
		|| CU_add_test(suite, "blockdev write read offset + nbytes > size of blockdev",
			       blockdev_write_read_offset_plus_nbytes_gt_bdev_size) == NULL
		|| CU_add_test(suite, "blockdev write read max offset",
			       blockdev_write_read_max_offset) == NULL
		|| CU_add_test(suite, "blockdev write read 8k on overlapped address offset",
			       blockdev_overlapped_write_read_8k) == NULL
		|| CU_add_test(suite, "blockdev writev readv 4k", blockdev_writev_readv_4k) == NULL
		|| CU_add_test(suite, "blockdev writev readv 30 x 4k",
			       blockdev_writev_readv_30x4k) == NULL
		|| CU_add_test(suite, "blockdev writev readv 512 bytes",
			       blockdev_writev_readv_512Bytes) == NULL
		|| CU_add_test(suite, "blockdev writev readv size > 128k",
			       blockdev_writev_readv_size_gt_128k) == NULL
		|| CU_add_test(suite, "blockdev writev readv size > 128k in two iovs",
			       blockdev_writev_readv_size_gt_128k_two_iov) == NULL
		|| CU_add_test(suite, "blockdev comparev and writev", blockdev_comparev_and_writev) == NULL
		|| CU_add_test(suite, "blockdev nvme passthru rw",
			       blockdev_test_nvme_passthru_rw) == NULL
		|| CU_add_test(suite, "blockdev nvme passthru vendor specific",
			       blockdev_test_nvme_passthru_vendor_specific) == NULL
		|| CU_add_test(suite, "blockdev nvme admin passthru",
			       blockdev_test_nvme_admin_passthru) == NULL
	) {
		CU_cleanup_registry();
		rc = CU_get_error();
		return -rc;
	}
	return 0;
}

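/*
 * Runs on g_thread_ut: builds the CUnit registry (one suite per target),
 * runs every suite, and reports the failure count back to the init thread,
 * which either stops the app or answers the pending RPC.
 */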
static void
__run_ut_thread(void *arg)
{
	struct spdk_jsonrpc_request *request = arg;
	int rc = 0;
	struct io_target *target;
	unsigned num_failures;

	if (CU_initialize_registry() != CUE_SUCCESS) {
		/* CUnit error, probably won't recover */
		rc = CU_get_error();
		stop_init_thread(-rc, request);
		return;
	}

	target = g_io_targets;
	while (target != NULL) {
		rc = __setup_ut_on_single_target(target);
		if (rc < 0) {
			/* CUnit error, probably won't recover */
			stop_init_thread(-rc, request);
			return;
		}
		target = target->next;
	}
	CU_basic_set_mode(CU_BRM_VERBOSE);
	CU_basic_run_tests();
	num_failures = CU_get_number_of_failures();
	CU_cleanup_registry();

	stop_init_thread(num_failures, request);
}

static void
__construct_targets(void *arg)
{
	if (bdevio_construct_targets() < 0) {
		spdk_app_stop(-1);
		return;
	}

	spdk_thread_send_msg(g_thread_ut, __run_ut_thread, NULL);
}

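/*
 * App entry point (runs on the init core). The test requires at least three
 * cores in the app core mask: the init thread stays where it is, and one
 * dedicated SPDK thread each is created for running the CUnit tests
 * (ut_thread) and for submitting I/O (io_thread), pinned to two other cores.
 * SPDK_ENV_LCORE_ID_ANY serves as the exclusive upper bound when scanning
 * core IDs below.
 */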
static void
test_main(void *arg1)
{
	struct spdk_cpuset tmpmask = {}, *appmask;
	uint32_t cpu, init_cpu;

	pthread_mutex_init(&g_test_mutex, NULL);
	pthread_cond_init(&g_test_cond, NULL);

	appmask = spdk_app_get_core_mask();

	if (spdk_cpuset_count(appmask) < 3) {
		spdk_app_stop(-1);
		return;
	}

	init_cpu = spdk_env_get_current_core();
	g_thread_init = spdk_get_thread();

	for (cpu = 0; cpu < SPDK_ENV_LCORE_ID_ANY; cpu++) {
		if (cpu != init_cpu && spdk_cpuset_get_cpu(appmask, cpu)) {
			spdk_cpuset_zero(&tmpmask);
			spdk_cpuset_set_cpu(&tmpmask, cpu, true);
			g_thread_ut = spdk_thread_create("ut_thread", &tmpmask);
			break;
		}
	}

	if (cpu == SPDK_ENV_LCORE_ID_ANY) {
		spdk_app_stop(-1);
		return;
	}

	for (cpu++; cpu < SPDK_ENV_LCORE_ID_ANY; cpu++) {
		if (cpu != init_cpu && spdk_cpuset_get_cpu(appmask, cpu)) {
			spdk_cpuset_zero(&tmpmask);
			spdk_cpuset_set_cpu(&tmpmask, cpu, true);
			g_thread_io = spdk_thread_create("io_thread", &tmpmask);
			break;
		}
	}

	if (cpu == SPDK_ENV_LCORE_ID_ANY) {
		spdk_app_stop(-1);
		return;
	}

	if (g_wait_for_tests) {
		/* Do not perform any tests until RPC is received */
		return;
	}

	spdk_thread_send_msg(g_thread_init, __construct_targets, NULL);
}

static void
bdevio_usage(void)
{
	printf(" -w                        start bdevio app and wait for RPC to start the tests\n");
}

static int
bdevio_parse_arg(int ch, char *arg)
{
	switch (ch) {
	case 'w':
		g_wait_for_tests = true;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

struct rpc_perform_tests {
	char *name;
};

static void
free_rpc_perform_tests(struct rpc_perform_tests *r)
{
	free(r->name);
}

static const struct spdk_json_object_decoder rpc_perform_tests_decoders[] = {
	{"name", offsetof(struct rpc_perform_tests, name), spdk_json_decode_string, true},
};

static void
rpc_perform_tests_cb(unsigned num_failures, struct spdk_jsonrpc_request *request)
{
	struct spdk_json_write_ctx *w;

	if (num_failures == 0) {
		w = spdk_jsonrpc_begin_result(request);
		spdk_json_write_uint32(w, num_failures);
		spdk_jsonrpc_end_result(request, w);
	} else {
		spdk_jsonrpc_send_error_response_fmt(request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR,
						     "%d test cases failed", num_failures);
	}
}

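/*
 * "perform_tests" RPC: when the app is started with -w, tests are deferred
 * until this method is called. The optional "name" parameter restricts the
 * run to a single bdev; without parameters, all leaf bdevs are tested. A
 * request might look like this (the bdev name is hypothetical):
 *
 *   {"jsonrpc": "2.0", "id": 1, "method": "perform_tests",
 *    "params": {"name": "Malloc0"}}
 *
 * The result is the number of failed test cases (0 on success); any
 * failures come back as a JSON-RPC error response instead.
 */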
static void
rpc_perform_tests(struct spdk_jsonrpc_request *request, const struct spdk_json_val *params)
{
	struct rpc_perform_tests req = {NULL};
	struct spdk_bdev *bdev;
	int rc;

	if (params && spdk_json_decode_object(params, rpc_perform_tests_decoders,
					      SPDK_COUNTOF(rpc_perform_tests_decoders),
					      &req)) {
		SPDK_ERRLOG("spdk_json_decode_object failed\n");
		spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INVALID_PARAMS, "Invalid parameters");
		goto invalid;
	}

	if (req.name) {
		bdev = spdk_bdev_get_by_name(req.name);
		if (bdev == NULL) {
			SPDK_ERRLOG("Bdev '%s' does not exist\n", req.name);
			spdk_jsonrpc_send_error_response_fmt(request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR,
							     "Bdev '%s' does not exist: %s",
							     req.name, spdk_strerror(ENODEV));
			goto invalid;
		}
		rc = bdevio_construct_target(bdev);
		if (rc < 0) {
			SPDK_ERRLOG("Could not construct target for bdev '%s'\n", spdk_bdev_get_name(bdev));
			spdk_jsonrpc_send_error_response_fmt(request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR,
							     "Could not construct target for bdev '%s': %s",
							     spdk_bdev_get_name(bdev), spdk_strerror(-rc));
			goto invalid;
		}
	} else {
		rc = bdevio_construct_targets();
		if (rc < 0) {
			SPDK_ERRLOG("Could not construct targets for all bdevs\n");
			spdk_jsonrpc_send_error_response_fmt(request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR,
							     "Could not construct targets for all bdevs: %s",
							     spdk_strerror(-rc));
			goto invalid;
		}
	}
	free_rpc_perform_tests(&req);

	spdk_thread_send_msg(g_thread_ut, __run_ut_thread, request);

	return;

invalid:
	free_rpc_perform_tests(&req);
}
SPDK_RPC_REGISTER("perform_tests", rpc_perform_tests, SPDK_RPC_RUNTIME)

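/*
 * The default reactor mask reserves three cores ("0x7"), matching the three
 * threads (init, ut and io) that test_main() requires.
 */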
int
main(int argc, char **argv)
{
	int rc;
	struct spdk_app_opts opts = {};

	spdk_app_opts_init(&opts);
	opts.name = "bdevio";
	opts.reactor_mask = "0x7";
	if ((rc = spdk_app_parse_args(argc, argv, &opts, "w", NULL,
				      bdevio_parse_arg, bdevio_usage)) !=
	    SPDK_APP_PARSE_ARGS_SUCCESS) {
		return rc;
	}

	rc = spdk_app_start(&opts, test_main, NULL);
	spdk_app_fini();

	return rc;
}