/*-
 *   BSD LICENSE
 *
 *   Copyright (c) Intel Corporation.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "spdk/stdinc.h"

#include "spdk/bdev.h"
#include "spdk/accel_engine.h"
#include "spdk/env.h"
#include "spdk/log.h"
#include "spdk/thread.h"
#include "spdk/event.h"
#include "spdk/rpc.h"
#include "spdk/util.h"
#include "spdk/string.h"

#include "CUnit/Basic.h"

#define BUFFER_IOVS		1024
#define BUFFER_SIZE		(260 * 1024)
#define BDEV_TASK_ARRAY_SIZE	2048

pthread_mutex_t g_test_mutex;
pthread_cond_t g_test_cond;

static struct spdk_thread *g_thread_init;
static struct spdk_thread *g_thread_ut;
static struct spdk_thread *g_thread_io;
static bool g_wait_for_tests = false;
static int g_num_failures = 0;
static bool g_shutdown = false;

struct io_target {
	struct spdk_bdev	*bdev;
	struct spdk_bdev_desc	*bdev_desc;
	struct spdk_io_channel	*ch;
	struct io_target	*next;
};

struct bdevio_request {
	char *buf;
	char *fused_buf;
	int data_len;
	uint64_t offset;
	struct iovec iov[BUFFER_IOVS];
	int iovcnt;
	struct iovec fused_iov[BUFFER_IOVS];
	int fused_iovcnt;
	struct io_target *target;
};

struct io_target *g_io_targets = NULL;
struct io_target *g_current_io_target = NULL;
static void rpc_perform_tests_cb(unsigned num_failures, struct spdk_jsonrpc_request *request);

/*
 * Run fn on the dedicated I/O thread and block the calling (unit test)
 * thread until the operation signals completion via wake_ut_thread().
 * Only one request is ever outstanding, so a single mutex/condvar pair
 * is sufficient.
 */
static void
execute_spdk_function(spdk_msg_fn fn, void *arg)
{
	pthread_mutex_lock(&g_test_mutex);
	spdk_thread_send_msg(g_thread_io, fn, arg);
	pthread_cond_wait(&g_test_cond, &g_test_mutex);
	pthread_mutex_unlock(&g_test_mutex);
}

static void
wake_ut_thread(void)
{
	pthread_mutex_lock(&g_test_mutex);
	pthread_cond_signal(&g_test_cond);
	pthread_mutex_unlock(&g_test_mutex);
}

static void
__get_io_channel(void *arg)
{
	struct io_target *target = arg;

	target->ch = spdk_bdev_get_io_channel(target->bdev_desc);
	assert(target->ch);
	wake_ut_thread();
}
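/*
 * Open a leaf bdev read/write and fetch an I/O channel for it on the
 * dedicated I/O thread. Constructed targets are pushed onto the global
 * g_io_targets list that the CUnit suites later iterate over.
 */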
static int
bdevio_construct_target(struct spdk_bdev *bdev)
{
	struct io_target *target;
	int rc;
	uint64_t num_blocks = spdk_bdev_get_num_blocks(bdev);
	uint32_t block_size = spdk_bdev_get_block_size(bdev);

	target = malloc(sizeof(struct io_target));
	if (target == NULL) {
		return -ENOMEM;
	}

	rc = spdk_bdev_open(bdev, true, NULL, NULL, &target->bdev_desc);
	if (rc != 0) {
		free(target);
		SPDK_ERRLOG("Could not open leaf bdev %s, error=%d\n", spdk_bdev_get_name(bdev), rc);
		return rc;
	}

	printf(" %s: %" PRIu64 " blocks of %" PRIu32 " bytes (%" PRIu64 " MiB)\n",
	       spdk_bdev_get_name(bdev),
	       num_blocks, block_size,
	       (num_blocks * block_size + 1024 * 1024 - 1) / (1024 * 1024));

	target->bdev = bdev;
	target->next = g_io_targets;
	execute_spdk_function(__get_io_channel, target);
	g_io_targets = target;

	return 0;
}

static int
bdevio_construct_targets(void)
{
	struct spdk_bdev *bdev;
	int rc;

	printf("I/O targets:\n");

	bdev = spdk_bdev_first_leaf();
	while (bdev != NULL) {
		rc = bdevio_construct_target(bdev);
		if (rc < 0) {
			SPDK_ERRLOG("Could not construct bdev %s, error=%d\n", spdk_bdev_get_name(bdev), rc);
			return rc;
		}
		bdev = spdk_bdev_next_leaf(bdev);
	}

	if (g_io_targets == NULL) {
		SPDK_ERRLOG("No bdevs to perform tests on\n");
		return -1;
	}

	return 0;
}

static void
__put_io_channel(void *arg)
{
	struct io_target *target = arg;

	spdk_put_io_channel(target->ch);
	wake_ut_thread();
}

static void
bdevio_cleanup_targets(void)
{
	struct io_target *target;

	target = g_io_targets;
	while (target != NULL) {
		execute_spdk_function(__put_io_channel, target);
		spdk_bdev_close(target->bdev_desc);
		g_io_targets = target->next;
		free(target);
		target = g_io_targets;
	}
}

static bool g_completion_success;

static void
initialize_buffer(char **buf, int pattern, int size)
{
	*buf = spdk_zmalloc(size, 0x1000, NULL, SPDK_ENV_LCORE_ID_ANY, SPDK_MALLOC_DMA);
	memset(*buf, pattern, size);
}

static void
quick_test_complete(struct spdk_bdev_io *bdev_io, bool success, void *arg)
{
	g_completion_success = success;
	spdk_bdev_free_io(bdev_io);
	wake_ut_thread();
}

static void
__blockdev_write(void *arg)
{
	struct bdevio_request *req = arg;
	struct io_target *target = req->target;
	int rc;

	if (req->iovcnt) {
		rc = spdk_bdev_writev(target->bdev_desc, target->ch, req->iov, req->iovcnt, req->offset,
				      req->data_len, quick_test_complete, NULL);
	} else {
		rc = spdk_bdev_write(target->bdev_desc, target->ch, req->buf, req->offset,
				     req->data_len, quick_test_complete, NULL);
	}

	if (rc) {
		g_completion_success = false;
		wake_ut_thread();
	}
}

static void
__blockdev_write_zeroes(void *arg)
{
	struct bdevio_request *req = arg;
	struct io_target *target = req->target;
	int rc;

	rc = spdk_bdev_write_zeroes(target->bdev_desc, target->ch, req->offset,
				    req->data_len, quick_test_complete, NULL);
	if (rc) {
		g_completion_success = false;
		wake_ut_thread();
	}
}
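/*
 * Fused compare-and-write: the compare iovs and the write iovs describe
 * the same LBA range, and the write half only executes if the compare
 * half matches. Note that spdk_bdev_comparev_and_writev_blocks() takes a
 * block-based offset and count, so the request's offset and data_len are
 * passed through to it as block values.
 */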
static void
__blockdev_compare_and_write(void *arg)
{
	struct bdevio_request *req = arg;
	struct io_target *target = req->target;
	int rc;

	rc = spdk_bdev_comparev_and_writev_blocks(target->bdev_desc, target->ch, req->iov, req->iovcnt,
			req->fused_iov, req->fused_iovcnt, req->offset, req->data_len,
			quick_test_complete, NULL);

	if (rc) {
		g_completion_success = false;
		wake_ut_thread();
	}
}

static void
sgl_chop_buffer(struct bdevio_request *req, int iov_len)
{
	int data_len = req->data_len;
	char *buf = req->buf;

	req->iovcnt = 0;
	if (!iov_len) {
		return;
	}

	for (; data_len > 0 && req->iovcnt < BUFFER_IOVS; req->iovcnt++) {
		if (data_len < iov_len) {
			iov_len = data_len;
		}

		req->iov[req->iovcnt].iov_base = buf;
		req->iov[req->iovcnt].iov_len = iov_len;

		buf += iov_len;
		data_len -= iov_len;
	}

	CU_ASSERT_EQUAL_FATAL(data_len, 0);
}

static void
sgl_chop_fused_buffer(struct bdevio_request *req, int iov_len)
{
	int data_len = req->data_len;
	char *buf = req->fused_buf;

	req->fused_iovcnt = 0;
	if (!iov_len) {
		return;
	}

	for (; data_len > 0 && req->fused_iovcnt < BUFFER_IOVS; req->fused_iovcnt++) {
		if (data_len < iov_len) {
			iov_len = data_len;
		}

		req->fused_iov[req->fused_iovcnt].iov_base = buf;
		req->fused_iov[req->fused_iovcnt].iov_len = iov_len;

		buf += iov_len;
		data_len -= iov_len;
	}

	CU_ASSERT_EQUAL_FATAL(data_len, 0);
}

static void
blockdev_write(struct io_target *target, char *tx_buf,
	       uint64_t offset, int data_len, int iov_len)
{
	struct bdevio_request req;

	req.target = target;
	req.buf = tx_buf;
	req.data_len = data_len;
	req.offset = offset;
	sgl_chop_buffer(&req, iov_len);

	g_completion_success = false;

	execute_spdk_function(__blockdev_write, &req);
}

static void
_blockdev_compare_and_write(struct io_target *target, char *cmp_buf, char *write_buf,
			    uint64_t offset, int data_len, int iov_len)
{
	struct bdevio_request req;

	req.target = target;
	req.buf = cmp_buf;
	req.fused_buf = write_buf;
	req.data_len = data_len;
	req.offset = offset;
	sgl_chop_buffer(&req, iov_len);
	sgl_chop_fused_buffer(&req, iov_len);

	g_completion_success = false;

	execute_spdk_function(__blockdev_compare_and_write, &req);
}

static void
blockdev_write_zeroes(struct io_target *target, char *tx_buf,
		      uint64_t offset, int data_len)
{
	struct bdevio_request req;

	req.target = target;
	req.buf = tx_buf;
	req.data_len = data_len;
	req.offset = offset;

	g_completion_success = false;

	execute_spdk_function(__blockdev_write_zeroes, &req);
}

static void
__blockdev_read(void *arg)
{
	struct bdevio_request *req = arg;
	struct io_target *target = req->target;
	int rc;

	if (req->iovcnt) {
		rc = spdk_bdev_readv(target->bdev_desc, target->ch, req->iov, req->iovcnt, req->offset,
				     req->data_len, quick_test_complete, NULL);
	} else {
		rc = spdk_bdev_read(target->bdev_desc, target->ch, req->buf, req->offset,
				    req->data_len, quick_test_complete, NULL);
	}

	if (rc) {
		g_completion_success = false;
		wake_ut_thread();
	}
}

static void
blockdev_read(struct io_target *target, char *rx_buf,
	      uint64_t offset, int data_len, int iov_len)
{
	struct bdevio_request req;

	req.target = target;
	req.buf = rx_buf;
	req.data_len = data_len;
	req.offset = offset;
	req.iovcnt = 0;
	sgl_chop_buffer(&req, iov_len);

	g_completion_success = false;

	execute_spdk_function(__blockdev_read, &req);
}
static int
blockdev_write_read_data_match(char *rx_buf, char *tx_buf, int data_length)
{
	int rc;

	rc = memcmp(rx_buf, tx_buf, data_length);

	spdk_free(rx_buf);
	spdk_free(tx_buf);

	return rc;
}

static bool
blockdev_io_valid_blocks(struct spdk_bdev *bdev, uint64_t data_length)
{
	if (data_length < spdk_bdev_get_block_size(bdev) ||
	    data_length % spdk_bdev_get_block_size(bdev) ||
	    data_length / spdk_bdev_get_block_size(bdev) > spdk_bdev_get_num_blocks(bdev)) {
		return false;
	}

	return true;
}

static void
blockdev_write_read(uint32_t data_length, uint32_t iov_len, int pattern, uint64_t offset,
		    int expected_rc, bool write_zeroes)
{
	struct io_target *target;
	char *tx_buf = NULL;
	char *rx_buf = NULL;
	int rc;

	target = g_current_io_target;

	if (!blockdev_io_valid_blocks(target->bdev, data_length)) {
		return;
	}

	if (!write_zeroes) {
		initialize_buffer(&tx_buf, pattern, data_length);
		initialize_buffer(&rx_buf, 0, data_length);

		blockdev_write(target, tx_buf, offset, data_length, iov_len);
	} else {
		initialize_buffer(&tx_buf, 0, data_length);
		initialize_buffer(&rx_buf, pattern, data_length);

		blockdev_write_zeroes(target, tx_buf, offset, data_length);
	}

	if (expected_rc == 0) {
		CU_ASSERT_EQUAL(g_completion_success, true);
	} else {
		CU_ASSERT_EQUAL(g_completion_success, false);
	}

	blockdev_read(target, rx_buf, offset, data_length, iov_len);

	if (expected_rc == 0) {
		CU_ASSERT_EQUAL(g_completion_success, true);
	} else {
		CU_ASSERT_EQUAL(g_completion_success, false);
	}

	if (g_completion_success) {
		rc = blockdev_write_read_data_match(rx_buf, tx_buf, data_length);
		/* Assert the write by comparing it with values read
		 * from each blockdev */
		CU_ASSERT_EQUAL(rc, 0);
	}
}

static void
blockdev_compare_and_write(uint32_t data_length, uint32_t iov_len, uint64_t offset)
{
	struct io_target *target;
	char *tx_buf = NULL;
	char *write_buf = NULL;
	char *rx_buf = NULL;
	int rc;

	target = g_current_io_target;

	if (!blockdev_io_valid_blocks(target->bdev, data_length)) {
		return;
	}

	initialize_buffer(&tx_buf, 0xAA, data_length);
	initialize_buffer(&rx_buf, 0, data_length);
	initialize_buffer(&write_buf, 0xBB, data_length);

	blockdev_write(target, tx_buf, offset, data_length, iov_len);
	CU_ASSERT_EQUAL(g_completion_success, true);

	/* The first compare-and-write must succeed: the medium still holds
	 * the 0xAA pattern that tx_buf is compared against. */
	_blockdev_compare_and_write(target, tx_buf, write_buf, offset, data_length, iov_len);
	CU_ASSERT_EQUAL(g_completion_success, true);

	/* The second attempt must fail the compare: the previous call
	 * already overwrote the range with 0xBB. */
	_blockdev_compare_and_write(target, tx_buf, write_buf, offset, data_length, iov_len);
	CU_ASSERT_EQUAL(g_completion_success, false);

	blockdev_read(target, rx_buf, offset, data_length, iov_len);
	CU_ASSERT_EQUAL(g_completion_success, true);
	rc = blockdev_write_read_data_match(rx_buf, write_buf, data_length);
	/* Assert the write by comparing it with values read
	 * from each blockdev */
	CU_ASSERT_EQUAL(rc, 0);
}
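/*
 * The test cases below drive blockdev_write_read() with fixed sizes,
 * offsets and patterns. expected_rc == 0 means both the write (or
 * write_zeroes) and the read must complete successfully; expected_rc < 0
 * means both must be failed by the bdev layer.
 */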
static void
blockdev_write_read_4k(void)
{
	uint32_t data_length;
	uint64_t offset;
	int pattern;
	int expected_rc;

	/* Data size = 4K */
	data_length = 4096;
	CU_ASSERT_TRUE(data_length < BUFFER_SIZE);
	offset = 0;
	pattern = 0xA3;
	/* Params are valid, hence the expected return value
	 * of write and read for all blockdevs is 0. */
	expected_rc = 0;

	blockdev_write_read(data_length, 0, pattern, offset, expected_rc, 0);
}

static void
blockdev_write_zeroes_read_4k(void)
{
	uint32_t data_length;
	uint64_t offset;
	int pattern;
	int expected_rc;

	/* Data size = 4K */
	data_length = 4096;
	offset = 0;
	pattern = 0xA3;
	/* Params are valid, hence the expected return value
	 * of write_zeroes and read for all blockdevs is 0. */
	expected_rc = 0;

	blockdev_write_read(data_length, 0, pattern, offset, expected_rc, 1);
}

/*
 * This i/o will not have to split at the bdev layer.
 */
static void
blockdev_write_zeroes_read_1m(void)
{
	uint32_t data_length;
	uint64_t offset;
	int pattern;
	int expected_rc;

	/* Data size = 1M */
	data_length = 1048576;
	offset = 0;
	pattern = 0xA3;
	/* Params are valid, hence the expected return value
	 * of write_zeroes and read for all blockdevs is 0. */
	expected_rc = 0;

	blockdev_write_read(data_length, 0, pattern, offset, expected_rc, 1);
}

/*
 * This i/o will have to split at the bdev layer if
 * write-zeroes is not supported by the bdev.
 */
static void
blockdev_write_zeroes_read_3m(void)
{
	uint32_t data_length;
	uint64_t offset;
	int pattern;
	int expected_rc;

	/* Data size = 3M */
	data_length = 3145728;
	offset = 0;
	pattern = 0xA3;
	/* Params are valid, hence the expected return value
	 * of write_zeroes and read for all blockdevs is 0. */
	expected_rc = 0;

	blockdev_write_read(data_length, 0, pattern, offset, expected_rc, 1);
}

/*
 * This i/o will have to split at the bdev layer if
 * write-zeroes is not supported by the bdev. It also
 * tests a write size that is not an even multiple of
 * the bdev layer zero buffer size.
 */
static void
blockdev_write_zeroes_read_3m_500k(void)
{
	uint32_t data_length;
	uint64_t offset;
	int pattern;
	int expected_rc;

	/* Data size = 3.5M */
	data_length = 3670016;
	offset = 0;
	pattern = 0xA3;
	/* Params are valid, hence the expected return value
	 * of write_zeroes and read for all blockdevs is 0. */
	expected_rc = 0;

	blockdev_write_read(data_length, 0, pattern, offset, expected_rc, 1);
}
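/*
 * The writev/readv variants exercise the scatter-gather code paths:
 * sgl_chop_buffer() splits the single allocation into iov_len-sized
 * iovecs before the request is submitted.
 */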
static void
blockdev_writev_readv_4k(void)
{
	uint32_t data_length, iov_len;
	uint64_t offset;
	int pattern;
	int expected_rc;

	/* Data size = 4K */
	data_length = 4096;
	iov_len = 4096;
	CU_ASSERT_TRUE(data_length < BUFFER_SIZE);
	offset = 0;
	pattern = 0xA3;
	/* Params are valid, hence the expected return value
	 * of write and read for all blockdevs is 0. */
	expected_rc = 0;

	blockdev_write_read(data_length, iov_len, pattern, offset, expected_rc, 0);
}

static void
blockdev_comparev_and_writev(void)
{
	uint32_t data_length, iov_len;
	uint64_t offset;

	data_length = 1;
	iov_len = 1;
	CU_ASSERT_TRUE(data_length < BUFFER_SIZE);
	offset = 0;

	blockdev_compare_and_write(data_length, iov_len, offset);
}

static void
blockdev_writev_readv_30x4k(void)
{
	uint32_t data_length, iov_len;
	uint64_t offset;
	int pattern;
	int expected_rc;

	/* Data size = 30 * 4K = 120K, chopped into 4K iovs */
	data_length = 4096 * 30;
	iov_len = 4096;
	CU_ASSERT_TRUE(data_length < BUFFER_SIZE);
	offset = 0;
	pattern = 0xA3;
	/* Params are valid, hence the expected return value
	 * of write and read for all blockdevs is 0. */
	expected_rc = 0;

	blockdev_write_read(data_length, iov_len, pattern, offset, expected_rc, 0);
}

static void
blockdev_write_read_512Bytes(void)
{
	uint32_t data_length;
	uint64_t offset;
	int pattern;
	int expected_rc;

	/* Data size = 512 */
	data_length = 512;
	CU_ASSERT_TRUE(data_length < BUFFER_SIZE);
	offset = 8192;
	pattern = 0xA3;
	/* Params are valid, hence the expected return value
	 * of write and read for all blockdevs is 0. */
	expected_rc = 0;

	blockdev_write_read(data_length, 0, pattern, offset, expected_rc, 0);
}

static void
blockdev_writev_readv_512Bytes(void)
{
	uint32_t data_length, iov_len;
	uint64_t offset;
	int pattern;
	int expected_rc;

	/* Data size = 512 */
	data_length = 512;
	iov_len = 512;
	CU_ASSERT_TRUE(data_length < BUFFER_SIZE);
	offset = 8192;
	pattern = 0xA3;
	/* Params are valid, hence the expected return value
	 * of write and read for all blockdevs is 0. */
	expected_rc = 0;

	blockdev_write_read(data_length, iov_len, pattern, offset, expected_rc, 0);
}

static void
blockdev_write_read_size_gt_128k(void)
{
	uint32_t data_length;
	uint64_t offset;
	int pattern;
	int expected_rc;

	/* Data size = 132K */
	data_length = 135168;
	CU_ASSERT_TRUE(data_length < BUFFER_SIZE);
	offset = 8192;
	pattern = 0xA3;
	/* Params are valid, hence the expected return value
	 * of write and read for all blockdevs is 0. */
	expected_rc = 0;

	blockdev_write_read(data_length, 0, pattern, offset, expected_rc, 0);
}

static void
blockdev_writev_readv_size_gt_128k(void)
{
	uint32_t data_length, iov_len;
	uint64_t offset;
	int pattern;
	int expected_rc;

	/* Data size = 132K */
	data_length = 135168;
	iov_len = 135168;
	CU_ASSERT_TRUE(data_length < BUFFER_SIZE);
	offset = 8192;
	pattern = 0xA3;
	/* Params are valid, hence the expected return value
	 * of write and read for all blockdevs is 0. */
	expected_rc = 0;

	blockdev_write_read(data_length, iov_len, pattern, offset, expected_rc, 0);
}

static void
blockdev_writev_readv_size_gt_128k_two_iov(void)
{
	uint32_t data_length, iov_len;
	uint64_t offset;
	int pattern;
	int expected_rc;

	/* Data size = 132K */
	data_length = 135168;
	iov_len = 128 * 1024;
	CU_ASSERT_TRUE(data_length < BUFFER_SIZE);
	offset = 8192;
	pattern = 0xA3;
	/* Params are valid, hence the expected return value
	 * of write and read for all blockdevs is 0. */
	expected_rc = 0;

	blockdev_write_read(data_length, iov_len, pattern, offset, expected_rc, 0);
}
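/*
 * Boundary and error cases: I/O ending exactly at the end of the bdev,
 * I/O that runs past the end, sizes that are not block multiples, and
 * offsets that wrap around UINT64_MAX.
 */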
static void
blockdev_write_read_invalid_size(void)
{
	uint32_t data_length;
	uint64_t offset;
	int pattern;
	int expected_rc;

	/* Data size is not a multiple of the block size */
	data_length = 0x1015;
	CU_ASSERT_TRUE(data_length < BUFFER_SIZE);
	offset = 8192;
	pattern = 0xA3;
	/* Params are invalid, hence the expected return value
	 * of write and read for all blockdevs is < 0 */
	expected_rc = -1;

	blockdev_write_read(data_length, 0, pattern, offset, expected_rc, 0);
}

static void
blockdev_write_read_offset_plus_nbytes_equals_bdev_size(void)
{
	struct io_target *target;
	struct spdk_bdev *bdev;
	char *tx_buf = NULL;
	char *rx_buf = NULL;
	uint64_t offset;
	uint32_t block_size;
	int rc;

	target = g_current_io_target;
	bdev = target->bdev;

	block_size = spdk_bdev_get_block_size(bdev);

	/* The start offset is set to a marginal value
	 * such that offset + nbytes == total size of the
	 * blockdev. */
	offset = ((spdk_bdev_get_num_blocks(bdev) - 1) * block_size);

	initialize_buffer(&tx_buf, 0xA3, block_size);
	initialize_buffer(&rx_buf, 0, block_size);

	blockdev_write(target, tx_buf, offset, block_size, 0);
	CU_ASSERT_EQUAL(g_completion_success, true);

	blockdev_read(target, rx_buf, offset, block_size, 0);
	CU_ASSERT_EQUAL(g_completion_success, true);

	rc = blockdev_write_read_data_match(rx_buf, tx_buf, block_size);
	/* Assert the write by comparing it with values read
	 * from each blockdev */
	CU_ASSERT_EQUAL(rc, 0);
}

static void
blockdev_write_read_offset_plus_nbytes_gt_bdev_size(void)
{
	struct io_target *target;
	struct spdk_bdev *bdev;
	char *tx_buf = NULL;
	char *rx_buf = NULL;
	int data_length;
	uint64_t offset;
	int pattern;

	/* Tests the overflow condition of the blockdevs. */
	data_length = 4096;
	CU_ASSERT_TRUE(data_length < BUFFER_SIZE);
	pattern = 0xA3;

	target = g_current_io_target;
	bdev = target->bdev;

	/* The start offset is valid, but offset + nbytes is greater
	 * than the total size of the blockdev, so both I/Os must fail. */
	offset = ((spdk_bdev_get_num_blocks(bdev) * spdk_bdev_get_block_size(bdev)) - 1024);

	initialize_buffer(&tx_buf, pattern, data_length);
	initialize_buffer(&rx_buf, 0, data_length);

	blockdev_write(target, tx_buf, offset, data_length, 0);
	CU_ASSERT_EQUAL(g_completion_success, false);

	blockdev_read(target, rx_buf, offset, data_length, 0);
	CU_ASSERT_EQUAL(g_completion_success, false);
}
static void
blockdev_write_read_max_offset(void)
{
	int data_length;
	uint64_t offset;
	int pattern;
	int expected_rc;

	data_length = 4096;
	CU_ASSERT_TRUE(data_length < BUFFER_SIZE);
	/* The start offset is set to UINT64_MAX such that
	 * adding nbytes wraps around and points to an invalid address.
	 */
	offset = UINT64_MAX;
	pattern = 0xA3;
	/* Params are invalid, hence the expected return value
	 * of write and read for all blockdevs is < 0 */
	expected_rc = -1;

	blockdev_write_read(data_length, 0, pattern, offset, expected_rc, 0);
}

static void
blockdev_overlapped_write_read_8k(void)
{
	int data_length;
	uint64_t offset;
	int pattern;
	int expected_rc;

	/* Data size = 8K */
	data_length = 8192;
	CU_ASSERT_TRUE(data_length < BUFFER_SIZE);
	offset = 0;
	pattern = 0xA3;
	/* Params are valid, hence the expected return value
	 * of write and read for all blockdevs is 0. */
	expected_rc = 0;
	/* Assert the write by comparing it with values read
	 * from the same offset for each blockdev */
	blockdev_write_read(data_length, 0, pattern, offset, expected_rc, 0);

	/* Overwrite with the pattern 0xBB, 8K in size, at an address offset
	 * overlapping with the range written above, and assert the new
	 * value in the overlapped address range */
	/* Populate 8k with value 0xBB */
	pattern = 0xBB;
	/* Offset = 4096; overlaps the tail of the range written above */
	offset = 4096;
	/* Assert the write by comparing it with values read
	 * from the overlapped offset for each blockdev */
	blockdev_write_read(data_length, 0, pattern, offset, expected_rc, 0);
}

static void
__blockdev_reset(void *arg)
{
	struct bdevio_request *req = arg;
	struct io_target *target = req->target;
	int rc;

	rc = spdk_bdev_reset(target->bdev_desc, target->ch, quick_test_complete, NULL);
	if (rc < 0) {
		g_completion_success = false;
		wake_ut_thread();
	}
}

static void
blockdev_test_reset(void)
{
	struct bdevio_request req;
	struct io_target *target;

	target = g_current_io_target;
	req.target = target;

	g_completion_success = false;

	execute_spdk_function(__blockdev_reset, &req);

	/* Workaround: NVMe-oF target doesn't support reset yet - so for now
	 * don't fail the test if it's an NVMe bdev.
	 */
	if (!spdk_bdev_io_type_supported(target->bdev, SPDK_BDEV_IO_TYPE_NVME_IO)) {
		CU_ASSERT_EQUAL(g_completion_success, true);
	}
}
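/*
 * NVMe passthrough plumbing: the completion callback captures the NVMe
 * status (status code type, status code and cdw0) via
 * spdk_bdev_io_get_nvme_status() so the tests can assert on it directly.
 */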
struct bdevio_passthrough_request {
	struct spdk_nvme_cmd cmd;
	void *buf;
	uint32_t len;
	struct io_target *target;
	int sct;
	int sc;
	uint32_t cdw0;
};

static void
nvme_pt_test_complete(struct spdk_bdev_io *bdev_io, bool success, void *arg)
{
	struct bdevio_passthrough_request *pt_req = arg;

	spdk_bdev_io_get_nvme_status(bdev_io, &pt_req->cdw0, &pt_req->sct, &pt_req->sc);
	spdk_bdev_free_io(bdev_io);
	wake_ut_thread();
}

static void
__blockdev_nvme_passthru(void *arg)
{
	struct bdevio_passthrough_request *pt_req = arg;
	struct io_target *target = pt_req->target;
	int rc;

	rc = spdk_bdev_nvme_io_passthru(target->bdev_desc, target->ch,
					&pt_req->cmd, pt_req->buf, pt_req->len,
					nvme_pt_test_complete, pt_req);
	if (rc) {
		wake_ut_thread();
	}
}

static void
blockdev_test_nvme_passthru_rw(void)
{
	struct bdevio_passthrough_request pt_req;
	void *write_buf, *read_buf;
	struct io_target *target;

	target = g_current_io_target;

	if (!spdk_bdev_io_type_supported(target->bdev, SPDK_BDEV_IO_TYPE_NVME_IO)) {
		return;
	}

	memset(&pt_req, 0, sizeof(pt_req));
	pt_req.target = target;
	pt_req.cmd.opc = SPDK_NVME_OPC_WRITE;
	pt_req.cmd.nsid = 1;
	*(uint64_t *)&pt_req.cmd.cdw10 = 4;	/* starting LBA (spans cdw10/cdw11) */
	pt_req.cmd.cdw12 = 0;			/* number of blocks, 0's based */

	pt_req.len = spdk_bdev_get_block_size(target->bdev);
	write_buf = spdk_malloc(pt_req.len, 0, NULL, SPDK_ENV_LCORE_ID_ANY, SPDK_MALLOC_DMA);
	memset(write_buf, 0xA5, pt_req.len);
	pt_req.buf = write_buf;

	/* Seed sct/sc with values the completion callback must overwrite. */
	pt_req.sct = SPDK_NVME_SCT_VENDOR_SPECIFIC;
	pt_req.sc = SPDK_NVME_SC_INVALID_FIELD;
	execute_spdk_function(__blockdev_nvme_passthru, &pt_req);
	CU_ASSERT(pt_req.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(pt_req.sc == SPDK_NVME_SC_SUCCESS);

	pt_req.cmd.opc = SPDK_NVME_OPC_READ;
	read_buf = spdk_zmalloc(pt_req.len, 0, NULL, SPDK_ENV_LCORE_ID_ANY, SPDK_MALLOC_DMA);
	pt_req.buf = read_buf;

	pt_req.sct = SPDK_NVME_SCT_VENDOR_SPECIFIC;
	pt_req.sc = SPDK_NVME_SC_INVALID_FIELD;
	execute_spdk_function(__blockdev_nvme_passthru, &pt_req);
	CU_ASSERT(pt_req.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(pt_req.sc == SPDK_NVME_SC_SUCCESS);

	CU_ASSERT(!memcmp(read_buf, write_buf, pt_req.len));
	spdk_free(read_buf);
	spdk_free(write_buf);
}

static void
blockdev_test_nvme_passthru_vendor_specific(void)
{
	struct bdevio_passthrough_request pt_req;
	struct io_target *target;

	target = g_current_io_target;

	if (!spdk_bdev_io_type_supported(target->bdev, SPDK_BDEV_IO_TYPE_NVME_IO)) {
		return;
	}

	memset(&pt_req, 0, sizeof(pt_req));
	pt_req.target = target;
	pt_req.cmd.opc = 0x7F;	/* choose known invalid opcode */
	pt_req.cmd.nsid = 1;

	pt_req.sct = SPDK_NVME_SCT_VENDOR_SPECIFIC;
	pt_req.sc = SPDK_NVME_SC_SUCCESS;
	pt_req.cdw0 = 0xbeef;
	execute_spdk_function(__blockdev_nvme_passthru, &pt_req);
	CU_ASSERT(pt_req.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(pt_req.sc == SPDK_NVME_SC_INVALID_OPCODE);
	CU_ASSERT(pt_req.cdw0 == 0x0);
}
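/*
 * Admin passthrough: issue IDENTIFY CONTROLLER (nsid 0) and expect a
 * successful generic completion. Only run against bdevs that support
 * SPDK_BDEV_IO_TYPE_NVME_ADMIN.
 */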
static void
__blockdev_nvme_admin_passthru(void *arg)
{
	struct bdevio_passthrough_request *pt_req = arg;
	struct io_target *target = pt_req->target;
	int rc;

	rc = spdk_bdev_nvme_admin_passthru(target->bdev_desc, target->ch,
					   &pt_req->cmd, pt_req->buf, pt_req->len,
					   nvme_pt_test_complete, pt_req);
	if (rc) {
		wake_ut_thread();
	}
}

static void
blockdev_test_nvme_admin_passthru(void)
{
	struct io_target *target;
	struct bdevio_passthrough_request pt_req;

	target = g_current_io_target;

	if (!spdk_bdev_io_type_supported(target->bdev, SPDK_BDEV_IO_TYPE_NVME_ADMIN)) {
		return;
	}

	memset(&pt_req, 0, sizeof(pt_req));
	pt_req.target = target;
	pt_req.cmd.opc = SPDK_NVME_OPC_IDENTIFY;
	pt_req.cmd.nsid = 0;
	*(uint64_t *)&pt_req.cmd.cdw10 = SPDK_NVME_IDENTIFY_CTRLR;

	pt_req.len = sizeof(struct spdk_nvme_ctrlr_data);
	pt_req.buf = spdk_malloc(pt_req.len, 0, NULL, SPDK_ENV_LCORE_ID_ANY, SPDK_MALLOC_DMA);

	pt_req.sct = SPDK_NVME_SCT_GENERIC;
	pt_req.sc = SPDK_NVME_SC_SUCCESS;
	execute_spdk_function(__blockdev_nvme_admin_passthru, &pt_req);
	CU_ASSERT(pt_req.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(pt_req.sc == SPDK_NVME_SC_SUCCESS);
}

static void
__stop_init_thread(void *arg)
{
	unsigned num_failures = g_num_failures;
	struct spdk_jsonrpc_request *request = arg;

	g_num_failures = 0;

	bdevio_cleanup_targets();
	if (g_wait_for_tests && !g_shutdown) {
		/* Do not stop the app yet, wait for another RPC */
		rpc_perform_tests_cb(num_failures, request);
		return;
	}
	spdk_app_stop(num_failures);
}

static void
stop_init_thread(unsigned num_failures, struct spdk_jsonrpc_request *request)
{
	g_num_failures = num_failures;

	spdk_thread_send_msg(g_thread_init, __stop_init_thread, request);
}

static int
suite_init(void)
{
	if (g_current_io_target == NULL) {
		g_current_io_target = g_io_targets;
	}
	return 0;
}

static int
suite_fini(void)
{
	g_current_io_target = g_current_io_target->next;
	return 0;
}

#define SUITE_NAME_MAX 64

static int
__setup_ut_on_single_target(struct io_target *target)
{
	int rc = 0;
	CU_pSuite suite = NULL;
	char name[SUITE_NAME_MAX];

	snprintf(name, sizeof(name), "bdevio tests on: %s", spdk_bdev_get_name(target->bdev));
	suite = CU_add_suite(name, suite_init, suite_fini);
	if (suite == NULL) {
		CU_cleanup_registry();
		rc = CU_get_error();
		return -rc;
	}

	if (
		CU_add_test(suite, "blockdev write read 4k", blockdev_write_read_4k) == NULL
		|| CU_add_test(suite, "blockdev write zeroes read 4k", blockdev_write_zeroes_read_4k) == NULL
		|| CU_add_test(suite, "blockdev write zeroes read 1m", blockdev_write_zeroes_read_1m) == NULL
		|| CU_add_test(suite, "blockdev write zeroes read 3m", blockdev_write_zeroes_read_3m) == NULL
		|| CU_add_test(suite, "blockdev write zeroes read 3.5m", blockdev_write_zeroes_read_3m_500k) == NULL
		|| CU_add_test(suite, "blockdev reset", blockdev_test_reset) == NULL
		|| CU_add_test(suite, "blockdev write read 512 bytes",
			       blockdev_write_read_512Bytes) == NULL
		|| CU_add_test(suite, "blockdev write read size > 128k",
			       blockdev_write_read_size_gt_128k) == NULL
		|| CU_add_test(suite, "blockdev write read invalid size",
			       blockdev_write_read_invalid_size) == NULL
		|| CU_add_test(suite, "blockdev write read offset + nbytes == size of blockdev",
			       blockdev_write_read_offset_plus_nbytes_equals_bdev_size) == NULL
		|| CU_add_test(suite, "blockdev write read offset + nbytes > size of blockdev",
			       blockdev_write_read_offset_plus_nbytes_gt_bdev_size) == NULL
		|| CU_add_test(suite, "blockdev write read max offset",
			       blockdev_write_read_max_offset) == NULL
		|| CU_add_test(suite, "blockdev write read 8k on overlapped address offset",
			       blockdev_overlapped_write_read_8k) == NULL
		|| CU_add_test(suite, "blockdev writev readv 4k", blockdev_writev_readv_4k) == NULL
		|| CU_add_test(suite, "blockdev writev readv 30 x 4k",
			       blockdev_writev_readv_30x4k) == NULL
		|| CU_add_test(suite, "blockdev writev readv 512 bytes",
			       blockdev_writev_readv_512Bytes) == NULL
		|| CU_add_test(suite, "blockdev writev readv size > 128k",
			       blockdev_writev_readv_size_gt_128k) == NULL
		|| CU_add_test(suite, "blockdev writev readv size > 128k in two iovs",
			       blockdev_writev_readv_size_gt_128k_two_iov) == NULL
		|| CU_add_test(suite, "blockdev comparev and writev", blockdev_comparev_and_writev) == NULL
		|| CU_add_test(suite, "blockdev nvme passthru rw",
			       blockdev_test_nvme_passthru_rw) == NULL
		|| CU_add_test(suite, "blockdev nvme passthru vendor specific",
			       blockdev_test_nvme_passthru_vendor_specific) == NULL
		|| CU_add_test(suite, "blockdev nvme admin passthru",
			       blockdev_test_nvme_admin_passthru) == NULL
	) {
		CU_cleanup_registry();
		rc = CU_get_error();
		return -rc;
	}
	return 0;
}
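/*
 * Runs on the ut thread: register one CUnit suite per constructed target,
 * run the whole registry, then hand the failure count back to the init
 * thread. suite_init/suite_fini advance g_current_io_target so each suite
 * operates on its own bdev.
 */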
static void
__run_ut_thread(void *arg)
{
	struct spdk_jsonrpc_request *request = arg;
	int rc = 0;
	struct io_target *target;
	unsigned num_failures;

	if (CU_initialize_registry() != CUE_SUCCESS) {
		/* CUnit error, probably won't recover */
		rc = CU_get_error();
		stop_init_thread(-rc, request);
		return;
	}

	target = g_io_targets;
	while (target != NULL) {
		rc = __setup_ut_on_single_target(target);
		if (rc < 0) {
			/* CUnit error, probably won't recover */
			stop_init_thread(-rc, request);
			return;
		}
		target = target->next;
	}
	CU_basic_set_mode(CU_BRM_VERBOSE);
	CU_basic_run_tests();
	num_failures = CU_get_number_of_failures();
	CU_cleanup_registry();

	stop_init_thread(num_failures, request);
}

static void
__construct_targets(void *arg)
{
	if (bdevio_construct_targets() < 0) {
		spdk_app_stop(-1);
		return;
	}

	spdk_thread_send_msg(g_thread_ut, __run_ut_thread, NULL);
}

static void
test_main(void *arg1)
{
	struct spdk_cpuset tmpmask = {};
	const struct spdk_cpuset *appmask;
	uint32_t cpu, init_cpu;

	pthread_mutex_init(&g_test_mutex, NULL);
	pthread_cond_init(&g_test_cond, NULL);

	/* The app needs at least three cores: one for this init/RPC thread,
	 * one for the unit test thread and one for the I/O thread. */
	appmask = spdk_app_get_core_mask();

	if (spdk_cpuset_count(appmask) < 3) {
		spdk_app_stop(-1);
		return;
	}

	init_cpu = spdk_env_get_current_core();
	g_thread_init = spdk_get_thread();

	for (cpu = 0; cpu < SPDK_ENV_LCORE_ID_ANY; cpu++) {
		if (cpu != init_cpu && spdk_cpuset_get_cpu(appmask, cpu)) {
			spdk_cpuset_zero(&tmpmask);
			spdk_cpuset_set_cpu(&tmpmask, cpu, true);
			g_thread_ut = spdk_thread_create("ut_thread", &tmpmask);
			break;
		}
	}

	if (cpu == SPDK_ENV_LCORE_ID_ANY) {
		spdk_app_stop(-1);
		return;
	}

	for (cpu++; cpu < SPDK_ENV_LCORE_ID_ANY; cpu++) {
		if (cpu != init_cpu && spdk_cpuset_get_cpu(appmask, cpu)) {
			spdk_cpuset_zero(&tmpmask);
			spdk_cpuset_set_cpu(&tmpmask, cpu, true);
			g_thread_io = spdk_thread_create("io_thread", &tmpmask);
			break;
		}
	}

	if (cpu == SPDK_ENV_LCORE_ID_ANY) {
		spdk_app_stop(-1);
		return;
	}

	if (g_wait_for_tests) {
		/* Do not perform any tests until RPC is received */
		return;
	}

	spdk_thread_send_msg(g_thread_init, __construct_targets, NULL);
}
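/*
 * With -w the app starts up, creates its threads and then sits idle until
 * a "perform_tests" JSON-RPC request arrives; without it the tests run
 * immediately against all leaf bdevs and the app exits.
 */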
static void
bdevio_usage(void)
{
	printf(" -w                        start bdevio app and wait for RPC to start the tests\n");
}

static int
bdevio_parse_arg(int ch, char *arg)
{
	switch (ch) {
	case 'w':
		g_wait_for_tests = true;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

struct rpc_perform_tests {
	char *name;
};

static void
free_rpc_perform_tests(struct rpc_perform_tests *r)
{
	free(r->name);
}

static const struct spdk_json_object_decoder rpc_perform_tests_decoders[] = {
	{"name", offsetof(struct rpc_perform_tests, name), spdk_json_decode_string, true},
};

static void
rpc_perform_tests_cb(unsigned num_failures, struct spdk_jsonrpc_request *request)
{
	struct spdk_json_write_ctx *w;

	if (num_failures == 0) {
		w = spdk_jsonrpc_begin_result(request);
		spdk_json_write_uint32(w, num_failures);
		spdk_jsonrpc_end_result(request, w);
	} else {
		spdk_jsonrpc_send_error_response_fmt(request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR,
						     "%u test cases failed", num_failures);
	}
}

static void
rpc_perform_tests(struct spdk_jsonrpc_request *request, const struct spdk_json_val *params)
{
	struct rpc_perform_tests req = {NULL};
	struct spdk_bdev *bdev;
	int rc;

	if (params && spdk_json_decode_object(params, rpc_perform_tests_decoders,
					      SPDK_COUNTOF(rpc_perform_tests_decoders),
					      &req)) {
		SPDK_ERRLOG("spdk_json_decode_object failed\n");
		spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INVALID_PARAMS,
						 "Invalid parameters");
		goto invalid;
	}

	if (req.name) {
		bdev = spdk_bdev_get_by_name(req.name);
		if (bdev == NULL) {
			SPDK_ERRLOG("Bdev '%s' does not exist\n", req.name);
			spdk_jsonrpc_send_error_response_fmt(request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR,
							     "Bdev '%s' does not exist: %s",
							     req.name, spdk_strerror(ENODEV));
			goto invalid;
		}
		rc = bdevio_construct_target(bdev);
		if (rc < 0) {
			SPDK_ERRLOG("Could not construct target for bdev '%s'\n", spdk_bdev_get_name(bdev));
			spdk_jsonrpc_send_error_response_fmt(request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR,
							     "Could not construct target for bdev '%s': %s",
							     spdk_bdev_get_name(bdev), spdk_strerror(-rc));
			goto invalid;
		}
	} else {
		rc = bdevio_construct_targets();
		if (rc < 0) {
			SPDK_ERRLOG("Could not construct targets for all bdevs\n");
			spdk_jsonrpc_send_error_response_fmt(request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR,
							     "Could not construct targets for all bdevs: %s",
							     spdk_strerror(-rc));
			goto invalid;
		}
	}
	free_rpc_perform_tests(&req);

	spdk_thread_send_msg(g_thread_ut, __run_ut_thread, request);

	return;

invalid:
	free_rpc_perform_tests(&req);
}
SPDK_RPC_REGISTER("perform_tests", rpc_perform_tests, SPDK_RPC_RUNTIME)
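/*
 * Example request, sent over SPDK's JSON-RPC socket (the "name" parameter
 * is optional and limits the run to a single bdev; "Malloc0" is just a
 * placeholder bdev name for illustration):
 *
 *   { "jsonrpc": "2.0", "id": 1, "method": "perform_tests",
 *     "params": { "name": "Malloc0" } }
 */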
static void
spdk_bdevio_shutdown_cb(void)
{
	g_shutdown = true;
	spdk_thread_send_msg(g_thread_init, __stop_init_thread, NULL);
}

int
main(int argc, char **argv)
{
	int rc;
	struct spdk_app_opts opts = {};

	spdk_app_opts_init(&opts, sizeof(opts));
	opts.name = "bdevio";
	opts.reactor_mask = "0x7";
	opts.shutdown_cb = spdk_bdevio_shutdown_cb;

	if ((rc = spdk_app_parse_args(argc, argv, &opts, "w", NULL,
				      bdevio_parse_arg, bdevio_usage)) !=
	    SPDK_APP_PARSE_ARGS_SUCCESS) {
		return rc;
	}

	rc = spdk_app_start(&opts, test_main, NULL);
	spdk_app_fini();

	return rc;
}