/*-
 *   BSD LICENSE
 *
 *   Copyright (C) 2008-2012 Daisuke Aoyama <aoyama@peach.ne.jp>.
 *   Copyright (c) Intel Corporation.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "spdk/stdinc.h"

#include "spdk/bdev.h"
#include "spdk/copy_engine.h"
#include "spdk/env.h"
#include "spdk/log.h"
#include "spdk/thread.h"

#include "CUnit/Basic.h"

#define BUFFER_IOVS		1024
#define BUFFER_SIZE		260 * 1024
#define BDEV_TASK_ARRAY_SIZE	2048

#define LCORE_ID_INIT		0
#define LCORE_ID_UT		1
#define LCORE_ID_IO		2

#include "../common.c"

pthread_mutex_t g_test_mutex;
pthread_cond_t g_test_cond;

struct io_target {
	struct spdk_bdev	*bdev;
	struct spdk_bdev_desc	*bdev_desc;
	struct spdk_io_channel	*ch;
	struct io_target	*next;
};

struct bdevio_request {
	char *buf;
	int data_len;
	uint64_t offset;
	struct iovec iov[BUFFER_IOVS];
	int iovcnt;
	struct io_target *target;
};

struct io_target *g_io_targets = NULL;

/* Schedule fn as an event on the I/O core and block the calling (UT) thread
 * until the event handler calls wake_ut_thread(). */
static void
execute_spdk_function(spdk_event_fn fn, void *arg1, void *arg2)
{
	struct spdk_event *event;

	event = spdk_event_allocate(LCORE_ID_IO, fn, arg1, arg2);
	pthread_mutex_lock(&g_test_mutex);
	spdk_event_call(event);
	pthread_cond_wait(&g_test_cond, &g_test_mutex);
	pthread_mutex_unlock(&g_test_mutex);
}

static void
wake_ut_thread(void)
{
	pthread_mutex_lock(&g_test_mutex);
	pthread_cond_signal(&g_test_cond);
	pthread_mutex_unlock(&g_test_mutex);
}

static void
__get_io_channel(void *arg1, void *arg2)
{
	struct io_target *target = arg1;

	target->ch = spdk_bdev_get_io_channel(target->bdev_desc);
	assert(target->ch);
	wake_ut_thread();
}

static int
bdevio_construct_targets(void)
{
	struct spdk_bdev *bdev;
	struct io_target *target;
	int rc;

	printf("I/O targets:\n");

	bdev = spdk_bdev_first_leaf();
	while (bdev != NULL) {
		uint64_t num_blocks = spdk_bdev_get_num_blocks(bdev);
		uint32_t block_size = spdk_bdev_get_block_size(bdev);

		target = malloc(sizeof(struct io_target));
		if (target == NULL) {
			return -ENOMEM;
		}

		rc = spdk_bdev_open(bdev, true, NULL, NULL, &target->bdev_desc);
		if (rc != 0) {
			free(target);
			SPDK_ERRLOG("Could not open leaf bdev %s, error=%d\n", spdk_bdev_get_name(bdev), rc);
			bdev = spdk_bdev_next_leaf(bdev);
			continue;
		}

		printf(" %s: %" PRIu64 " blocks of %" PRIu32 " bytes (%" PRIu64 " MiB)\n",
		       spdk_bdev_get_name(bdev),
		       num_blocks, block_size,
		       (num_blocks * block_size + 1024 * 1024 - 1) / (1024 * 1024));

		target->bdev = bdev;
		target->next = g_io_targets;
		execute_spdk_function(__get_io_channel, target, NULL);
		g_io_targets = target;

		bdev = spdk_bdev_next_leaf(bdev);
	}

	return 0;
}

static void
__put_io_channel(void *arg1, void *arg2)
{
	struct io_target *target = arg1;

	spdk_put_io_channel(target->ch);
	wake_ut_thread();
}

static void
bdevio_cleanup_targets(void)
{
	struct io_target *target;

	target = g_io_targets;
	while (target != NULL) {
		execute_spdk_function(__put_io_channel, target, NULL);
		spdk_bdev_close(target->bdev_desc);
		g_io_targets = target->next;
		free(target);
		target = g_io_targets;
	}
}

static bool g_completion_success;

static void
initialize_buffer(char **buf, int pattern, int size)
{
	*buf = spdk_dma_zmalloc(size, 0x1000, NULL);
	memset(*buf, pattern, size);
}

static void
quick_test_complete(struct spdk_bdev_io *bdev_io, bool success, void *arg)
{
	g_completion_success = success;
	spdk_bdev_free_io(bdev_io);
	wake_ut_thread();
}

static void
__blockdev_write(void *arg1, void *arg2)
{
	struct bdevio_request *req = arg1;
	struct io_target *target = req->target;
	int rc;

	if (req->iovcnt) {
		rc = spdk_bdev_writev(target->bdev_desc, target->ch, req->iov, req->iovcnt, req->offset,
				      req->data_len, quick_test_complete, NULL);
	} else {
		rc = spdk_bdev_write(target->bdev_desc, target->ch, req->buf, req->offset,
				     req->data_len, quick_test_complete, NULL);
	}

	if (rc) {
		g_completion_success = false;
		wake_ut_thread();
	}
}

static void
__blockdev_write_zeroes(void *arg1, void *arg2)
{
	struct bdevio_request *req = arg1;
	struct io_target *target = req->target;
	int rc;

	rc = spdk_bdev_write_zeroes(target->bdev_desc, target->ch, req->offset,
				    req->data_len, quick_test_complete, NULL);
	if (rc) {
		g_completion_success = false;
		wake_ut_thread();
	}
}

static void
sgl_chop_buffer(struct bdevio_request *req, int iov_len)
{
	int data_len = req->data_len;
	char *buf = req->buf;

	req->iovcnt = 0;
	if (!iov_len) {
		return;
	}

	for (; data_len > 0 && req->iovcnt < BUFFER_IOVS; req->iovcnt++) {
		if (data_len < iov_len) {
			iov_len = data_len;
		}

		req->iov[req->iovcnt].iov_base = buf;
		req->iov[req->iovcnt].iov_len = iov_len;

		buf += iov_len;
		data_len -= iov_len;
	}

	CU_ASSERT_EQUAL_FATAL(data_len, 0);
}

static void
blockdev_write(struct io_target *target, char *tx_buf,
	       uint64_t offset, int data_len, int iov_len)
{
	struct bdevio_request req;

	req.target = target;
	req.buf = tx_buf;
	req.data_len = data_len;
	req.offset = offset;
	sgl_chop_buffer(&req, iov_len);

	g_completion_success = false;

	execute_spdk_function(__blockdev_write, &req, NULL);
}

static void
blockdev_write_zeroes(struct io_target *target, char *tx_buf,
		      uint64_t offset, int data_len)
{
	struct bdevio_request req;

	req.target = target;
	req.buf = tx_buf;
	req.data_len = data_len;
	req.offset = offset;

	g_completion_success = false;

	execute_spdk_function(__blockdev_write_zeroes, &req, NULL);
}

static void
__blockdev_read(void *arg1, void *arg2)
{
	struct bdevio_request *req = arg1;
	struct io_target *target = req->target;
	int rc;

	if (req->iovcnt) {
		rc = spdk_bdev_readv(target->bdev_desc, target->ch, req->iov, req->iovcnt, req->offset,
				     req->data_len, quick_test_complete, NULL);
	} else {
		rc = spdk_bdev_read(target->bdev_desc, target->ch, req->buf, req->offset,
				    req->data_len, quick_test_complete, NULL);
	}

	if (rc) {
		g_completion_success = false;
		wake_ut_thread();
	}
}

static void
blockdev_read(struct io_target *target, char *rx_buf,
	      uint64_t offset, int data_len, int iov_len)
{
	struct bdevio_request req;

	req.target = target;
	req.buf = rx_buf;
	req.data_len = data_len;
	req.offset = offset;
	req.iovcnt = 0;
	sgl_chop_buffer(&req, iov_len);

	g_completion_success = false;

	execute_spdk_function(__blockdev_read, &req, NULL);
}

static int
blockdev_write_read_data_match(char *rx_buf, char *tx_buf, int data_length)
{
	int rc;
	rc = memcmp(rx_buf, tx_buf, data_length);

	spdk_dma_free(rx_buf);
	spdk_dma_free(tx_buf);

	return rc;
}

/* Write (or write_zeroes) data_length bytes at offset on every I/O target,
 * read them back, and verify both the completion status and the data. */
static void
blockdev_write_read(uint32_t data_length, uint32_t iov_len, int pattern, uint64_t offset,
		    int expected_rc, bool write_zeroes)
{
	struct io_target *target;
	char *tx_buf = NULL;
	char *rx_buf = NULL;
	int rc;

	target = g_io_targets;
	while (target != NULL) {
		if (data_length < spdk_bdev_get_block_size(target->bdev) ||
		    data_length / spdk_bdev_get_block_size(target->bdev) > spdk_bdev_get_num_blocks(target->bdev)) {
			target = target->next;
			continue;
		}

		if (!write_zeroes) {
			initialize_buffer(&tx_buf, pattern, data_length);
			initialize_buffer(&rx_buf, 0, data_length);

			blockdev_write(target, tx_buf, offset, data_length, iov_len);
		} else {
			initialize_buffer(&tx_buf, 0, data_length);
			initialize_buffer(&rx_buf, pattern, data_length);

			blockdev_write_zeroes(target, tx_buf, offset, data_length);
		}

		if (expected_rc == 0) {
			CU_ASSERT_EQUAL(g_completion_success, true);
		} else {
			CU_ASSERT_EQUAL(g_completion_success, false);
		}
		blockdev_read(target, rx_buf, offset, data_length, iov_len);

		if (expected_rc == 0) {
			CU_ASSERT_EQUAL(g_completion_success, true);
		} else {
			CU_ASSERT_EQUAL(g_completion_success, false);
		}

		if (g_completion_success) {
			rc = blockdev_write_read_data_match(rx_buf, tx_buf, data_length);
			/* Assert the write by comparing it with values read
			 * from each blockdev */
			CU_ASSERT_EQUAL(rc, 0);
		}

		target = target->next;
	}
}

static void
blockdev_write_read_4k(void)
{
	uint32_t data_length;
	uint64_t offset;
	int pattern;
	int expected_rc;

	/* Data size = 4K */
	data_length = 4096;
	CU_ASSERT_TRUE(data_length < BUFFER_SIZE);
	offset = 0;
	pattern = 0xA3;
	/* Params are valid, hence the expected return value
	 * of write and read for all blockdevs is 0. */
	expected_rc = 0;

	blockdev_write_read(data_length, 0, pattern, offset, expected_rc, 0);
}

static void
blockdev_write_zeroes_read_4k(void)
{
	uint32_t data_length;
	uint64_t offset;
	int pattern;
	int expected_rc;

	/* Data size = 4K */
	data_length = 4096;
	offset = 0;
	pattern = 0xA3;
	/* Params are valid, hence the expected return value
	 * of write_zeroes and read for all blockdevs is 0. */
	expected_rc = 0;

	blockdev_write_read(data_length, 0, pattern, offset, expected_rc, 1);
}

/*
 * This i/o will not have to split at the bdev layer.
 */
static void
blockdev_write_zeroes_read_1m(void)
{
	uint32_t data_length;
	uint64_t offset;
	int pattern;
	int expected_rc;

	/* Data size = 1M */
	data_length = 1048576;
	offset = 0;
	pattern = 0xA3;
	/* Params are valid, hence the expected return value
	 * of write_zeroes and read for all blockdevs is 0. */
	expected_rc = 0;

	blockdev_write_read(data_length, 0, pattern, offset, expected_rc, 1);
}

/*
 * This i/o will have to split at the bdev layer if
 * write-zeroes is not supported by the bdev.
 */
static void
blockdev_write_zeroes_read_3m(void)
{
	uint32_t data_length;
	uint64_t offset;
	int pattern;
	int expected_rc;

	/* Data size = 3M */
	data_length = 3145728;
	offset = 0;
	pattern = 0xA3;
	/* Params are valid, hence the expected return value
	 * of write_zeroes and read for all blockdevs is 0. */
	expected_rc = 0;

	blockdev_write_read(data_length, 0, pattern, offset, expected_rc, 1);
}

/*
 * This i/o will have to split at the bdev layer if
 * write-zeroes is not supported by the bdev. It also
 * tests a write size that is not an even multiple of
 * the bdev layer zero buffer size.
 */
static void
blockdev_write_zeroes_read_3m_500k(void)
{
	uint32_t data_length;
	uint64_t offset;
	int pattern;
	int expected_rc;

	/* Data size = 3.5M */
	data_length = 3670016;
	offset = 0;
	pattern = 0xA3;
	/* Params are valid, hence the expected return value
	 * of write_zeroes and read for all blockdevs is 0. */
	expected_rc = 0;

	blockdev_write_read(data_length, 0, pattern, offset, expected_rc, 1);
}

static void
blockdev_writev_readv_4k(void)
{
	uint32_t data_length, iov_len;
	uint64_t offset;
	int pattern;
	int expected_rc;

	/* Data size = 4K */
	data_length = 4096;
	iov_len = 4096;
	CU_ASSERT_TRUE(data_length < BUFFER_SIZE);
	offset = 0;
	pattern = 0xA3;
	/* Params are valid, hence the expected return value
	 * of write and read for all blockdevs is 0. */
	expected_rc = 0;

	blockdev_write_read(data_length, iov_len, pattern, offset, expected_rc, 0);
}

static void
blockdev_writev_readv_30x4k(void)
{
	uint32_t data_length, iov_len;
	uint64_t offset;
	int pattern;
	int expected_rc;

	/* Data size = 30 x 4K = 120K */
	data_length = 4096 * 30;
	iov_len = 4096;
	CU_ASSERT_TRUE(data_length < BUFFER_SIZE);
	offset = 0;
	pattern = 0xA3;
	/* Params are valid, hence the expected return value
	 * of write and read for all blockdevs is 0. */
	expected_rc = 0;

	blockdev_write_read(data_length, iov_len, pattern, offset, expected_rc, 0);
}

static void
blockdev_write_read_512Bytes(void)
{
	uint32_t data_length;
	uint64_t offset;
	int pattern;
	int expected_rc;

	/* Data size = 512 */
	data_length = 512;
	CU_ASSERT_TRUE(data_length < BUFFER_SIZE);
	offset = 8192;
	pattern = 0xA3;
	/* Params are valid, hence the expected return value
	 * of write and read for all blockdevs is 0. */
	expected_rc = 0;

	blockdev_write_read(data_length, 0, pattern, offset, expected_rc, 0);
}

static void
blockdev_writev_readv_512Bytes(void)
{
	uint32_t data_length, iov_len;
	uint64_t offset;
	int pattern;
	int expected_rc;

	/* Data size = 512 */
	data_length = 512;
	iov_len = 512;
	CU_ASSERT_TRUE(data_length < BUFFER_SIZE);
	offset = 8192;
	pattern = 0xA3;
	/* Params are valid, hence the expected return value
	 * of write and read for all blockdevs is 0. */
	expected_rc = 0;

	blockdev_write_read(data_length, iov_len, pattern, offset, expected_rc, 0);
}

static void
blockdev_write_read_size_gt_128k(void)
{
	uint32_t data_length;
	uint64_t offset;
	int pattern;
	int expected_rc;

	/* Data size = 132K */
	data_length = 135168;
	CU_ASSERT_TRUE(data_length < BUFFER_SIZE);
	offset = 8192;
	pattern = 0xA3;
	/* Params are valid, hence the expected return value
	 * of write and read for all blockdevs is 0. */
	expected_rc = 0;

	blockdev_write_read(data_length, 0, pattern, offset, expected_rc, 0);
}

static void
blockdev_writev_readv_size_gt_128k(void)
{
	uint32_t data_length, iov_len;
	uint64_t offset;
	int pattern;
	int expected_rc;

	/* Data size = 132K */
	data_length = 135168;
	iov_len = 135168;
	CU_ASSERT_TRUE(data_length < BUFFER_SIZE);
	offset = 8192;
	pattern = 0xA3;
	/* Params are valid, hence the expected return value
	 * of write and read for all blockdevs is 0. */
	expected_rc = 0;

	blockdev_write_read(data_length, iov_len, pattern, offset, expected_rc, 0);
}

static void
blockdev_writev_readv_size_gt_128k_two_iov(void)
{
	uint32_t data_length, iov_len;
	uint64_t offset;
	int pattern;
	int expected_rc;

	/* Data size = 132K */
	data_length = 135168;
	iov_len = 128 * 1024;
	CU_ASSERT_TRUE(data_length < BUFFER_SIZE);
	offset = 8192;
	pattern = 0xA3;
	/* Params are valid, hence the expected return value
	 * of write and read for all blockdevs is 0. */
	expected_rc = 0;

	blockdev_write_read(data_length, iov_len, pattern, offset, expected_rc, 0);
}

static void
blockdev_write_read_invalid_size(void)
{
	uint32_t data_length;
	uint64_t offset;
	int pattern;
	int expected_rc;

	/* Data size is not a multiple of the block size */
	data_length = 0x1015;
	CU_ASSERT_TRUE(data_length < BUFFER_SIZE);
	offset = 8192;
	pattern = 0xA3;
	/* Params are invalid, hence the expected return value
	 * of write and read for all blockdevs is < 0 */
	expected_rc = -1;

	blockdev_write_read(data_length, 0, pattern, offset, expected_rc, 0);
}

static void
blockdev_write_read_offset_plus_nbytes_equals_bdev_size(void)
{
	struct io_target *target;
	struct spdk_bdev *bdev;
	char *tx_buf = NULL;
	char *rx_buf = NULL;
	uint64_t offset;
	uint32_t block_size;
	int rc;

	target = g_io_targets;
	while (target != NULL) {
		bdev = target->bdev;

		block_size = spdk_bdev_get_block_size(bdev);

		/* The start offset has been set to a marginal value
		 * such that offset + nbytes == Total size of
		 * blockdev. */
		offset = ((spdk_bdev_get_num_blocks(bdev) - 1) * block_size);

		initialize_buffer(&tx_buf, 0xA3, block_size);
		initialize_buffer(&rx_buf, 0, block_size);

		blockdev_write(target, tx_buf, offset, block_size, 0);
		CU_ASSERT_EQUAL(g_completion_success, true);

		blockdev_read(target, rx_buf, offset, block_size, 0);
		CU_ASSERT_EQUAL(g_completion_success, true);

		rc = blockdev_write_read_data_match(rx_buf, tx_buf, block_size);
		/* Assert the write by comparing it with values read
		 * from each blockdev */
		CU_ASSERT_EQUAL(rc, 0);

		target = target->next;
	}
}

static void
blockdev_write_read_offset_plus_nbytes_gt_bdev_size(void)
{
	struct io_target *target;
	struct spdk_bdev *bdev;
	char *tx_buf = NULL;
	char *rx_buf = NULL;
	int data_length;
	uint64_t offset;
	int pattern;

	/* Tests the overflow condition of the blockdevs. */
	data_length = 4096;
	CU_ASSERT_TRUE(data_length < BUFFER_SIZE);
	pattern = 0xA3;

	target = g_io_targets;
	while (target != NULL) {
		bdev = target->bdev;

		/* The start offset has been set to a valid value
		 * but offset + nbytes is greater than the Total size
		 * of the blockdev. The test should fail. */
		offset = ((spdk_bdev_get_num_blocks(bdev) * spdk_bdev_get_block_size(bdev)) - 1024);

		initialize_buffer(&tx_buf, pattern, data_length);
		initialize_buffer(&rx_buf, 0, data_length);

		blockdev_write(target, tx_buf, offset, data_length, 0);
		CU_ASSERT_EQUAL(g_completion_success, false);

		blockdev_read(target, rx_buf, offset, data_length, 0);
		CU_ASSERT_EQUAL(g_completion_success, false);

		target = target->next;
	}
}

static void
blockdev_write_read_max_offset(void)
{
	int data_length;
	uint64_t offset;
	int pattern;
	int expected_rc;

	data_length = 4096;
	CU_ASSERT_TRUE(data_length < BUFFER_SIZE);
	/* The start offset has been set to UINT64_MAX such that
	 * adding nbytes wraps around and points to an invalid address. */
	offset = UINT64_MAX;
	pattern = 0xA3;
	/* Params are invalid, hence the expected return value
	 * of write and read for all blockdevs is < 0 */
	expected_rc = -1;

	blockdev_write_read(data_length, 0, pattern, offset, expected_rc, 0);
}

static void
blockdev_overlapped_write_read_8k(void)
{
	int data_length;
	uint64_t offset;
	int pattern;
	int expected_rc;

	/* Data size = 8K */
	data_length = 8192;
	CU_ASSERT_TRUE(data_length < BUFFER_SIZE);
	offset = 0;
	pattern = 0xA3;
	/* Params are valid, hence the expected return value
	 * of write and read for all blockdevs is 0. */
	expected_rc = 0;
	/* Assert the write by comparing it with values read
	 * from the same offset for each blockdev */
	blockdev_write_read(data_length, 0, pattern, offset, expected_rc, 0);

	/* Overwrite the pattern 0xbb of size 8K on an address offset overlapping
	 * with the address written above and assert the new value in
	 * the overlapped address range */
	/* Populate 8k with value 0xBB */
	pattern = 0xBB;
	/* Offset = 4096; overlap the offset addresses written above with value 0xbb */
	offset = 4096;
	/* Assert the write by comparing it with values read
	 * from the overlapped offset for each blockdev */
	blockdev_write_read(data_length, 0, pattern, offset, expected_rc, 0);
}

static void
__blockdev_reset(void *arg1, void *arg2)
{
	struct bdevio_request *req = arg1;
	struct io_target *target = req->target;
	int rc;

	rc = spdk_bdev_reset(target->bdev_desc, target->ch, quick_test_complete, NULL);
	if (rc < 0) {
		g_completion_success = false;
		wake_ut_thread();
	}
}

static void
blockdev_reset(struct io_target *target)
{
	struct bdevio_request req;

	req.target = target;

	g_completion_success = false;

	execute_spdk_function(__blockdev_reset, &req, NULL);
}

static void
blockdev_test_reset(void)
{
	struct io_target *target;

	target = g_io_targets;
	while (target != NULL) {
		blockdev_reset(target);
		CU_ASSERT_EQUAL(g_completion_success, true);

		target = target->next;
	}
}

static void
__stop_init_thread(void *arg1, void *arg2)
{
	unsigned num_failures = (unsigned)(uintptr_t)arg1;

	bdevio_cleanup_targets();
	spdk_app_stop(num_failures);
}

static void
stop_init_thread(unsigned num_failures)
{
	struct spdk_event *event;

	event = spdk_event_allocate(LCORE_ID_INIT, __stop_init_thread,
				    (void *)(uintptr_t)num_failures, NULL);
	spdk_event_call(event);
}

static void
__run_ut_thread(void *arg1, void *arg2)
{
	CU_pSuite suite = NULL;
	unsigned num_failures;

	if (CU_initialize_registry() != CUE_SUCCESS) {
		stop_init_thread(CU_get_error());
		return;
	}

	suite = CU_add_suite("components_suite", NULL, NULL);
	if (suite == NULL) {
		CU_cleanup_registry();
		stop_init_thread(CU_get_error());
		return;
	}

	if (
		CU_add_test(suite, "blockdev write read 4k", blockdev_write_read_4k) == NULL
		|| CU_add_test(suite, "blockdev write zeroes read 4k", blockdev_write_zeroes_read_4k) == NULL
		|| CU_add_test(suite, "blockdev write zeroes read 1m", blockdev_write_zeroes_read_1m) == NULL
		|| CU_add_test(suite, "blockdev write zeroes read 3m", blockdev_write_zeroes_read_3m) == NULL
		|| CU_add_test(suite, "blockdev write zeroes read 3.5m",
			       blockdev_write_zeroes_read_3m_500k) == NULL
		|| CU_add_test(suite, "blockdev write read 512 bytes",
			       blockdev_write_read_512Bytes) == NULL
		|| CU_add_test(suite, "blockdev write read size > 128k",
			       blockdev_write_read_size_gt_128k) == NULL
		|| CU_add_test(suite, "blockdev write read invalid size",
			       blockdev_write_read_invalid_size) == NULL
		|| CU_add_test(suite, "blockdev write read offset + nbytes == size of blockdev",
			       blockdev_write_read_offset_plus_nbytes_equals_bdev_size) == NULL
		|| CU_add_test(suite, "blockdev write read offset + nbytes > size of blockdev",
			       blockdev_write_read_offset_plus_nbytes_gt_bdev_size) == NULL
		|| CU_add_test(suite, "blockdev write read max offset",
			       blockdev_write_read_max_offset) == NULL
		|| CU_add_test(suite, "blockdev write read 8k on overlapped address offset",
			       blockdev_overlapped_write_read_8k) == NULL
		|| CU_add_test(suite, "blockdev writev readv 4k", blockdev_writev_readv_4k) == NULL
		|| CU_add_test(suite, "blockdev writev readv 30 x 4k",
			       blockdev_writev_readv_30x4k) == NULL
		|| CU_add_test(suite, "blockdev writev readv 512 bytes",
			       blockdev_writev_readv_512Bytes) == NULL
		|| CU_add_test(suite, "blockdev writev readv size > 128k",
			       blockdev_writev_readv_size_gt_128k) == NULL
		|| CU_add_test(suite, "blockdev writev readv size > 128k in two iovs",
			       blockdev_writev_readv_size_gt_128k_two_iov) == NULL
		|| CU_add_test(suite, "blockdev reset",
			       blockdev_test_reset) == NULL
	) {
		CU_cleanup_registry();
		stop_init_thread(CU_get_error());
		return;
	}

	CU_basic_set_mode(CU_BRM_VERBOSE);
	CU_basic_run_tests();
	num_failures = CU_get_number_of_failures();
	CU_cleanup_registry();
	stop_init_thread(num_failures);
}

static void
test_main(void *arg1, void *arg2)
{
	struct spdk_event *event;

	pthread_mutex_init(&g_test_mutex, NULL);
	pthread_cond_init(&g_test_cond, NULL);

	if (bdevio_construct_targets() < 0) {
		spdk_app_stop(-1);
		return;
	}

	event = spdk_event_allocate(LCORE_ID_UT, __run_ut_thread, NULL, NULL);
	spdk_event_call(event);
}

int
main(int argc, char **argv)
{
	const char		*config_file;
	int			num_failures;
	struct spdk_app_opts	opts = {};

	if (argc == 1) {
		config_file = "/usr/local/etc/spdk/iscsi.conf";
	} else {
		config_file = argv[1];
	}

	bdevtest_init(config_file, "0x7", &opts);
	opts.rpc_addr = NULL;

	num_failures = spdk_app_start(&opts, test_main, NULL, NULL);
	spdk_app_fini();

	return num_failures;
}