1 /*- 2 * BSD LICENSE 3 * 4 * Copyright (c) Intel Corporation. 5 * All rights reserved. 6 * Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 7 * 8 * Redistribution and use in source and binary forms, with or without 9 * modification, are permitted provided that the following conditions 10 * are met: 11 * 12 * * Redistributions of source code must retain the above copyright 13 * notice, this list of conditions and the following disclaimer. 14 * * Redistributions in binary form must reproduce the above copyright 15 * notice, this list of conditions and the following disclaimer in 16 * the documentation and/or other materials provided with the 17 * distribution. 18 * * Neither the name of Intel Corporation nor the names of its 19 * contributors may be used to endorse or promote products derived 20 * from this software without specific prior written permission. 21 * 22 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 23 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 24 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 25 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 26 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 27 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 28 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 29 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 30 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 31 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 32 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 33 */ 34 35 #include "spdk/stdinc.h" 36 37 #include "spdk/bdev.h" 38 #include "spdk/accel_engine.h" 39 #include "spdk/env.h" 40 #include "spdk/log.h" 41 #include "spdk/thread.h" 42 #include "spdk/event.h" 43 #include "spdk/rpc.h" 44 #include "spdk/util.h" 45 #include "spdk/string.h" 46 47 #include "bdev_internal.h" 48 #include "CUnit/Basic.h" 49 50 #define BUFFER_IOVS 1024 51 #define BUFFER_SIZE 260 * 1024 52 #define BDEV_TASK_ARRAY_SIZE 2048 53 54 pthread_mutex_t g_test_mutex; 55 pthread_cond_t g_test_cond; 56 57 static struct spdk_thread *g_thread_init; 58 static struct spdk_thread *g_thread_ut; 59 static struct spdk_thread *g_thread_io; 60 static bool g_wait_for_tests = false; 61 static int g_num_failures = 0; 62 static bool g_shutdown = false; 63 64 struct io_target { 65 struct spdk_bdev *bdev; 66 struct spdk_bdev_desc *bdev_desc; 67 struct spdk_io_channel *ch; 68 struct io_target *next; 69 }; 70 71 struct bdevio_request { 72 char *buf; 73 char *fused_buf; 74 int data_len; 75 uint64_t offset; 76 struct iovec iov[BUFFER_IOVS]; 77 int iovcnt; 78 struct iovec fused_iov[BUFFER_IOVS]; 79 int fused_iovcnt; 80 struct io_target *target; 81 }; 82 83 struct io_target *g_io_targets = NULL; 84 struct io_target *g_current_io_target = NULL; 85 static void rpc_perform_tests_cb(unsigned num_failures, struct spdk_jsonrpc_request *request); 86 87 static void 88 execute_spdk_function(spdk_msg_fn fn, void *arg) 89 { 90 pthread_mutex_lock(&g_test_mutex); 91 spdk_thread_send_msg(g_thread_io, fn, arg); 92 pthread_cond_wait(&g_test_cond, &g_test_mutex); 93 pthread_mutex_unlock(&g_test_mutex); 94 } 95 96 static void 97 wake_ut_thread(void) 98 { 99 pthread_mutex_lock(&g_test_mutex); 100 pthread_cond_signal(&g_test_cond); 101 pthread_mutex_unlock(&g_test_mutex); 102 } 103 104 static void 105 __get_io_channel(void *arg) 106 { 107 struct io_target *target = arg; 108 109 target->ch = spdk_bdev_get_io_channel(target->bdev_desc); 110 assert(target->ch); 111 wake_ut_thread(); 112 } 113 114 static void 115 bdevio_construct_target_open_cb(enum spdk_bdev_event_type type, struct spdk_bdev *bdev, 116 void *event_ctx) 117 { 118 } 119 120 static int 121 bdevio_construct_target(struct spdk_bdev *bdev) 122 { 123 struct io_target *target; 124 int rc; 125 uint64_t num_blocks = spdk_bdev_get_num_blocks(bdev); 126 uint32_t block_size = spdk_bdev_get_block_size(bdev); 127 128 target = malloc(sizeof(struct io_target)); 129 if (target == NULL) { 130 return -ENOMEM; 131 } 132 133 rc = spdk_bdev_open_ext(spdk_bdev_get_name(bdev), true, bdevio_construct_target_open_cb, NULL, 134 &target->bdev_desc); 135 if (rc != 0) { 136 free(target); 137 SPDK_ERRLOG("Could not open leaf bdev %s, error=%d\n", spdk_bdev_get_name(bdev), rc); 138 return rc; 139 } 140 141 printf(" %s: %" PRIu64 " blocks of %" PRIu32 " bytes (%" PRIu64 " MiB)\n", 142 spdk_bdev_get_name(bdev), 143 num_blocks, block_size, 144 (num_blocks * block_size + 1024 * 1024 - 1) / (1024 * 1024)); 145 146 target->bdev = bdev; 147 target->next = g_io_targets; 148 execute_spdk_function(__get_io_channel, target); 149 g_io_targets = target; 150 151 return 0; 152 } 153 154 static int 155 bdevio_construct_targets(void) 156 { 157 struct spdk_bdev *bdev; 158 int rc; 159 160 printf("I/O targets:\n"); 161 162 bdev = spdk_bdev_first_leaf(); 163 while (bdev != NULL) { 164 rc = bdevio_construct_target(bdev); 165 if (rc < 0) { 166 SPDK_ERRLOG("Could not construct bdev %s, error=%d\n", spdk_bdev_get_name(bdev), rc); 167 return rc; 168 } 169 bdev = spdk_bdev_next_leaf(bdev); 170 } 171 172 if (g_io_targets == NULL) { 173 SPDK_ERRLOG("No bdevs to perform tests on\n"); 174 return -1; 175 } 176 177 return 0; 178 } 179 180 static void 181 __put_io_channel(void *arg) 182 { 183 struct io_target *target = arg; 184 185 spdk_put_io_channel(target->ch); 186 wake_ut_thread(); 187 } 188 189 static void 190 bdevio_cleanup_targets(void) 191 { 192 struct io_target *target; 193 194 target = g_io_targets; 195 while (target != NULL) { 196 execute_spdk_function(__put_io_channel, target); 197 spdk_bdev_close(target->bdev_desc); 198 g_io_targets = target->next; 199 free(target); 200 target = g_io_targets; 201 } 202 } 203 204 static bool g_completion_success; 205 206 static void 207 initialize_buffer(char **buf, int pattern, int size) 208 { 209 *buf = spdk_zmalloc(size, 0x1000, NULL, SPDK_ENV_LCORE_ID_ANY, SPDK_MALLOC_DMA); 210 memset(*buf, pattern, size); 211 } 212 213 static void 214 quick_test_complete(struct spdk_bdev_io *bdev_io, bool success, void *arg) 215 { 216 g_completion_success = success; 217 spdk_bdev_free_io(bdev_io); 218 wake_ut_thread(); 219 } 220 221 static uint64_t 222 bdev_bytes_to_blocks(struct spdk_bdev *bdev, uint64_t bytes) 223 { 224 uint32_t block_size = spdk_bdev_get_block_size(bdev); 225 226 CU_ASSERT(bytes % block_size == 0); 227 return bytes / block_size; 228 } 229 230 static void 231 __blockdev_write(void *arg) 232 { 233 struct bdevio_request *req = arg; 234 struct io_target *target = req->target; 235 int rc; 236 237 if (req->iovcnt) { 238 rc = spdk_bdev_writev(target->bdev_desc, target->ch, req->iov, req->iovcnt, req->offset, 239 req->data_len, quick_test_complete, NULL); 240 } else { 241 rc = spdk_bdev_write(target->bdev_desc, target->ch, req->buf, req->offset, 242 req->data_len, quick_test_complete, NULL); 243 } 244 245 if (rc) { 246 g_completion_success = false; 247 wake_ut_thread(); 248 } 249 } 250 251 static void 252 __blockdev_write_zeroes(void *arg) 253 { 254 struct bdevio_request *req = arg; 255 struct io_target *target = req->target; 256 int rc; 257 258 rc = spdk_bdev_write_zeroes(target->bdev_desc, target->ch, req->offset, 259 req->data_len, quick_test_complete, NULL); 260 if (rc) { 261 g_completion_success = false; 262 wake_ut_thread(); 263 } 264 } 265 266 static void 267 __blockdev_compare_and_write(void *arg) 268 { 269 struct bdevio_request *req = arg; 270 struct io_target *target = req->target; 271 struct spdk_bdev *bdev = target->bdev; 272 int rc; 273 274 rc = spdk_bdev_comparev_and_writev_blocks(target->bdev_desc, target->ch, req->iov, req->iovcnt, 275 req->fused_iov, req->fused_iovcnt, bdev_bytes_to_blocks(bdev, req->offset), 276 bdev_bytes_to_blocks(bdev, req->data_len), quick_test_complete, NULL); 277 278 if (rc) { 279 g_completion_success = false; 280 wake_ut_thread(); 281 } 282 } 283 284 static void 285 sgl_chop_buffer(struct bdevio_request *req, int iov_len) 286 { 287 int data_len = req->data_len; 288 char *buf = req->buf; 289 290 req->iovcnt = 0; 291 if (!iov_len) { 292 return; 293 } 294 295 for (; data_len > 0 && req->iovcnt < BUFFER_IOVS; req->iovcnt++) { 296 if (data_len < iov_len) { 297 iov_len = data_len; 298 } 299 300 req->iov[req->iovcnt].iov_base = buf; 301 req->iov[req->iovcnt].iov_len = iov_len; 302 303 buf += iov_len; 304 data_len -= iov_len; 305 } 306 307 CU_ASSERT_EQUAL_FATAL(data_len, 0); 308 } 309 310 static void 311 sgl_chop_fused_buffer(struct bdevio_request *req, int iov_len) 312 { 313 int data_len = req->data_len; 314 char *buf = req->fused_buf; 315 316 req->fused_iovcnt = 0; 317 if (!iov_len) { 318 return; 319 } 320 321 for (; data_len > 0 && req->fused_iovcnt < BUFFER_IOVS; req->fused_iovcnt++) { 322 if (data_len < iov_len) { 323 iov_len = data_len; 324 } 325 326 req->fused_iov[req->fused_iovcnt].iov_base = buf; 327 req->fused_iov[req->fused_iovcnt].iov_len = iov_len; 328 329 buf += iov_len; 330 data_len -= iov_len; 331 } 332 333 CU_ASSERT_EQUAL_FATAL(data_len, 0); 334 } 335 336 static void 337 blockdev_write(struct io_target *target, char *tx_buf, 338 uint64_t offset, int data_len, int iov_len) 339 { 340 struct bdevio_request req; 341 342 req.target = target; 343 req.buf = tx_buf; 344 req.data_len = data_len; 345 req.offset = offset; 346 sgl_chop_buffer(&req, iov_len); 347 348 g_completion_success = false; 349 350 execute_spdk_function(__blockdev_write, &req); 351 } 352 353 static void 354 _blockdev_compare_and_write(struct io_target *target, char *cmp_buf, char *write_buf, 355 uint64_t offset, int data_len, int iov_len) 356 { 357 struct bdevio_request req; 358 359 req.target = target; 360 req.buf = cmp_buf; 361 req.fused_buf = write_buf; 362 req.data_len = data_len; 363 req.offset = offset; 364 sgl_chop_buffer(&req, iov_len); 365 sgl_chop_fused_buffer(&req, iov_len); 366 367 g_completion_success = false; 368 369 execute_spdk_function(__blockdev_compare_and_write, &req); 370 } 371 372 static void 373 blockdev_write_zeroes(struct io_target *target, char *tx_buf, 374 uint64_t offset, int data_len) 375 { 376 struct bdevio_request req; 377 378 req.target = target; 379 req.buf = tx_buf; 380 req.data_len = data_len; 381 req.offset = offset; 382 383 g_completion_success = false; 384 385 execute_spdk_function(__blockdev_write_zeroes, &req); 386 } 387 388 static void 389 __blockdev_read(void *arg) 390 { 391 struct bdevio_request *req = arg; 392 struct io_target *target = req->target; 393 int rc; 394 395 if (req->iovcnt) { 396 rc = spdk_bdev_readv(target->bdev_desc, target->ch, req->iov, req->iovcnt, req->offset, 397 req->data_len, quick_test_complete, NULL); 398 } else { 399 rc = spdk_bdev_read(target->bdev_desc, target->ch, req->buf, req->offset, 400 req->data_len, quick_test_complete, NULL); 401 } 402 403 if (rc) { 404 g_completion_success = false; 405 wake_ut_thread(); 406 } 407 } 408 409 static void 410 blockdev_read(struct io_target *target, char *rx_buf, 411 uint64_t offset, int data_len, int iov_len) 412 { 413 struct bdevio_request req; 414 415 req.target = target; 416 req.buf = rx_buf; 417 req.data_len = data_len; 418 req.offset = offset; 419 req.iovcnt = 0; 420 sgl_chop_buffer(&req, iov_len); 421 422 g_completion_success = false; 423 424 execute_spdk_function(__blockdev_read, &req); 425 } 426 427 static int 428 blockdev_write_read_data_match(char *rx_buf, char *tx_buf, int data_length) 429 { 430 int rc; 431 rc = memcmp(rx_buf, tx_buf, data_length); 432 433 spdk_free(rx_buf); 434 spdk_free(tx_buf); 435 436 return rc; 437 } 438 439 static void 440 blockdev_write_read(uint32_t data_length, uint32_t iov_len, int pattern, uint64_t offset, 441 int expected_rc, bool write_zeroes) 442 { 443 struct io_target *target; 444 char *tx_buf = NULL; 445 char *rx_buf = NULL; 446 int rc; 447 448 target = g_current_io_target; 449 450 if (!write_zeroes) { 451 initialize_buffer(&tx_buf, pattern, data_length); 452 initialize_buffer(&rx_buf, 0, data_length); 453 454 blockdev_write(target, tx_buf, offset, data_length, iov_len); 455 } else { 456 initialize_buffer(&tx_buf, 0, data_length); 457 initialize_buffer(&rx_buf, pattern, data_length); 458 459 blockdev_write_zeroes(target, tx_buf, offset, data_length); 460 } 461 462 463 if (expected_rc == 0) { 464 CU_ASSERT_EQUAL(g_completion_success, true); 465 } else { 466 CU_ASSERT_EQUAL(g_completion_success, false); 467 } 468 blockdev_read(target, rx_buf, offset, data_length, iov_len); 469 470 if (expected_rc == 0) { 471 CU_ASSERT_EQUAL(g_completion_success, true); 472 } else { 473 CU_ASSERT_EQUAL(g_completion_success, false); 474 } 475 476 if (g_completion_success) { 477 rc = blockdev_write_read_data_match(rx_buf, tx_buf, data_length); 478 /* Assert the write by comparing it with values read 479 * from each blockdev */ 480 CU_ASSERT_EQUAL(rc, 0); 481 } 482 } 483 484 static void 485 blockdev_compare_and_write(uint32_t data_length, uint32_t iov_len, uint64_t offset) 486 { 487 struct io_target *target; 488 char *tx_buf = NULL; 489 char *write_buf = NULL; 490 char *rx_buf = NULL; 491 int rc; 492 493 target = g_current_io_target; 494 495 initialize_buffer(&tx_buf, 0xAA, data_length); 496 initialize_buffer(&rx_buf, 0, data_length); 497 initialize_buffer(&write_buf, 0xBB, data_length); 498 499 blockdev_write(target, tx_buf, offset, data_length, iov_len); 500 CU_ASSERT_EQUAL(g_completion_success, true); 501 502 _blockdev_compare_and_write(target, tx_buf, write_buf, offset, data_length, iov_len); 503 CU_ASSERT_EQUAL(g_completion_success, true); 504 505 _blockdev_compare_and_write(target, tx_buf, write_buf, offset, data_length, iov_len); 506 CU_ASSERT_EQUAL(g_completion_success, false); 507 508 blockdev_read(target, rx_buf, offset, data_length, iov_len); 509 CU_ASSERT_EQUAL(g_completion_success, true); 510 rc = blockdev_write_read_data_match(rx_buf, write_buf, data_length); 511 /* Assert the write by comparing it with values read 512 * from each blockdev */ 513 CU_ASSERT_EQUAL(rc, 0); 514 } 515 516 static void 517 blockdev_write_read_block(void) 518 { 519 uint32_t data_length; 520 uint64_t offset; 521 int pattern; 522 int expected_rc; 523 struct io_target *target = g_current_io_target; 524 struct spdk_bdev *bdev = target->bdev; 525 526 /* Data size = 1 block */ 527 data_length = spdk_bdev_get_block_size(bdev); 528 CU_ASSERT_TRUE(data_length < BUFFER_SIZE); 529 offset = 0; 530 pattern = 0xA3; 531 /* Params are valid, hence the expected return value 532 * of write and read for all blockdevs is 0. */ 533 expected_rc = 0; 534 535 blockdev_write_read(data_length, 0, pattern, offset, expected_rc, 0); 536 } 537 538 static void 539 blockdev_write_zeroes_read_block(void) 540 { 541 uint32_t data_length; 542 uint64_t offset; 543 int pattern; 544 int expected_rc; 545 struct io_target *target = g_current_io_target; 546 struct spdk_bdev *bdev = target->bdev; 547 548 /* Data size = 1 block */ 549 data_length = spdk_bdev_get_block_size(bdev); 550 offset = 0; 551 pattern = 0xA3; 552 /* Params are valid, hence the expected return value 553 * of write_zeroes and read for all blockdevs is 0. */ 554 expected_rc = 0; 555 556 blockdev_write_read(data_length, 0, pattern, offset, expected_rc, 1); 557 } 558 559 /* 560 * This i/o will not have to split at the bdev layer. 561 */ 562 static void 563 blockdev_write_zeroes_read_no_split(void) 564 { 565 uint32_t data_length; 566 uint64_t offset; 567 int pattern; 568 int expected_rc; 569 struct io_target *target = g_current_io_target; 570 struct spdk_bdev *bdev = target->bdev; 571 572 /* Data size = block size aligned ZERO_BUFFER_SIZE */ 573 data_length = ZERO_BUFFER_SIZE; /* from bdev_internal.h */ 574 data_length -= ZERO_BUFFER_SIZE % spdk_bdev_get_block_size(bdev); 575 offset = 0; 576 pattern = 0xA3; 577 /* Params are valid, hence the expected return value 578 * of write_zeroes and read for all blockdevs is 0. */ 579 expected_rc = 0; 580 581 blockdev_write_read(data_length, 0, pattern, offset, expected_rc, 1); 582 } 583 584 /* 585 * This i/o will have to split at the bdev layer if 586 * write-zeroes is not supported by the bdev. 587 */ 588 static void 589 blockdev_write_zeroes_read_split(void) 590 { 591 uint32_t data_length; 592 uint64_t offset; 593 int pattern; 594 int expected_rc; 595 struct io_target *target = g_current_io_target; 596 struct spdk_bdev *bdev = target->bdev; 597 598 /* Data size = block size aligned 3 * ZERO_BUFFER_SIZE */ 599 data_length = 3 * ZERO_BUFFER_SIZE; /* from bdev_internal.h */ 600 data_length -= data_length % spdk_bdev_get_block_size(bdev); 601 offset = 0; 602 pattern = 0xA3; 603 /* Params are valid, hence the expected return value 604 * of write_zeroes and read for all blockdevs is 0. */ 605 expected_rc = 0; 606 607 blockdev_write_read(data_length, 0, pattern, offset, expected_rc, 1); 608 } 609 610 /* 611 * This i/o will have to split at the bdev layer if 612 * write-zeroes is not supported by the bdev. It also 613 * tests a write size that is not an even multiple of 614 * the bdev layer zero buffer size. 615 */ 616 static void 617 blockdev_write_zeroes_read_split_partial(void) 618 { 619 uint32_t data_length; 620 uint64_t offset; 621 int pattern; 622 int expected_rc; 623 struct io_target *target = g_current_io_target; 624 struct spdk_bdev *bdev = target->bdev; 625 uint32_t block_size = spdk_bdev_get_block_size(bdev); 626 627 /* Data size = block size aligned 7 * ZERO_BUFFER_SIZE / 2 */ 628 data_length = ZERO_BUFFER_SIZE * 7 / 2; 629 data_length -= data_length % block_size; 630 offset = 0; 631 pattern = 0xA3; 632 /* Params are valid, hence the expected return value 633 * of write_zeroes and read for all blockdevs is 0. */ 634 expected_rc = 0; 635 636 blockdev_write_read(data_length, 0, pattern, offset, expected_rc, 1); 637 } 638 639 static void 640 blockdev_writev_readv_block(void) 641 { 642 uint32_t data_length, iov_len; 643 uint64_t offset; 644 int pattern; 645 int expected_rc; 646 struct io_target *target = g_current_io_target; 647 struct spdk_bdev *bdev = target->bdev; 648 649 /* Data size = 1 block */ 650 data_length = spdk_bdev_get_block_size(bdev); 651 iov_len = data_length; 652 CU_ASSERT_TRUE(data_length < BUFFER_SIZE); 653 offset = 0; 654 pattern = 0xA3; 655 /* Params are valid, hence the expected return value 656 * of write and read for all blockdevs is 0. */ 657 expected_rc = 0; 658 659 blockdev_write_read(data_length, iov_len, pattern, offset, expected_rc, 0); 660 } 661 662 static void 663 blockdev_comparev_and_writev(void) 664 { 665 uint32_t data_length, iov_len; 666 uint64_t offset; 667 struct io_target *target = g_current_io_target; 668 struct spdk_bdev *bdev = target->bdev; 669 670 /* Data size = acwu size */ 671 data_length = spdk_bdev_get_block_size(bdev) * spdk_bdev_get_acwu(bdev); 672 iov_len = data_length; 673 CU_ASSERT_TRUE(data_length < BUFFER_SIZE); 674 offset = 0; 675 676 blockdev_compare_and_write(data_length, iov_len, offset); 677 } 678 679 static void 680 blockdev_writev_readv_30x1block(void) 681 { 682 uint32_t data_length, iov_len; 683 uint64_t offset; 684 int pattern; 685 int expected_rc; 686 struct io_target *target = g_current_io_target; 687 struct spdk_bdev *bdev = target->bdev; 688 uint32_t block_size = spdk_bdev_get_block_size(bdev); 689 690 /* Data size = 30 * block size */ 691 data_length = block_size * 30; 692 iov_len = block_size; 693 CU_ASSERT_TRUE(data_length < BUFFER_SIZE); 694 offset = 0; 695 pattern = 0xA3; 696 /* Params are valid, hence the expected return value 697 * of write and read for all blockdevs is 0. */ 698 expected_rc = 0; 699 700 blockdev_write_read(data_length, iov_len, pattern, offset, expected_rc, 0); 701 } 702 703 static void 704 blockdev_write_read_8blocks(void) 705 { 706 uint32_t data_length; 707 uint64_t offset; 708 int pattern; 709 int expected_rc; 710 struct io_target *target = g_current_io_target; 711 struct spdk_bdev *bdev = target->bdev; 712 713 /* Data size = 8 * block size */ 714 data_length = spdk_bdev_get_block_size(bdev) * 8; 715 CU_ASSERT_TRUE(data_length < BUFFER_SIZE); 716 offset = data_length; 717 pattern = 0xA3; 718 /* Params are valid, hence the expected return value 719 * of write and read for all blockdevs is 0. */ 720 expected_rc = 0; 721 722 blockdev_write_read(data_length, 0, pattern, offset, expected_rc, 0); 723 } 724 725 static void 726 blockdev_writev_readv_8blocks(void) 727 { 728 uint32_t data_length, iov_len; 729 uint64_t offset; 730 int pattern; 731 int expected_rc; 732 struct io_target *target = g_current_io_target; 733 struct spdk_bdev *bdev = target->bdev; 734 735 /* Data size = 8 * block size */ 736 data_length = spdk_bdev_get_block_size(bdev) * 8; 737 iov_len = data_length; 738 CU_ASSERT_TRUE(data_length < BUFFER_SIZE); 739 offset = data_length; 740 pattern = 0xA3; 741 /* Params are valid, hence the expected return value 742 * of write and read for all blockdevs is 0. */ 743 expected_rc = 0; 744 745 blockdev_write_read(data_length, iov_len, pattern, offset, expected_rc, 0); 746 } 747 748 static void 749 blockdev_write_read_size_gt_128k(void) 750 { 751 uint32_t data_length; 752 uint64_t offset; 753 int pattern; 754 int expected_rc; 755 struct io_target *target = g_current_io_target; 756 struct spdk_bdev *bdev = target->bdev; 757 uint32_t block_size = spdk_bdev_get_block_size(bdev); 758 759 /* Data size = block size aligned 128K + 1 block */ 760 data_length = 128 * 1024; 761 data_length -= data_length % block_size; 762 data_length += block_size; 763 CU_ASSERT_TRUE(data_length < BUFFER_SIZE); 764 offset = block_size * 2; 765 pattern = 0xA3; 766 /* Params are valid, hence the expected return value 767 * of write and read for all blockdevs is 0. */ 768 expected_rc = 0; 769 770 blockdev_write_read(data_length, 0, pattern, offset, expected_rc, 0); 771 } 772 773 static void 774 blockdev_writev_readv_size_gt_128k(void) 775 { 776 uint32_t data_length, iov_len; 777 uint64_t offset; 778 int pattern; 779 int expected_rc; 780 struct io_target *target = g_current_io_target; 781 struct spdk_bdev *bdev = target->bdev; 782 uint32_t block_size = spdk_bdev_get_block_size(bdev); 783 784 /* Data size = block size aligned 128K + 1 block */ 785 data_length = 128 * 1024; 786 data_length -= data_length % block_size; 787 data_length += block_size; 788 iov_len = data_length; 789 CU_ASSERT_TRUE(data_length < BUFFER_SIZE); 790 offset = block_size * 2; 791 pattern = 0xA3; 792 /* Params are valid, hence the expected return value 793 * of write and read for all blockdevs is 0. */ 794 expected_rc = 0; 795 796 blockdev_write_read(data_length, iov_len, pattern, offset, expected_rc, 0); 797 } 798 799 static void 800 blockdev_writev_readv_size_gt_128k_two_iov(void) 801 { 802 uint32_t data_length, iov_len; 803 uint64_t offset; 804 int pattern; 805 int expected_rc; 806 struct io_target *target = g_current_io_target; 807 struct spdk_bdev *bdev = target->bdev; 808 uint32_t block_size = spdk_bdev_get_block_size(bdev); 809 810 /* Data size = block size aligned 128K + 1 block */ 811 data_length = 128 * 1024; 812 data_length -= data_length % block_size; 813 iov_len = data_length; 814 data_length += block_size; 815 CU_ASSERT_TRUE(data_length < BUFFER_SIZE); 816 offset = block_size * 2; 817 pattern = 0xA3; 818 /* Params are valid, hence the expected return value 819 * of write and read for all blockdevs is 0. */ 820 expected_rc = 0; 821 822 blockdev_write_read(data_length, iov_len, pattern, offset, expected_rc, 0); 823 } 824 825 static void 826 blockdev_write_read_invalid_size(void) 827 { 828 uint32_t data_length; 829 uint64_t offset; 830 int pattern; 831 int expected_rc; 832 struct io_target *target = g_current_io_target; 833 struct spdk_bdev *bdev = target->bdev; 834 uint32_t block_size = spdk_bdev_get_block_size(bdev); 835 836 /* Data size is not a multiple of the block size */ 837 data_length = block_size - 1; 838 CU_ASSERT_TRUE(data_length < BUFFER_SIZE); 839 offset = block_size * 2; 840 pattern = 0xA3; 841 /* Params are invalid, hence the expected return value 842 * of write and read for all blockdevs is < 0 */ 843 expected_rc = -1; 844 845 blockdev_write_read(data_length, 0, pattern, offset, expected_rc, 0); 846 } 847 848 static void 849 blockdev_write_read_offset_plus_nbytes_equals_bdev_size(void) 850 { 851 struct io_target *target; 852 struct spdk_bdev *bdev; 853 char *tx_buf = NULL; 854 char *rx_buf = NULL; 855 uint64_t offset; 856 uint32_t block_size; 857 int rc; 858 859 target = g_current_io_target; 860 bdev = target->bdev; 861 862 block_size = spdk_bdev_get_block_size(bdev); 863 864 /* The start offset has been set to a marginal value 865 * such that offset + nbytes == Total size of 866 * blockdev. */ 867 offset = ((spdk_bdev_get_num_blocks(bdev) - 1) * block_size); 868 869 initialize_buffer(&tx_buf, 0xA3, block_size); 870 initialize_buffer(&rx_buf, 0, block_size); 871 872 blockdev_write(target, tx_buf, offset, block_size, 0); 873 CU_ASSERT_EQUAL(g_completion_success, true); 874 875 blockdev_read(target, rx_buf, offset, block_size, 0); 876 CU_ASSERT_EQUAL(g_completion_success, true); 877 878 rc = blockdev_write_read_data_match(rx_buf, tx_buf, block_size); 879 /* Assert the write by comparing it with values read 880 * from each blockdev */ 881 CU_ASSERT_EQUAL(rc, 0); 882 } 883 884 static void 885 blockdev_write_read_offset_plus_nbytes_gt_bdev_size(void) 886 { 887 struct io_target *target = g_current_io_target; 888 struct spdk_bdev *bdev = target->bdev; 889 char *tx_buf = NULL; 890 char *rx_buf = NULL; 891 int data_length; 892 uint64_t offset; 893 int pattern; 894 uint32_t block_size = spdk_bdev_get_block_size(bdev); 895 896 /* Tests the overflow condition of the blockdevs. */ 897 data_length = block_size * 2; 898 CU_ASSERT_TRUE(data_length < BUFFER_SIZE); 899 pattern = 0xA3; 900 901 target = g_current_io_target; 902 bdev = target->bdev; 903 904 /* The start offset has been set to a valid value 905 * but offset + nbytes is greater than the Total size 906 * of the blockdev. The test should fail. */ 907 offset = (spdk_bdev_get_num_blocks(bdev) - 1) * block_size; 908 909 initialize_buffer(&tx_buf, pattern, data_length); 910 initialize_buffer(&rx_buf, 0, data_length); 911 912 blockdev_write(target, tx_buf, offset, data_length, 0); 913 CU_ASSERT_EQUAL(g_completion_success, false); 914 915 blockdev_read(target, rx_buf, offset, data_length, 0); 916 CU_ASSERT_EQUAL(g_completion_success, false); 917 } 918 919 static void 920 blockdev_write_read_max_offset(void) 921 { 922 int data_length; 923 uint64_t offset; 924 int pattern; 925 int expected_rc; 926 struct io_target *target = g_current_io_target; 927 struct spdk_bdev *bdev = target->bdev; 928 929 data_length = spdk_bdev_get_block_size(bdev); 930 CU_ASSERT_TRUE(data_length < BUFFER_SIZE); 931 /* The start offset has been set to UINT64_MAX such that 932 * adding nbytes wraps around and points to an invalid address. */ 933 offset = UINT64_MAX; 934 pattern = 0xA3; 935 /* Params are invalid, hence the expected return value 936 * of write and read for all blockdevs is < 0 */ 937 expected_rc = -1; 938 939 blockdev_write_read(data_length, 0, pattern, offset, expected_rc, 0); 940 } 941 942 static void 943 blockdev_overlapped_write_read_2blocks(void) 944 { 945 int data_length; 946 uint64_t offset; 947 int pattern; 948 int expected_rc; 949 struct io_target *target = g_current_io_target; 950 struct spdk_bdev *bdev = target->bdev; 951 952 /* Data size = 2 blocks */ 953 data_length = spdk_bdev_get_block_size(bdev) * 2; 954 CU_ASSERT_TRUE(data_length < BUFFER_SIZE); 955 offset = 0; 956 pattern = 0xA3; 957 /* Params are valid, hence the expected return value 958 * of write and read for all blockdevs is 0. */ 959 expected_rc = 0; 960 /* Assert the write by comparing it with values read 961 * from the same offset for each blockdev */ 962 blockdev_write_read(data_length, 0, pattern, offset, expected_rc, 0); 963 964 /* Overwrite the pattern 0xbb of size 2*block size on an address offset 965 * overlapping with the address written above and assert the new value in 966 * the overlapped address range */ 967 /* Populate 2*block size with value 0xBB */ 968 pattern = 0xBB; 969 /* Offset = 1 block; Overlap offset addresses and write value 0xbb */ 970 offset = spdk_bdev_get_block_size(bdev); 971 /* Assert the write by comparing it with values read 972 * from the overlapped offset for each blockdev */ 973 blockdev_write_read(data_length, 0, pattern, offset, expected_rc, 0); 974 } 975 976 static void 977 __blockdev_reset(void *arg) 978 { 979 struct bdevio_request *req = arg; 980 struct io_target *target = req->target; 981 int rc; 982 983 rc = spdk_bdev_reset(target->bdev_desc, target->ch, quick_test_complete, NULL); 984 if (rc < 0) { 985 g_completion_success = false; 986 wake_ut_thread(); 987 } 988 } 989 990 static void 991 blockdev_test_reset(void) 992 { 993 struct bdevio_request req; 994 struct io_target *target; 995 bool reset_supported; 996 997 target = g_current_io_target; 998 req.target = target; 999 1000 reset_supported = spdk_bdev_io_type_supported(target->bdev, SPDK_BDEV_IO_TYPE_RESET); 1001 g_completion_success = false; 1002 1003 execute_spdk_function(__blockdev_reset, &req); 1004 1005 CU_ASSERT_EQUAL(g_completion_success, reset_supported); 1006 } 1007 1008 struct bdevio_passthrough_request { 1009 struct spdk_nvme_cmd cmd; 1010 void *buf; 1011 uint32_t len; 1012 struct io_target *target; 1013 int sct; 1014 int sc; 1015 uint32_t cdw0; 1016 }; 1017 1018 static void 1019 nvme_pt_test_complete(struct spdk_bdev_io *bdev_io, bool success, void *arg) 1020 { 1021 struct bdevio_passthrough_request *pt_req = arg; 1022 1023 spdk_bdev_io_get_nvme_status(bdev_io, &pt_req->cdw0, &pt_req->sct, &pt_req->sc); 1024 spdk_bdev_free_io(bdev_io); 1025 wake_ut_thread(); 1026 } 1027 1028 static void 1029 __blockdev_nvme_passthru(void *arg) 1030 { 1031 struct bdevio_passthrough_request *pt_req = arg; 1032 struct io_target *target = pt_req->target; 1033 int rc; 1034 1035 rc = spdk_bdev_nvme_io_passthru(target->bdev_desc, target->ch, 1036 &pt_req->cmd, pt_req->buf, pt_req->len, 1037 nvme_pt_test_complete, pt_req); 1038 if (rc) { 1039 wake_ut_thread(); 1040 } 1041 } 1042 1043 static void 1044 blockdev_test_nvme_passthru_rw(void) 1045 { 1046 struct bdevio_passthrough_request pt_req; 1047 void *write_buf, *read_buf; 1048 struct io_target *target; 1049 1050 target = g_current_io_target; 1051 1052 if (!spdk_bdev_io_type_supported(target->bdev, SPDK_BDEV_IO_TYPE_NVME_IO)) { 1053 return; 1054 } 1055 1056 memset(&pt_req, 0, sizeof(pt_req)); 1057 pt_req.target = target; 1058 pt_req.cmd.opc = SPDK_NVME_OPC_WRITE; 1059 pt_req.cmd.nsid = 1; 1060 *(uint64_t *)&pt_req.cmd.cdw10 = 4; 1061 pt_req.cmd.cdw12 = 0; 1062 1063 pt_req.len = spdk_bdev_get_block_size(target->bdev); 1064 write_buf = spdk_malloc(pt_req.len, 0, NULL, SPDK_ENV_LCORE_ID_ANY, SPDK_MALLOC_DMA); 1065 memset(write_buf, 0xA5, pt_req.len); 1066 pt_req.buf = write_buf; 1067 1068 pt_req.sct = SPDK_NVME_SCT_VENDOR_SPECIFIC; 1069 pt_req.sc = SPDK_NVME_SC_INVALID_FIELD; 1070 execute_spdk_function(__blockdev_nvme_passthru, &pt_req); 1071 CU_ASSERT(pt_req.sct == SPDK_NVME_SCT_GENERIC); 1072 CU_ASSERT(pt_req.sc == SPDK_NVME_SC_SUCCESS); 1073 1074 pt_req.cmd.opc = SPDK_NVME_OPC_READ; 1075 read_buf = spdk_zmalloc(pt_req.len, 0, NULL, SPDK_ENV_LCORE_ID_ANY, SPDK_MALLOC_DMA); 1076 pt_req.buf = read_buf; 1077 1078 pt_req.sct = SPDK_NVME_SCT_VENDOR_SPECIFIC; 1079 pt_req.sc = SPDK_NVME_SC_INVALID_FIELD; 1080 execute_spdk_function(__blockdev_nvme_passthru, &pt_req); 1081 CU_ASSERT(pt_req.sct == SPDK_NVME_SCT_GENERIC); 1082 CU_ASSERT(pt_req.sc == SPDK_NVME_SC_SUCCESS); 1083 1084 CU_ASSERT(!memcmp(read_buf, write_buf, pt_req.len)); 1085 spdk_free(read_buf); 1086 spdk_free(write_buf); 1087 } 1088 1089 static void 1090 blockdev_test_nvme_passthru_vendor_specific(void) 1091 { 1092 struct bdevio_passthrough_request pt_req; 1093 struct io_target *target; 1094 1095 target = g_current_io_target; 1096 1097 if (!spdk_bdev_io_type_supported(target->bdev, SPDK_BDEV_IO_TYPE_NVME_IO)) { 1098 return; 1099 } 1100 1101 memset(&pt_req, 0, sizeof(pt_req)); 1102 pt_req.target = target; 1103 pt_req.cmd.opc = 0x7F; /* choose known invalid opcode */ 1104 pt_req.cmd.nsid = 1; 1105 1106 pt_req.sct = SPDK_NVME_SCT_VENDOR_SPECIFIC; 1107 pt_req.sc = SPDK_NVME_SC_SUCCESS; 1108 pt_req.cdw0 = 0xbeef; 1109 execute_spdk_function(__blockdev_nvme_passthru, &pt_req); 1110 CU_ASSERT(pt_req.sct == SPDK_NVME_SCT_GENERIC); 1111 CU_ASSERT(pt_req.sc == SPDK_NVME_SC_INVALID_OPCODE); 1112 CU_ASSERT(pt_req.cdw0 == 0x0); 1113 } 1114 1115 static void 1116 __blockdev_nvme_admin_passthru(void *arg) 1117 { 1118 struct bdevio_passthrough_request *pt_req = arg; 1119 struct io_target *target = pt_req->target; 1120 int rc; 1121 1122 rc = spdk_bdev_nvme_admin_passthru(target->bdev_desc, target->ch, 1123 &pt_req->cmd, pt_req->buf, pt_req->len, 1124 nvme_pt_test_complete, pt_req); 1125 if (rc) { 1126 wake_ut_thread(); 1127 } 1128 } 1129 1130 static void 1131 blockdev_test_nvme_admin_passthru(void) 1132 { 1133 struct io_target *target; 1134 struct bdevio_passthrough_request pt_req; 1135 1136 target = g_current_io_target; 1137 1138 if (!spdk_bdev_io_type_supported(target->bdev, SPDK_BDEV_IO_TYPE_NVME_ADMIN)) { 1139 return; 1140 } 1141 1142 memset(&pt_req, 0, sizeof(pt_req)); 1143 pt_req.target = target; 1144 pt_req.cmd.opc = SPDK_NVME_OPC_IDENTIFY; 1145 pt_req.cmd.nsid = 0; 1146 *(uint64_t *)&pt_req.cmd.cdw10 = SPDK_NVME_IDENTIFY_CTRLR; 1147 1148 pt_req.len = sizeof(struct spdk_nvme_ctrlr_data); 1149 pt_req.buf = spdk_malloc(pt_req.len, 0, NULL, SPDK_ENV_LCORE_ID_ANY, SPDK_MALLOC_DMA); 1150 1151 pt_req.sct = SPDK_NVME_SCT_GENERIC; 1152 pt_req.sc = SPDK_NVME_SC_SUCCESS; 1153 execute_spdk_function(__blockdev_nvme_admin_passthru, &pt_req); 1154 CU_ASSERT(pt_req.sct == SPDK_NVME_SCT_GENERIC); 1155 CU_ASSERT(pt_req.sc == SPDK_NVME_SC_SUCCESS); 1156 } 1157 1158 static void 1159 __stop_init_thread(void *arg) 1160 { 1161 unsigned num_failures = g_num_failures; 1162 struct spdk_jsonrpc_request *request = arg; 1163 1164 g_num_failures = 0; 1165 1166 bdevio_cleanup_targets(); 1167 if (g_wait_for_tests && !g_shutdown) { 1168 /* Do not stop the app yet, wait for another RPC */ 1169 rpc_perform_tests_cb(num_failures, request); 1170 return; 1171 } 1172 spdk_app_stop(num_failures); 1173 } 1174 1175 static void 1176 stop_init_thread(unsigned num_failures, struct spdk_jsonrpc_request *request) 1177 { 1178 g_num_failures = num_failures; 1179 1180 spdk_thread_send_msg(g_thread_init, __stop_init_thread, request); 1181 } 1182 1183 static int 1184 suite_init(void) 1185 { 1186 if (g_current_io_target == NULL) { 1187 g_current_io_target = g_io_targets; 1188 } 1189 return 0; 1190 } 1191 1192 static int 1193 suite_fini(void) 1194 { 1195 g_current_io_target = g_current_io_target->next; 1196 return 0; 1197 } 1198 1199 #define SUITE_NAME_MAX 64 1200 1201 static int 1202 __setup_ut_on_single_target(struct io_target *target) 1203 { 1204 unsigned rc = 0; 1205 CU_pSuite suite = NULL; 1206 char name[SUITE_NAME_MAX]; 1207 1208 snprintf(name, sizeof(name), "bdevio tests on: %s", spdk_bdev_get_name(target->bdev)); 1209 suite = CU_add_suite(name, suite_init, suite_fini); 1210 if (suite == NULL) { 1211 CU_cleanup_registry(); 1212 rc = CU_get_error(); 1213 return -rc; 1214 } 1215 1216 if ( 1217 CU_add_test(suite, "blockdev write read block", 1218 blockdev_write_read_block) == NULL 1219 || CU_add_test(suite, "blockdev write zeroes read block", 1220 blockdev_write_zeroes_read_block) == NULL 1221 || CU_add_test(suite, "blockdev write zeroes read no split", 1222 blockdev_write_zeroes_read_no_split) == NULL 1223 || CU_add_test(suite, "blockdev write zeroes read split", 1224 blockdev_write_zeroes_read_split) == NULL 1225 || CU_add_test(suite, "blockdev write zeroes read split partial", 1226 blockdev_write_zeroes_read_split_partial) == NULL 1227 || CU_add_test(suite, "blockdev reset", 1228 blockdev_test_reset) == NULL 1229 || CU_add_test(suite, "blockdev write read 8 blocks", 1230 blockdev_write_read_8blocks) == NULL 1231 || CU_add_test(suite, "blockdev write read size > 128k", 1232 blockdev_write_read_size_gt_128k) == NULL 1233 || CU_add_test(suite, "blockdev write read invalid size", 1234 blockdev_write_read_invalid_size) == NULL 1235 || CU_add_test(suite, "blockdev write read offset + nbytes == size of blockdev", 1236 blockdev_write_read_offset_plus_nbytes_equals_bdev_size) == NULL 1237 || CU_add_test(suite, "blockdev write read offset + nbytes > size of blockdev", 1238 blockdev_write_read_offset_plus_nbytes_gt_bdev_size) == NULL 1239 || CU_add_test(suite, "blockdev write read max offset", 1240 blockdev_write_read_max_offset) == NULL 1241 || CU_add_test(suite, "blockdev write read 2 blocks on overlapped address offset", 1242 blockdev_overlapped_write_read_2blocks) == NULL 1243 || CU_add_test(suite, "blockdev writev readv 8 blocks", 1244 blockdev_writev_readv_8blocks) == NULL 1245 || CU_add_test(suite, "blockdev writev readv 30 x 1block", 1246 blockdev_writev_readv_30x1block) == NULL 1247 || CU_add_test(suite, "blockdev writev readv block", 1248 blockdev_writev_readv_block) == NULL 1249 || CU_add_test(suite, "blockdev writev readv size > 128k", 1250 blockdev_writev_readv_size_gt_128k) == NULL 1251 || CU_add_test(suite, "blockdev writev readv size > 128k in two iovs", 1252 blockdev_writev_readv_size_gt_128k_two_iov) == NULL 1253 || CU_add_test(suite, "blockdev comparev and writev", 1254 blockdev_comparev_and_writev) == NULL 1255 || CU_add_test(suite, "blockdev nvme passthru rw", 1256 blockdev_test_nvme_passthru_rw) == NULL 1257 || CU_add_test(suite, "blockdev nvme passthru vendor specific", 1258 blockdev_test_nvme_passthru_vendor_specific) == NULL 1259 || CU_add_test(suite, "blockdev nvme admin passthru", 1260 blockdev_test_nvme_admin_passthru) == NULL 1261 ) { 1262 CU_cleanup_registry(); 1263 rc = CU_get_error(); 1264 return -rc; 1265 } 1266 return 0; 1267 } 1268 1269 static void 1270 __run_ut_thread(void *arg) 1271 { 1272 struct spdk_jsonrpc_request *request = arg; 1273 int rc = 0; 1274 struct io_target *target; 1275 unsigned num_failures; 1276 1277 if (CU_initialize_registry() != CUE_SUCCESS) { 1278 /* CUnit error, probably won't recover */ 1279 rc = CU_get_error(); 1280 stop_init_thread(-rc, request); 1281 } 1282 1283 target = g_io_targets; 1284 while (target != NULL) { 1285 rc = __setup_ut_on_single_target(target); 1286 if (rc < 0) { 1287 /* CUnit error, probably won't recover */ 1288 stop_init_thread(-rc, request); 1289 } 1290 target = target->next; 1291 } 1292 CU_basic_set_mode(CU_BRM_VERBOSE); 1293 CU_basic_run_tests(); 1294 num_failures = CU_get_number_of_failures(); 1295 CU_cleanup_registry(); 1296 1297 stop_init_thread(num_failures, request); 1298 } 1299 1300 static void 1301 __construct_targets(void *arg) 1302 { 1303 if (bdevio_construct_targets() < 0) { 1304 spdk_app_stop(-1); 1305 return; 1306 } 1307 1308 spdk_thread_send_msg(g_thread_ut, __run_ut_thread, NULL); 1309 } 1310 1311 static void 1312 test_main(void *arg1) 1313 { 1314 struct spdk_cpuset tmpmask = {}; 1315 uint32_t i; 1316 1317 pthread_mutex_init(&g_test_mutex, NULL); 1318 pthread_cond_init(&g_test_cond, NULL); 1319 1320 /* This test runs specifically on at least three cores. 1321 * g_thread_init is the app_thread on main core from event framework. 1322 * Next two are only for the tests and should always be on separate CPU cores. */ 1323 if (spdk_env_get_core_count() < 3) { 1324 spdk_app_stop(-1); 1325 return; 1326 } 1327 1328 SPDK_ENV_FOREACH_CORE(i) { 1329 if (i == spdk_env_get_current_core()) { 1330 g_thread_init = spdk_get_thread(); 1331 continue; 1332 } 1333 spdk_cpuset_zero(&tmpmask); 1334 spdk_cpuset_set_cpu(&tmpmask, i, true); 1335 if (g_thread_ut == NULL) { 1336 g_thread_ut = spdk_thread_create("ut_thread", &tmpmask); 1337 } else if (g_thread_io == NULL) { 1338 g_thread_io = spdk_thread_create("io_thread", &tmpmask); 1339 } 1340 1341 } 1342 1343 if (g_wait_for_tests) { 1344 /* Do not perform any tests until RPC is received */ 1345 return; 1346 } 1347 1348 spdk_thread_send_msg(g_thread_init, __construct_targets, NULL); 1349 } 1350 1351 static void 1352 bdevio_usage(void) 1353 { 1354 printf(" -w start bdevio app and wait for RPC to start the tests\n"); 1355 } 1356 1357 static int 1358 bdevio_parse_arg(int ch, char *arg) 1359 { 1360 switch (ch) { 1361 case 'w': 1362 g_wait_for_tests = true; 1363 break; 1364 default: 1365 return -EINVAL; 1366 } 1367 return 0; 1368 } 1369 1370 struct rpc_perform_tests { 1371 char *name; 1372 }; 1373 1374 static void 1375 free_rpc_perform_tests(struct rpc_perform_tests *r) 1376 { 1377 free(r->name); 1378 } 1379 1380 static const struct spdk_json_object_decoder rpc_perform_tests_decoders[] = { 1381 {"name", offsetof(struct rpc_perform_tests, name), spdk_json_decode_string, true}, 1382 }; 1383 1384 static void 1385 rpc_perform_tests_cb(unsigned num_failures, struct spdk_jsonrpc_request *request) 1386 { 1387 struct spdk_json_write_ctx *w; 1388 1389 if (num_failures == 0) { 1390 w = spdk_jsonrpc_begin_result(request); 1391 spdk_json_write_uint32(w, num_failures); 1392 spdk_jsonrpc_end_result(request, w); 1393 } else { 1394 spdk_jsonrpc_send_error_response_fmt(request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR, 1395 "%d test cases failed", num_failures); 1396 } 1397 } 1398 1399 static void 1400 rpc_perform_tests(struct spdk_jsonrpc_request *request, const struct spdk_json_val *params) 1401 { 1402 struct rpc_perform_tests req = {NULL}; 1403 struct spdk_bdev *bdev; 1404 int rc; 1405 1406 if (params && spdk_json_decode_object(params, rpc_perform_tests_decoders, 1407 SPDK_COUNTOF(rpc_perform_tests_decoders), 1408 &req)) { 1409 SPDK_ERRLOG("spdk_json_decode_object failed\n"); 1410 spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INVALID_PARAMS, "Invalid parameters"); 1411 goto invalid; 1412 } 1413 1414 if (req.name) { 1415 bdev = spdk_bdev_get_by_name(req.name); 1416 if (bdev == NULL) { 1417 SPDK_ERRLOG("Bdev '%s' does not exist\n", req.name); 1418 spdk_jsonrpc_send_error_response_fmt(request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR, 1419 "Bdev '%s' does not exist: %s", 1420 req.name, spdk_strerror(ENODEV)); 1421 goto invalid; 1422 } 1423 rc = bdevio_construct_target(bdev); 1424 if (rc < 0) { 1425 SPDK_ERRLOG("Could not construct target for bdev '%s'\n", spdk_bdev_get_name(bdev)); 1426 spdk_jsonrpc_send_error_response_fmt(request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR, 1427 "Could not construct target for bdev '%s': %s", 1428 spdk_bdev_get_name(bdev), spdk_strerror(-rc)); 1429 goto invalid; 1430 } 1431 } else { 1432 rc = bdevio_construct_targets(); 1433 if (rc < 0) { 1434 SPDK_ERRLOG("Could not construct targets for all bdevs\n"); 1435 spdk_jsonrpc_send_error_response_fmt(request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR, 1436 "Could not construct targets for all bdevs: %s", 1437 spdk_strerror(-rc)); 1438 goto invalid; 1439 } 1440 } 1441 free_rpc_perform_tests(&req); 1442 1443 spdk_thread_send_msg(g_thread_ut, __run_ut_thread, request); 1444 1445 return; 1446 1447 invalid: 1448 free_rpc_perform_tests(&req); 1449 } 1450 SPDK_RPC_REGISTER("perform_tests", rpc_perform_tests, SPDK_RPC_RUNTIME) 1451 1452 static void 1453 spdk_bdevio_shutdown_cb(void) 1454 { 1455 g_shutdown = true; 1456 spdk_thread_send_msg(g_thread_init, __stop_init_thread, NULL); 1457 } 1458 1459 int 1460 main(int argc, char **argv) 1461 { 1462 int rc; 1463 struct spdk_app_opts opts = {}; 1464 1465 spdk_app_opts_init(&opts, sizeof(opts)); 1466 opts.name = "bdevio"; 1467 opts.reactor_mask = "0x7"; 1468 opts.shutdown_cb = spdk_bdevio_shutdown_cb; 1469 1470 if ((rc = spdk_app_parse_args(argc, argv, &opts, "w", NULL, 1471 bdevio_parse_arg, bdevio_usage)) != 1472 SPDK_APP_PARSE_ARGS_SUCCESS) { 1473 return rc; 1474 } 1475 1476 rc = spdk_app_start(&opts, test_main, NULL); 1477 spdk_app_fini(); 1478 1479 return rc; 1480 } 1481