/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (c) Intel Corporation.
 * All rights reserved.
 * Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 */

#include "spdk/stdinc.h"

#include "spdk/bdev.h"
#include "spdk/accel.h"
#include "spdk/env.h"
#include "spdk/log.h"
#include "spdk/thread.h"
#include "spdk/event.h"
#include "spdk/rpc.h"
#include "spdk/util.h"
#include "spdk/string.h"

#include "bdev_internal.h"
#include "CUnit/Basic.h"

#define BUFFER_IOVS		1024
#define BUFFER_SIZE		(260 * 1024)
#define BDEV_TASK_ARRAY_SIZE	2048

pthread_mutex_t g_test_mutex;
pthread_cond_t g_test_cond;

static struct spdk_thread *g_thread_init;
static struct spdk_thread *g_thread_ut;
static struct spdk_thread *g_thread_io;
static bool g_wait_for_tests = false;
static int g_num_failures = 0;
static bool g_shutdown = false;

struct io_target {
	struct spdk_bdev	*bdev;
	struct spdk_bdev_desc	*bdev_desc;
	struct spdk_io_channel	*ch;
	struct io_target	*next;
};

struct bdevio_request {
	char *buf;
	char *fused_buf;
	int data_len;
	uint64_t offset;
	struct iovec iov[BUFFER_IOVS];
	int iovcnt;
	struct iovec fused_iov[BUFFER_IOVS];
	int fused_iovcnt;
	struct io_target *target;
};

struct io_target *g_io_targets = NULL;
struct io_target *g_current_io_target = NULL;
static void rpc_perform_tests_cb(unsigned num_failures, struct spdk_jsonrpc_request *request);

static void
execute_spdk_function(spdk_msg_fn fn, void *arg)
{
	pthread_mutex_lock(&g_test_mutex);
	spdk_thread_send_msg(g_thread_io, fn, arg);
	pthread_cond_wait(&g_test_cond, &g_test_mutex);
	pthread_mutex_unlock(&g_test_mutex);
}

static void
wake_ut_thread(void)
{
	pthread_mutex_lock(&g_test_mutex);
	pthread_cond_signal(&g_test_cond);
	pthread_mutex_unlock(&g_test_mutex);
}

static void
__get_io_channel(void *arg)
{
	struct io_target *target = arg;

	target->ch = spdk_bdev_get_io_channel(target->bdev_desc);
	assert(target->ch);
	wake_ut_thread();
}

static void
bdevio_construct_target_open_cb(enum spdk_bdev_event_type type, struct spdk_bdev *bdev,
				void *event_ctx)
{
}

static int
bdevio_construct_target(struct spdk_bdev *bdev)
{
	struct io_target *target;
	int rc;
	uint64_t num_blocks = spdk_bdev_get_num_blocks(bdev);
	uint32_t block_size = spdk_bdev_get_block_size(bdev);

	target = malloc(sizeof(struct io_target));
	if (target == NULL) {
		return -ENOMEM;
	}

	rc = spdk_bdev_open_ext(spdk_bdev_get_name(bdev), true, bdevio_construct_target_open_cb, NULL,
				&target->bdev_desc);
	if (rc != 0) {
		free(target);
		SPDK_ERRLOG("Could not open leaf bdev %s, error=%d\n", spdk_bdev_get_name(bdev), rc);
		return rc;
	}

	printf("  %s: %" PRIu64 " blocks of %" PRIu32 " bytes (%" PRIu64 " MiB)\n",
	       spdk_bdev_get_name(bdev),
	       num_blocks, block_size,
	       (num_blocks * block_size + 1024 * 1024 - 1) / (1024 * 1024));

	target->bdev = bdev;
	target->next = g_io_targets;
	execute_spdk_function(__get_io_channel, target);
	g_io_targets = target;

	return 0;
}
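/*
 * Note on the threading model used above: execute_spdk_function() is a
 * synchronous cross-thread call. The calling (unit test) thread queues fn
 * onto g_thread_io and blocks on g_test_cond; fn must eventually call
 * wake_ut_thread(), either directly or from an I/O completion callback, to
 * release the caller. A minimal sketch of a conforming callee (do_work()
 * and ctx are hypothetical):
 *
 *	static void
 *	__example_fn(void *arg)
 *	{
 *		do_work(arg);		// runs on g_thread_io
 *		wake_ut_thread();	// unblocks execute_spdk_function()
 *	}
 *
 *	execute_spdk_function(__example_fn, &ctx);
 */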
static int
bdevio_construct_targets(void)
{
	struct spdk_bdev *bdev;
	int rc;

	printf("I/O targets:\n");

	bdev = spdk_bdev_first_leaf();
	while (bdev != NULL) {
		rc = bdevio_construct_target(bdev);
		if (rc < 0) {
			SPDK_ERRLOG("Could not construct bdev %s, error=%d\n", spdk_bdev_get_name(bdev), rc);
			return rc;
		}
		bdev = spdk_bdev_next_leaf(bdev);
	}

	if (g_io_targets == NULL) {
		SPDK_ERRLOG("No bdevs to perform tests on\n");
		return -1;
	}

	return 0;
}

static void
__put_io_channel(void *arg)
{
	struct io_target *target = arg;

	spdk_put_io_channel(target->ch);
	wake_ut_thread();
}

static void
bdevio_cleanup_targets(void)
{
	struct io_target *target;

	target = g_io_targets;
	while (target != NULL) {
		execute_spdk_function(__put_io_channel, target);
		spdk_bdev_close(target->bdev_desc);
		g_io_targets = target->next;
		free(target);
		target = g_io_targets;
	}
}

static bool g_completion_success;

static void
initialize_buffer(char **buf, int pattern, int size)
{
	*buf = spdk_zmalloc(size, 0x1000, NULL, SPDK_ENV_LCORE_ID_ANY, SPDK_MALLOC_DMA);
	assert(*buf != NULL);
	memset(*buf, pattern, size);
}

static void
quick_test_complete(struct spdk_bdev_io *bdev_io, bool success, void *arg)
{
	g_completion_success = success;
	spdk_bdev_free_io(bdev_io);
	wake_ut_thread();
}

static uint64_t
bdev_bytes_to_blocks(struct spdk_bdev *bdev, uint64_t bytes)
{
	uint32_t block_size = spdk_bdev_get_block_size(bdev);

	CU_ASSERT(bytes % block_size == 0);
	return bytes / block_size;
}

static void
__blockdev_write(void *arg)
{
	struct bdevio_request *req = arg;
	struct io_target *target = req->target;
	int rc;

	if (req->iovcnt) {
		rc = spdk_bdev_writev(target->bdev_desc, target->ch, req->iov, req->iovcnt, req->offset,
				      req->data_len, quick_test_complete, NULL);
	} else {
		rc = spdk_bdev_write(target->bdev_desc, target->ch, req->buf, req->offset,
				     req->data_len, quick_test_complete, NULL);
	}

	if (rc) {
		g_completion_success = false;
		wake_ut_thread();
	}
}

static void
__blockdev_write_zeroes(void *arg)
{
	struct bdevio_request *req = arg;
	struct io_target *target = req->target;
	int rc;

	rc = spdk_bdev_write_zeroes(target->bdev_desc, target->ch, req->offset,
				    req->data_len, quick_test_complete, NULL);
	if (rc) {
		g_completion_success = false;
		wake_ut_thread();
	}
}

static void
__blockdev_compare_and_write(void *arg)
{
	struct bdevio_request *req = arg;
	struct io_target *target = req->target;
	struct spdk_bdev *bdev = target->bdev;
	int rc;

	rc = spdk_bdev_comparev_and_writev_blocks(target->bdev_desc, target->ch, req->iov, req->iovcnt,
			req->fused_iov, req->fused_iovcnt, bdev_bytes_to_blocks(bdev, req->offset),
			bdev_bytes_to_blocks(bdev, req->data_len), quick_test_complete, NULL);

	if (rc) {
		g_completion_success = false;
		wake_ut_thread();
	}
}

static void
sgl_chop_buffer(struct bdevio_request *req, int iov_len)
{
	int data_len = req->data_len;
	char *buf = req->buf;

	req->iovcnt = 0;
	if (!iov_len) {
		return;
	}

	for (; data_len > 0 && req->iovcnt < BUFFER_IOVS; req->iovcnt++) {
		if (data_len < iov_len) {
			iov_len = data_len;
		}

		req->iov[req->iovcnt].iov_base = buf;
		req->iov[req->iovcnt].iov_len = iov_len;

		buf += iov_len;
		data_len -= iov_len;
	}

	CU_ASSERT_EQUAL_FATAL(data_len, 0);
}
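/*
 * Worked example of sgl_chop_buffer(): for data_len = 2500 and iov_len =
 * 1024, the loop emits iovecs of 1024, 1024, and 452 bytes; once data_len
 * drops below iov_len, iov_len is clamped so the final iovec covers exactly
 * the remainder. Passing iov_len == 0 leaves iovcnt at 0, which makes the
 * request take the contiguous-buffer path in __blockdev_write() and
 * __blockdev_read().
 */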
static void
sgl_chop_fused_buffer(struct bdevio_request *req, int iov_len)
{
	int data_len = req->data_len;
	char *buf = req->fused_buf;

	req->fused_iovcnt = 0;
	if (!iov_len) {
		return;
	}

	for (; data_len > 0 && req->fused_iovcnt < BUFFER_IOVS; req->fused_iovcnt++) {
		if (data_len < iov_len) {
			iov_len = data_len;
		}

		req->fused_iov[req->fused_iovcnt].iov_base = buf;
		req->fused_iov[req->fused_iovcnt].iov_len = iov_len;

		buf += iov_len;
		data_len -= iov_len;
	}

	CU_ASSERT_EQUAL_FATAL(data_len, 0);
}

static void
blockdev_write(struct io_target *target, char *tx_buf,
	       uint64_t offset, int data_len, int iov_len)
{
	struct bdevio_request req;

	req.target = target;
	req.buf = tx_buf;
	req.data_len = data_len;
	req.offset = offset;
	sgl_chop_buffer(&req, iov_len);

	g_completion_success = false;

	execute_spdk_function(__blockdev_write, &req);
}

static void
_blockdev_compare_and_write(struct io_target *target, char *cmp_buf, char *write_buf,
			    uint64_t offset, int data_len, int iov_len)
{
	struct bdevio_request req;

	req.target = target;
	req.buf = cmp_buf;
	req.fused_buf = write_buf;
	req.data_len = data_len;
	req.offset = offset;
	sgl_chop_buffer(&req, iov_len);
	sgl_chop_fused_buffer(&req, iov_len);

	g_completion_success = false;

	execute_spdk_function(__blockdev_compare_and_write, &req);
}

static void
blockdev_write_zeroes(struct io_target *target, char *tx_buf,
		      uint64_t offset, int data_len)
{
	struct bdevio_request req;

	req.target = target;
	req.buf = tx_buf;
	req.data_len = data_len;
	req.offset = offset;

	g_completion_success = false;

	execute_spdk_function(__blockdev_write_zeroes, &req);
}

static void
__blockdev_read(void *arg)
{
	struct bdevio_request *req = arg;
	struct io_target *target = req->target;
	int rc;

	if (req->iovcnt) {
		rc = spdk_bdev_readv(target->bdev_desc, target->ch, req->iov, req->iovcnt, req->offset,
				     req->data_len, quick_test_complete, NULL);
	} else {
		rc = spdk_bdev_read(target->bdev_desc, target->ch, req->buf, req->offset,
				    req->data_len, quick_test_complete, NULL);
	}

	if (rc) {
		g_completion_success = false;
		wake_ut_thread();
	}
}

static void
blockdev_read(struct io_target *target, char *rx_buf,
	      uint64_t offset, int data_len, int iov_len)
{
	struct bdevio_request req;

	req.target = target;
	req.buf = rx_buf;
	req.data_len = data_len;
	req.offset = offset;
	req.iovcnt = 0;
	sgl_chop_buffer(&req, iov_len);

	g_completion_success = false;

	execute_spdk_function(__blockdev_read, &req);
}

static int
blockdev_write_read_data_match(char *rx_buf, char *tx_buf, int data_length)
{
	return memcmp(rx_buf, tx_buf, data_length);
}
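/*
 * The helpers above are composed by the test cases that follow; the
 * canonical write/read/verify round trip looks like this (sketch, with
 * len and offset standing in for a test's chosen geometry):
 *
 *	initialize_buffer(&tx_buf, 0xA3, len);
 *	initialize_buffer(&rx_buf, 0, len);
 *	blockdev_write(target, tx_buf, offset, len, 0);
 *	CU_ASSERT_EQUAL(g_completion_success, true);
 *	blockdev_read(target, rx_buf, offset, len, 0);
 *	CU_ASSERT_EQUAL(g_completion_success, true);
 *	CU_ASSERT_EQUAL(blockdev_write_read_data_match(rx_buf, tx_buf, len), 0);
 */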
static void
blockdev_write_read(uint32_t data_length, uint32_t iov_len, int pattern, uint64_t offset,
		    int expected_rc, bool write_zeroes)
{
	struct io_target *target;
	char *tx_buf = NULL;
	char *rx_buf = NULL;
	int rc;

	target = g_current_io_target;

	if (!write_zeroes) {
		initialize_buffer(&tx_buf, pattern, data_length);
		initialize_buffer(&rx_buf, 0, data_length);

		blockdev_write(target, tx_buf, offset, data_length, iov_len);
	} else {
		initialize_buffer(&tx_buf, 0, data_length);
		initialize_buffer(&rx_buf, pattern, data_length);

		blockdev_write_zeroes(target, tx_buf, offset, data_length);
	}

	if (expected_rc == 0) {
		CU_ASSERT_EQUAL(g_completion_success, true);
	} else {
		CU_ASSERT_EQUAL(g_completion_success, false);
	}

	blockdev_read(target, rx_buf, offset, data_length, iov_len);

	if (expected_rc == 0) {
		CU_ASSERT_EQUAL(g_completion_success, true);
	} else {
		CU_ASSERT_EQUAL(g_completion_success, false);
	}

	if (g_completion_success) {
		rc = blockdev_write_read_data_match(rx_buf, tx_buf, data_length);
		/* Assert the write by comparing it with values read
		 * from each blockdev */
		CU_ASSERT_EQUAL(rc, 0);
	}

	spdk_free(rx_buf);
	spdk_free(tx_buf);
}

static void
blockdev_compare_and_write(uint32_t data_length, uint32_t iov_len, uint64_t offset)
{
	struct io_target *target;
	char *tx_buf = NULL;
	char *write_buf = NULL;
	char *rx_buf = NULL;
	int rc;

	target = g_current_io_target;

	initialize_buffer(&tx_buf, 0xAA, data_length);
	initialize_buffer(&rx_buf, 0, data_length);
	initialize_buffer(&write_buf, 0xBB, data_length);

	blockdev_write(target, tx_buf, offset, data_length, iov_len);
	CU_ASSERT_EQUAL(g_completion_success, true);

	_blockdev_compare_and_write(target, tx_buf, write_buf, offset, data_length, iov_len);
	CU_ASSERT_EQUAL(g_completion_success, true);

	_blockdev_compare_and_write(target, tx_buf, write_buf, offset, data_length, iov_len);
	CU_ASSERT_EQUAL(g_completion_success, false);

	blockdev_read(target, rx_buf, offset, data_length, iov_len);
	CU_ASSERT_EQUAL(g_completion_success, true);
	rc = blockdev_write_read_data_match(rx_buf, write_buf, data_length);
	/* Assert the write by comparing it with values read
	 * from each blockdev */
	CU_ASSERT_EQUAL(rc, 0);

	spdk_free(rx_buf);
	spdk_free(tx_buf);
	spdk_free(write_buf);
}

static void
blockdev_write_read_block(void)
{
	uint32_t data_length;
	uint64_t offset;
	int pattern;
	int expected_rc;
	struct io_target *target = g_current_io_target;
	struct spdk_bdev *bdev = target->bdev;

	/* Data size = 1 block */
	data_length = spdk_bdev_get_block_size(bdev);
	CU_ASSERT_TRUE(data_length < BUFFER_SIZE);
	offset = 0;
	pattern = 0xA3;
	/* Params are valid, hence the expected return value
	 * of write and read for all blockdevs is 0. */
	expected_rc = 0;

	blockdev_write_read(data_length, 0, pattern, offset, expected_rc, 0);
}

static void
blockdev_write_zeroes_read_block(void)
{
	uint32_t data_length;
	uint64_t offset;
	int pattern;
	int expected_rc;
	struct io_target *target = g_current_io_target;
	struct spdk_bdev *bdev = target->bdev;

	/* Data size = 1 block */
	data_length = spdk_bdev_get_block_size(bdev);
	offset = 0;
	pattern = 0xA3;
	/* Params are valid, hence the expected return value
	 * of write_zeroes and read for all blockdevs is 0. */
	expected_rc = 0;

	blockdev_write_read(data_length, 0, pattern, offset, expected_rc, 1);
}
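/*
 * The next three cases size their I/O relative to ZERO_BUFFER_SIZE, the
 * bdev layer's internal zero buffer (see bdev_internal.h), to control
 * whether an emulated write-zeroes must be split. The "block size aligned"
 * computation rounds down to a whole number of blocks; for example,
 * assuming a hypothetical ZERO_BUFFER_SIZE of 1 MiB and a 520-byte block
 * size, 1048576 - (1048576 % 520) = 1048320 bytes (2016 blocks).
 */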
/*
 * This i/o will not have to split at the bdev layer.
 */
static void
blockdev_write_zeroes_read_no_split(void)
{
	uint32_t data_length;
	uint64_t offset;
	int pattern;
	int expected_rc;
	struct io_target *target = g_current_io_target;
	struct spdk_bdev *bdev = target->bdev;

	/* Data size = block size aligned ZERO_BUFFER_SIZE */
	data_length = ZERO_BUFFER_SIZE; /* from bdev_internal.h */
	data_length -= ZERO_BUFFER_SIZE % spdk_bdev_get_block_size(bdev);
	offset = 0;
	pattern = 0xA3;
	/* Params are valid, hence the expected return value
	 * of write_zeroes and read for all blockdevs is 0. */
	expected_rc = 0;

	blockdev_write_read(data_length, 0, pattern, offset, expected_rc, 1);
}

/*
 * This i/o will have to split at the bdev layer if
 * write-zeroes is not supported by the bdev.
 */
static void
blockdev_write_zeroes_read_split(void)
{
	uint32_t data_length;
	uint64_t offset;
	int pattern;
	int expected_rc;
	struct io_target *target = g_current_io_target;
	struct spdk_bdev *bdev = target->bdev;

	/* Data size = block size aligned 3 * ZERO_BUFFER_SIZE */
	data_length = 3 * ZERO_BUFFER_SIZE; /* from bdev_internal.h */
	data_length -= data_length % spdk_bdev_get_block_size(bdev);
	offset = 0;
	pattern = 0xA3;
	/* Params are valid, hence the expected return value
	 * of write_zeroes and read for all blockdevs is 0. */
	expected_rc = 0;

	blockdev_write_read(data_length, 0, pattern, offset, expected_rc, 1);
}

/*
 * This i/o will have to split at the bdev layer if
 * write-zeroes is not supported by the bdev. It also
 * tests a write size that is not an even multiple of
 * the bdev layer zero buffer size.
 */
static void
blockdev_write_zeroes_read_split_partial(void)
{
	uint32_t data_length;
	uint64_t offset;
	int pattern;
	int expected_rc;
	struct io_target *target = g_current_io_target;
	struct spdk_bdev *bdev = target->bdev;
	uint32_t block_size = spdk_bdev_get_block_size(bdev);

	/* Data size = block size aligned 7 * ZERO_BUFFER_SIZE / 2 */
	data_length = ZERO_BUFFER_SIZE * 7 / 2;
	data_length -= data_length % block_size;
	offset = 0;
	pattern = 0xA3;
	/* Params are valid, hence the expected return value
	 * of write_zeroes and read for all blockdevs is 0. */
	expected_rc = 0;

	blockdev_write_read(data_length, 0, pattern, offset, expected_rc, 1);
}

static void
blockdev_writev_readv_block(void)
{
	uint32_t data_length, iov_len;
	uint64_t offset;
	int pattern;
	int expected_rc;
	struct io_target *target = g_current_io_target;
	struct spdk_bdev *bdev = target->bdev;

	/* Data size = 1 block */
	data_length = spdk_bdev_get_block_size(bdev);
	iov_len = data_length;
	CU_ASSERT_TRUE(data_length < BUFFER_SIZE);
	offset = 0;
	pattern = 0xA3;
	/* Params are valid, hence the expected return value
	 * of write and read for all blockdevs is 0. */
	expected_rc = 0;

	blockdev_write_read(data_length, iov_len, pattern, offset, expected_rc, 0);
}
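/*
 * blockdev_comparev_and_writev() below exercises the fused compare-and-write
 * path via blockdev_compare_and_write(): the first fused I/O succeeds because
 * the medium still holds the 0xAA compare pattern and atomically rewrites it
 * to 0xBB; the identical second attempt must fail, since the compare half now
 * mismatches. The I/O size is one atomic compare-and-write unit (ACWU).
 */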
static void
blockdev_comparev_and_writev(void)
{
	uint32_t data_length, iov_len;
	uint64_t offset;
	struct io_target *target = g_current_io_target;
	struct spdk_bdev *bdev = target->bdev;

	if (spdk_bdev_is_md_separate(bdev)) {
		/* TODO: remove this check once bdev layer properly supports
		 * compare and write for bdevs with separate md.
		 */
		SPDK_ERRLOG("skipping comparev_and_writev on bdev %s since it has\n"
			    "separate metadata which is not supported yet.\n",
			    spdk_bdev_get_name(bdev));
		return;
	}

	/* Data size = acwu size */
	data_length = spdk_bdev_get_block_size(bdev) * spdk_bdev_get_acwu(bdev);
	iov_len = data_length;
	CU_ASSERT_TRUE(data_length < BUFFER_SIZE);
	offset = 0;

	blockdev_compare_and_write(data_length, iov_len, offset);
}

static void
blockdev_writev_readv_30x1block(void)
{
	uint32_t data_length, iov_len;
	uint64_t offset;
	int pattern;
	int expected_rc;
	struct io_target *target = g_current_io_target;
	struct spdk_bdev *bdev = target->bdev;
	uint32_t block_size = spdk_bdev_get_block_size(bdev);

	/* Data size = 30 * block size */
	data_length = block_size * 30;
	iov_len = block_size;
	CU_ASSERT_TRUE(data_length < BUFFER_SIZE);
	offset = 0;
	pattern = 0xA3;
	/* Params are valid, hence the expected return value
	 * of write and read for all blockdevs is 0. */
	expected_rc = 0;

	blockdev_write_read(data_length, iov_len, pattern, offset, expected_rc, 0);
}

static void
blockdev_write_read_8blocks(void)
{
	uint32_t data_length;
	uint64_t offset;
	int pattern;
	int expected_rc;
	struct io_target *target = g_current_io_target;
	struct spdk_bdev *bdev = target->bdev;

	/* Data size = 8 * block size */
	data_length = spdk_bdev_get_block_size(bdev) * 8;
	CU_ASSERT_TRUE(data_length < BUFFER_SIZE);
	offset = data_length;
	pattern = 0xA3;
	/* Params are valid, hence the expected return value
	 * of write and read for all blockdevs is 0. */
	expected_rc = 0;

	blockdev_write_read(data_length, 0, pattern, offset, expected_rc, 0);
}

static void
blockdev_writev_readv_8blocks(void)
{
	uint32_t data_length, iov_len;
	uint64_t offset;
	int pattern;
	int expected_rc;
	struct io_target *target = g_current_io_target;
	struct spdk_bdev *bdev = target->bdev;

	/* Data size = 8 * block size */
	data_length = spdk_bdev_get_block_size(bdev) * 8;
	iov_len = data_length;
	CU_ASSERT_TRUE(data_length < BUFFER_SIZE);
	offset = data_length;
	pattern = 0xA3;
	/* Params are valid, hence the expected return value
	 * of write and read for all blockdevs is 0. */
	expected_rc = 0;

	blockdev_write_read(data_length, iov_len, pattern, offset, expected_rc, 0);
}
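/*
 * The "size > 128k" cases below build a length of one block more than a
 * block-aligned 128 KiB, so the I/O exceeds 128 KiB by exactly one block.
 * For example, with a 512-byte block size: 131072 is already aligned, so
 * data_length = 131072 + 512 = 131584 bytes, written at offset 2 blocks.
 */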
static void
blockdev_write_read_size_gt_128k(void)
{
	uint32_t data_length;
	uint64_t offset;
	int pattern;
	int expected_rc;
	struct io_target *target = g_current_io_target;
	struct spdk_bdev *bdev = target->bdev;
	uint32_t block_size = spdk_bdev_get_block_size(bdev);

	/* Data size = block size aligned 128K + 1 block */
	data_length = 128 * 1024;
	data_length -= data_length % block_size;
	data_length += block_size;
	CU_ASSERT_TRUE(data_length < BUFFER_SIZE);
	offset = block_size * 2;
	pattern = 0xA3;
	/* Params are valid, hence the expected return value
	 * of write and read for all blockdevs is 0. */
	expected_rc = 0;

	blockdev_write_read(data_length, 0, pattern, offset, expected_rc, 0);
}

static void
blockdev_writev_readv_size_gt_128k(void)
{
	uint32_t data_length, iov_len;
	uint64_t offset;
	int pattern;
	int expected_rc;
	struct io_target *target = g_current_io_target;
	struct spdk_bdev *bdev = target->bdev;
	uint32_t block_size = spdk_bdev_get_block_size(bdev);

	/* Data size = block size aligned 128K + 1 block */
	data_length = 128 * 1024;
	data_length -= data_length % block_size;
	data_length += block_size;
	iov_len = data_length;
	CU_ASSERT_TRUE(data_length < BUFFER_SIZE);
	offset = block_size * 2;
	pattern = 0xA3;
	/* Params are valid, hence the expected return value
	 * of write and read for all blockdevs is 0. */
	expected_rc = 0;

	blockdev_write_read(data_length, iov_len, pattern, offset, expected_rc, 0);
}

static void
blockdev_writev_readv_size_gt_128k_two_iov(void)
{
	uint32_t data_length, iov_len;
	uint64_t offset;
	int pattern;
	int expected_rc;
	struct io_target *target = g_current_io_target;
	struct spdk_bdev *bdev = target->bdev;
	uint32_t block_size = spdk_bdev_get_block_size(bdev);

	/* Data size = block size aligned 128K + 1 block */
	data_length = 128 * 1024;
	data_length -= data_length % block_size;
	iov_len = data_length;
	data_length += block_size;
	CU_ASSERT_TRUE(data_length < BUFFER_SIZE);
	offset = block_size * 2;
	pattern = 0xA3;
	/* Params are valid, hence the expected return value
	 * of write and read for all blockdevs is 0. */
	expected_rc = 0;

	blockdev_write_read(data_length, iov_len, pattern, offset, expected_rc, 0);
}

static void
blockdev_write_read_invalid_size(void)
{
	uint32_t data_length;
	uint64_t offset;
	int pattern;
	int expected_rc;
	struct io_target *target = g_current_io_target;
	struct spdk_bdev *bdev = target->bdev;
	uint32_t block_size = spdk_bdev_get_block_size(bdev);

	/* Data size is not a multiple of the block size */
	data_length = block_size - 1;
	CU_ASSERT_TRUE(data_length < BUFFER_SIZE);
	offset = block_size * 2;
	pattern = 0xA3;
	/* Params are invalid, hence the expected return value
	 * of write and read for all blockdevs is < 0 */
	expected_rc = -1;

	blockdev_write_read(data_length, 0, pattern, offset, expected_rc, 0);
}
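/*
 * The two boundary cases below start one block before the end of the
 * device: offset = (num_blocks - 1) * block_size. With a hypothetical
 * 1024-block, 512-byte bdev that is offset 523776; a one-block I/O then
 * ends exactly at the 524288-byte device size and must succeed, while a
 * two-block I/O overruns it by one block and must fail.
 */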
static void
blockdev_write_read_offset_plus_nbytes_equals_bdev_size(void)
{
	struct io_target *target;
	struct spdk_bdev *bdev;
	char *tx_buf = NULL;
	char *rx_buf = NULL;
	uint64_t offset;
	uint32_t block_size;
	int rc;

	target = g_current_io_target;
	bdev = target->bdev;

	block_size = spdk_bdev_get_block_size(bdev);

	/* The start offset has been set to a marginal value
	 * such that offset + nbytes == Total size of
	 * blockdev. */
	offset = ((spdk_bdev_get_num_blocks(bdev) - 1) * block_size);

	initialize_buffer(&tx_buf, 0xA3, block_size);
	initialize_buffer(&rx_buf, 0, block_size);

	blockdev_write(target, tx_buf, offset, block_size, 0);
	CU_ASSERT_EQUAL(g_completion_success, true);

	blockdev_read(target, rx_buf, offset, block_size, 0);
	CU_ASSERT_EQUAL(g_completion_success, true);

	rc = blockdev_write_read_data_match(rx_buf, tx_buf, block_size);
	/* Assert the write by comparing it with values read
	 * from each blockdev */
	CU_ASSERT_EQUAL(rc, 0);

	spdk_free(tx_buf);
	spdk_free(rx_buf);
}

static void
blockdev_write_read_offset_plus_nbytes_gt_bdev_size(void)
{
	struct io_target *target = g_current_io_target;
	struct spdk_bdev *bdev = target->bdev;
	char *tx_buf = NULL;
	char *rx_buf = NULL;
	int data_length;
	uint64_t offset;
	int pattern;
	uint32_t block_size = spdk_bdev_get_block_size(bdev);

	/* Tests the overflow condition of the blockdevs. */
	data_length = block_size * 2;
	CU_ASSERT_TRUE(data_length < BUFFER_SIZE);
	pattern = 0xA3;

	/* The start offset has been set to a valid value
	 * but offset + nbytes is greater than the Total size
	 * of the blockdev. The test should fail. */
	offset = (spdk_bdev_get_num_blocks(bdev) - 1) * block_size;

	initialize_buffer(&tx_buf, pattern, data_length);
	initialize_buffer(&rx_buf, 0, data_length);

	blockdev_write(target, tx_buf, offset, data_length, 0);
	CU_ASSERT_EQUAL(g_completion_success, false);

	blockdev_read(target, rx_buf, offset, data_length, 0);
	CU_ASSERT_EQUAL(g_completion_success, false);

	spdk_free(tx_buf);
	spdk_free(rx_buf);
}

static void
blockdev_write_read_max_offset(void)
{
	int data_length;
	uint64_t offset;
	int pattern;
	int expected_rc;
	struct io_target *target = g_current_io_target;
	struct spdk_bdev *bdev = target->bdev;

	data_length = spdk_bdev_get_block_size(bdev);
	CU_ASSERT_TRUE(data_length < BUFFER_SIZE);
	/* The start offset has been set to UINT64_MAX such that
	 * adding nbytes wraps around and points to an invalid address. */
	offset = UINT64_MAX;
	pattern = 0xA3;
	/* Params are invalid, hence the expected return value
	 * of write and read for all blockdevs is < 0 */
	expected_rc = -1;

	blockdev_write_read(data_length, 0, pattern, offset, expected_rc, 0);
}
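/*
 * Layout exercised by blockdev_overlapped_write_read_2blocks() below,
 * block by block (two-block writes, the second shifted by one block):
 *
 *	after 1st write (0xA3 @ block 0):   [0]=0xA3 [1]=0xA3 [2]=----
 *	after 2nd write (0xBB @ block 1):   [0]=0xA3 [1]=0xBB [2]=0xBB
 *
 * Each write is verified by reading back the same two-block range.
 */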
static void
blockdev_overlapped_write_read_2blocks(void)
{
	int data_length;
	uint64_t offset;
	int pattern;
	int expected_rc;
	struct io_target *target = g_current_io_target;
	struct spdk_bdev *bdev = target->bdev;

	/* Data size = 2 blocks */
	data_length = spdk_bdev_get_block_size(bdev) * 2;
	CU_ASSERT_TRUE(data_length < BUFFER_SIZE);
	offset = 0;
	pattern = 0xA3;
	/* Params are valid, hence the expected return value
	 * of write and read for all blockdevs is 0. */
	expected_rc = 0;
	/* Assert the write by comparing it with values read
	 * from the same offset for each blockdev */
	blockdev_write_read(data_length, 0, pattern, offset, expected_rc, 0);

	/* Overwrite the pattern 0xbb of size 2*block size on an address offset
	 * overlapping with the address written above and assert the new value in
	 * the overlapped address range */
	/* Populate 2*block size with value 0xBB */
	pattern = 0xBB;
	/* Offset = 1 block; Overlap offset addresses and write value 0xbb */
	offset = spdk_bdev_get_block_size(bdev);
	/* Assert the write by comparing it with values read
	 * from the overlapped offset for each blockdev */
	blockdev_write_read(data_length, 0, pattern, offset, expected_rc, 0);
}

static void
__blockdev_reset(void *arg)
{
	struct bdevio_request *req = arg;
	struct io_target *target = req->target;
	int rc;

	rc = spdk_bdev_reset(target->bdev_desc, target->ch, quick_test_complete, NULL);
	if (rc < 0) {
		g_completion_success = false;
		wake_ut_thread();
	}
}

static void
blockdev_test_reset(void)
{
	struct bdevio_request req;
	struct io_target *target;
	bool reset_supported;

	target = g_current_io_target;
	req.target = target;

	reset_supported = spdk_bdev_io_type_supported(target->bdev, SPDK_BDEV_IO_TYPE_RESET);
	g_completion_success = false;

	execute_spdk_function(__blockdev_reset, &req);

	CU_ASSERT_EQUAL(g_completion_success, reset_supported);
}

struct bdevio_passthrough_request {
	struct spdk_nvme_cmd cmd;
	void *buf;
	uint32_t len;
	struct io_target *target;
	int sct;
	int sc;
	uint32_t cdw0;
};

static void
nvme_pt_test_complete(struct spdk_bdev_io *bdev_io, bool success, void *arg)
{
	struct bdevio_passthrough_request *pt_req = arg;

	spdk_bdev_io_get_nvme_status(bdev_io, &pt_req->cdw0, &pt_req->sct, &pt_req->sc);
	spdk_bdev_free_io(bdev_io);
	wake_ut_thread();
}

static void
__blockdev_nvme_passthru(void *arg)
{
	struct bdevio_passthrough_request *pt_req = arg;
	struct io_target *target = pt_req->target;
	int rc;

	rc = spdk_bdev_nvme_io_passthru(target->bdev_desc, target->ch,
					&pt_req->cmd, pt_req->buf, pt_req->len,
					nvme_pt_test_complete, pt_req);
	if (rc) {
		wake_ut_thread();
	}
}
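/*
 * In blockdev_test_nvme_passthru_rw() below the raw NVMe command is built by
 * hand: for Write/Read, cdw10/cdw11 together hold the 64-bit starting LBA
 * (the uint64_t cast writes both dwords at once, so SLBA = 4) and the low
 * 16 bits of cdw12 hold the zero-based block count (0 means one block). The
 * sct/sc fields are pre-set to sentinel status values so the asserts can
 * tell whether the completion callback actually overwrote them.
 */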
static void
blockdev_test_nvme_passthru_rw(void)
{
	struct bdevio_passthrough_request pt_req;
	void *write_buf, *read_buf;
	struct io_target *target;

	target = g_current_io_target;

	if (!spdk_bdev_io_type_supported(target->bdev, SPDK_BDEV_IO_TYPE_NVME_IO)) {
		return;
	}

	memset(&pt_req, 0, sizeof(pt_req));
	pt_req.target = target;
	pt_req.cmd.opc = SPDK_NVME_OPC_WRITE;
	pt_req.cmd.nsid = 1;
	*(uint64_t *)&pt_req.cmd.cdw10 = 4;
	pt_req.cmd.cdw12 = 0;

	pt_req.len = spdk_bdev_get_block_size(target->bdev);
	write_buf = spdk_malloc(pt_req.len, 0, NULL, SPDK_ENV_LCORE_ID_ANY, SPDK_MALLOC_DMA);
	memset(write_buf, 0xA5, pt_req.len);
	pt_req.buf = write_buf;

	pt_req.sct = SPDK_NVME_SCT_VENDOR_SPECIFIC;
	pt_req.sc = SPDK_NVME_SC_INVALID_FIELD;
	execute_spdk_function(__blockdev_nvme_passthru, &pt_req);
	CU_ASSERT(pt_req.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(pt_req.sc == SPDK_NVME_SC_SUCCESS);

	pt_req.cmd.opc = SPDK_NVME_OPC_READ;
	read_buf = spdk_zmalloc(pt_req.len, 0, NULL, SPDK_ENV_LCORE_ID_ANY, SPDK_MALLOC_DMA);
	pt_req.buf = read_buf;

	pt_req.sct = SPDK_NVME_SCT_VENDOR_SPECIFIC;
	pt_req.sc = SPDK_NVME_SC_INVALID_FIELD;
	execute_spdk_function(__blockdev_nvme_passthru, &pt_req);
	CU_ASSERT(pt_req.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(pt_req.sc == SPDK_NVME_SC_SUCCESS);

	CU_ASSERT(!memcmp(read_buf, write_buf, pt_req.len));
	spdk_free(read_buf);
	spdk_free(write_buf);
}

static void
blockdev_test_nvme_passthru_vendor_specific(void)
{
	struct bdevio_passthrough_request pt_req;
	struct io_target *target;

	target = g_current_io_target;

	if (!spdk_bdev_io_type_supported(target->bdev, SPDK_BDEV_IO_TYPE_NVME_IO)) {
		return;
	}

	memset(&pt_req, 0, sizeof(pt_req));
	pt_req.target = target;
	pt_req.cmd.opc = 0x7F; /* choose known invalid opcode */
	pt_req.cmd.nsid = 1;

	pt_req.sct = SPDK_NVME_SCT_VENDOR_SPECIFIC;
	pt_req.sc = SPDK_NVME_SC_SUCCESS;
	pt_req.cdw0 = 0xbeef;
	execute_spdk_function(__blockdev_nvme_passthru, &pt_req);
	CU_ASSERT(pt_req.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(pt_req.sc == SPDK_NVME_SC_INVALID_OPCODE);
	CU_ASSERT(pt_req.cdw0 == 0x0);
}

static void
__blockdev_nvme_admin_passthru(void *arg)
{
	struct bdevio_passthrough_request *pt_req = arg;
	struct io_target *target = pt_req->target;
	int rc;

	rc = spdk_bdev_nvme_admin_passthru(target->bdev_desc, target->ch,
					   &pt_req->cmd, pt_req->buf, pt_req->len,
					   nvme_pt_test_complete, pt_req);
	if (rc) {
		wake_ut_thread();
	}
}

static void
blockdev_test_nvme_admin_passthru(void)
{
	struct io_target *target;
	struct bdevio_passthrough_request pt_req;

	target = g_current_io_target;

	if (!spdk_bdev_io_type_supported(target->bdev, SPDK_BDEV_IO_TYPE_NVME_ADMIN)) {
		return;
	}

	memset(&pt_req, 0, sizeof(pt_req));
	pt_req.target = target;
	pt_req.cmd.opc = SPDK_NVME_OPC_IDENTIFY;
	pt_req.cmd.nsid = 0;
	*(uint64_t *)&pt_req.cmd.cdw10 = SPDK_NVME_IDENTIFY_CTRLR;

	pt_req.len = sizeof(struct spdk_nvme_ctrlr_data);
	pt_req.buf = spdk_malloc(pt_req.len, 0, NULL, SPDK_ENV_LCORE_ID_ANY, SPDK_MALLOC_DMA);

	pt_req.sct = SPDK_NVME_SCT_GENERIC;
	pt_req.sc = SPDK_NVME_SC_SUCCESS;
	execute_spdk_function(__blockdev_nvme_admin_passthru, &pt_req);
	CU_ASSERT(pt_req.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(pt_req.sc == SPDK_NVME_SC_SUCCESS);

	spdk_free(pt_req.buf);
}

static void
__stop_init_thread(void *arg)
{
	unsigned num_failures = g_num_failures;
	struct spdk_jsonrpc_request *request = arg;

	g_num_failures = 0;

	bdevio_cleanup_targets();
	if (g_wait_for_tests && !g_shutdown) {
		/* Do not stop the app yet, wait for another RPC */
		rpc_perform_tests_cb(num_failures, request);
		return;
	}
	spdk_app_stop(num_failures);
}

static void
stop_init_thread(unsigned num_failures, struct spdk_jsonrpc_request *request)
{
	g_num_failures = num_failures;

	spdk_thread_send_msg(g_thread_init, __stop_init_thread, request);
}
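/*
 * CUnit runs one suite per I/O target (see __setup_ut_on_single_target()).
 * Suites execute in registration order against the same linked list, so
 * suite_init() below latches the first target and suite_fini() advances
 * g_current_io_target to the next one when a suite finishes.
 */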
static int
suite_init(void)
{
	if (g_current_io_target == NULL) {
		g_current_io_target = g_io_targets;
	}
	return 0;
}

static int
suite_fini(void)
{
	g_current_io_target = g_current_io_target->next;
	return 0;
}

#define SUITE_NAME_MAX 64

static int
__setup_ut_on_single_target(struct io_target *target)
{
	int rc;
	CU_pSuite suite = NULL;
	char name[SUITE_NAME_MAX];

	snprintf(name, sizeof(name), "bdevio tests on: %s", spdk_bdev_get_name(target->bdev));
	suite = CU_add_suite(name, suite_init, suite_fini);
	if (suite == NULL) {
		CU_cleanup_registry();
		rc = CU_get_error();
		return -rc;
	}

	if (
		CU_add_test(suite, "blockdev write read block",
			    blockdev_write_read_block) == NULL
		|| CU_add_test(suite, "blockdev write zeroes read block",
			       blockdev_write_zeroes_read_block) == NULL
		|| CU_add_test(suite, "blockdev write zeroes read no split",
			       blockdev_write_zeroes_read_no_split) == NULL
		|| CU_add_test(suite, "blockdev write zeroes read split",
			       blockdev_write_zeroes_read_split) == NULL
		|| CU_add_test(suite, "blockdev write zeroes read split partial",
			       blockdev_write_zeroes_read_split_partial) == NULL
		|| CU_add_test(suite, "blockdev reset",
			       blockdev_test_reset) == NULL
		|| CU_add_test(suite, "blockdev write read 8 blocks",
			       blockdev_write_read_8blocks) == NULL
		|| CU_add_test(suite, "blockdev write read size > 128k",
			       blockdev_write_read_size_gt_128k) == NULL
		|| CU_add_test(suite, "blockdev write read invalid size",
			       blockdev_write_read_invalid_size) == NULL
		|| CU_add_test(suite, "blockdev write read offset + nbytes == size of blockdev",
			       blockdev_write_read_offset_plus_nbytes_equals_bdev_size) == NULL
		|| CU_add_test(suite, "blockdev write read offset + nbytes > size of blockdev",
			       blockdev_write_read_offset_plus_nbytes_gt_bdev_size) == NULL
		|| CU_add_test(suite, "blockdev write read max offset",
			       blockdev_write_read_max_offset) == NULL
		|| CU_add_test(suite, "blockdev write read 2 blocks on overlapped address offset",
			       blockdev_overlapped_write_read_2blocks) == NULL
		|| CU_add_test(suite, "blockdev writev readv 8 blocks",
			       blockdev_writev_readv_8blocks) == NULL
		|| CU_add_test(suite, "blockdev writev readv 30 x 1block",
			       blockdev_writev_readv_30x1block) == NULL
		|| CU_add_test(suite, "blockdev writev readv block",
			       blockdev_writev_readv_block) == NULL
		|| CU_add_test(suite, "blockdev writev readv size > 128k",
			       blockdev_writev_readv_size_gt_128k) == NULL
		|| CU_add_test(suite, "blockdev writev readv size > 128k in two iovs",
			       blockdev_writev_readv_size_gt_128k_two_iov) == NULL
		|| CU_add_test(suite, "blockdev comparev and writev",
			       blockdev_comparev_and_writev) == NULL
		|| CU_add_test(suite, "blockdev nvme passthru rw",
			       blockdev_test_nvme_passthru_rw) == NULL
		|| CU_add_test(suite, "blockdev nvme passthru vendor specific",
			       blockdev_test_nvme_passthru_vendor_specific) == NULL
		|| CU_add_test(suite, "blockdev nvme admin passthru",
			       blockdev_test_nvme_admin_passthru) == NULL
	) {
		CU_cleanup_registry();
		rc = CU_get_error();
		return -rc;
	}
	return 0;
}
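/*
 * New cases slot into the chain above; e.g., a hypothetical
 * blockdev_write_read_4blocks() would be registered as:
 *
 *	|| CU_add_test(suite, "blockdev write read 4 blocks",
 *		       blockdev_write_read_4blocks) == NULL
 *
 * CU_add_test() returns NULL on failure, which is why the whole chain is
 * OR-ed together and checked once.
 */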
static void
__run_ut_thread(void *arg)
{
	struct spdk_jsonrpc_request *request = arg;
	int rc = 0;
	struct io_target *target;
	unsigned num_failures;

	if (CU_initialize_registry() != CUE_SUCCESS) {
		/* CUnit error, probably won't recover */
		rc = CU_get_error();
		stop_init_thread(-rc, request);
		return;
	}

	target = g_io_targets;
	while (target != NULL) {
		rc = __setup_ut_on_single_target(target);
		if (rc < 0) {
			/* CUnit error, probably won't recover */
			stop_init_thread(-rc, request);
			return;
		}
		target = target->next;
	}
	CU_basic_set_mode(CU_BRM_VERBOSE);
	CU_basic_run_tests();
	num_failures = CU_get_number_of_failures();
	CU_cleanup_registry();

	stop_init_thread(num_failures, request);
}

static void
__construct_targets(void *arg)
{
	if (bdevio_construct_targets() < 0) {
		spdk_app_stop(-1);
		return;
	}

	spdk_thread_send_msg(g_thread_ut, __run_ut_thread, NULL);
}

static void
test_main(void *arg1)
{
	struct spdk_cpuset tmpmask = {};
	uint32_t i;

	pthread_mutex_init(&g_test_mutex, NULL);
	pthread_cond_init(&g_test_cond, NULL);

	/* This test runs specifically on at least three cores.
	 * g_thread_init is the app_thread on main core from event framework.
	 * Next two are only for the tests and should always be on separate CPU cores. */
	if (spdk_env_get_core_count() < 3) {
		spdk_app_stop(-1);
		return;
	}

	SPDK_ENV_FOREACH_CORE(i) {
		if (i == spdk_env_get_current_core()) {
			g_thread_init = spdk_get_thread();
			continue;
		}
		spdk_cpuset_zero(&tmpmask);
		spdk_cpuset_set_cpu(&tmpmask, i, true);
		if (g_thread_ut == NULL) {
			g_thread_ut = spdk_thread_create("ut_thread", &tmpmask);
		} else if (g_thread_io == NULL) {
			g_thread_io = spdk_thread_create("io_thread", &tmpmask);
		}
	}

	if (g_wait_for_tests) {
		/* Do not perform any tests until RPC is received */
		return;
	}

	spdk_thread_send_msg(g_thread_init, __construct_targets, NULL);
}

static void
bdevio_usage(void)
{
	printf(" -w                        start bdevio app and wait for RPC to start the tests\n");
}

static int
bdevio_parse_arg(int ch, char *arg)
{
	switch (ch) {
	case 'w':
		g_wait_for_tests = true;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

struct rpc_perform_tests {
	char *name;
};

static void
free_rpc_perform_tests(struct rpc_perform_tests *r)
{
	free(r->name);
}

static const struct spdk_json_object_decoder rpc_perform_tests_decoders[] = {
	{"name", offsetof(struct rpc_perform_tests, name), spdk_json_decode_string, true},
};

static void
rpc_perform_tests_cb(unsigned num_failures, struct spdk_jsonrpc_request *request)
{
	struct spdk_json_write_ctx *w;

	if (num_failures == 0) {
		w = spdk_jsonrpc_begin_result(request);
		spdk_json_write_uint32(w, num_failures);
		spdk_jsonrpc_end_result(request, w);
	} else {
		spdk_jsonrpc_send_error_response_fmt(request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR,
						     "%d test cases failed", num_failures);
	}
}
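/*
 * Example request handled by rpc_perform_tests() below, for an app started
 * with -w (the optional "name" restricts the run to one bdev; "Malloc0" is
 * a placeholder):
 *
 *	{ "jsonrpc": "2.0", "id": 1, "method": "perform_tests",
 *	  "params": { "name": "Malloc0" } }
 *
 * On success the result is the failure count (always 0, per
 * rpc_perform_tests_cb() above); otherwise a JSON-RPC error response
 * carrying the failure count is returned instead.
 */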
static void
rpc_perform_tests(struct spdk_jsonrpc_request *request, const struct spdk_json_val *params)
{
	struct rpc_perform_tests req = {NULL};
	struct spdk_bdev *bdev;
	int rc;

	if (params && spdk_json_decode_object(params, rpc_perform_tests_decoders,
					      SPDK_COUNTOF(rpc_perform_tests_decoders),
					      &req)) {
		SPDK_ERRLOG("spdk_json_decode_object failed\n");
		spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INVALID_PARAMS, "Invalid parameters");
		goto invalid;
	}

	if (req.name) {
		bdev = spdk_bdev_get_by_name(req.name);
		if (bdev == NULL) {
			SPDK_ERRLOG("Bdev '%s' does not exist\n", req.name);
			spdk_jsonrpc_send_error_response_fmt(request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR,
							     "Bdev '%s' does not exist: %s",
							     req.name, spdk_strerror(ENODEV));
			goto invalid;
		}
		rc = bdevio_construct_target(bdev);
		if (rc < 0) {
			SPDK_ERRLOG("Could not construct target for bdev '%s'\n", spdk_bdev_get_name(bdev));
			spdk_jsonrpc_send_error_response_fmt(request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR,
							     "Could not construct target for bdev '%s': %s",
							     spdk_bdev_get_name(bdev), spdk_strerror(-rc));
			goto invalid;
		}
	} else {
		rc = bdevio_construct_targets();
		if (rc < 0) {
			SPDK_ERRLOG("Could not construct targets for all bdevs\n");
			spdk_jsonrpc_send_error_response_fmt(request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR,
							     "Could not construct targets for all bdevs: %s",
							     spdk_strerror(-rc));
			goto invalid;
		}
	}
	free_rpc_perform_tests(&req);

	spdk_thread_send_msg(g_thread_ut, __run_ut_thread, request);

	return;

invalid:
	free_rpc_perform_tests(&req);
}
SPDK_RPC_REGISTER("perform_tests", rpc_perform_tests, SPDK_RPC_RUNTIME)

static void
spdk_bdevio_shutdown_cb(void)
{
	g_shutdown = true;
	spdk_thread_send_msg(g_thread_init, __stop_init_thread, NULL);
}

int
main(int argc, char **argv)
{
	int rc;
	struct spdk_app_opts opts = {};

	spdk_app_opts_init(&opts, sizeof(opts));
	opts.name = "bdevio";
	opts.reactor_mask = "0x7";
	opts.shutdown_cb = spdk_bdevio_shutdown_cb;

	if ((rc = spdk_app_parse_args(argc, argv, &opts, "w", NULL,
				      bdevio_parse_arg, bdevio_usage)) !=
	    SPDK_APP_PARSE_ARGS_SUCCESS) {
		return rc;
	}

	rc = spdk_app_start(&opts, test_main, NULL);
	spdk_app_fini();

	return rc;
}
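/*
 * Usage sketch (paths and bdev configuration are illustrative): run the
 * tests immediately against bdevs described by a JSON config,
 *
 *	./bdevio --json bdev.json
 *
 * or start with -w and trigger runs later through the perform_tests RPC.
 * The default reactor mask 0x7 supplies the three cores that test_main()
 * requires.
 */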