/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (c) Intel Corporation.
 * All rights reserved.
 * Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 */

#include "spdk/stdinc.h"

#include "spdk/bdev.h"
#include "spdk/accel.h"
#include "spdk/env.h"
#include "spdk/log.h"
#include "spdk/thread.h"
#include "spdk/event.h"
#include "spdk/rpc.h"
#include "spdk/util.h"
#include "spdk/string.h"

#include "bdev_internal.h"
#include "CUnit/Basic.h"

#define BUFFER_IOVS		1024
#define BUFFER_SIZE		(260 * 1024)
#define BDEV_TASK_ARRAY_SIZE	2048

pthread_mutex_t g_test_mutex;
pthread_cond_t g_test_cond;

static struct spdk_thread *g_thread_init;
static struct spdk_thread *g_thread_ut;
static struct spdk_thread *g_thread_io;
static bool g_wait_for_tests = false;
static int g_num_failures = 0;
static bool g_shutdown = false;

struct io_target {
	struct spdk_bdev	*bdev;
	struct spdk_bdev_desc	*bdev_desc;
	struct spdk_io_channel	*ch;
	struct io_target	*next;
};

struct bdevio_request {
	char *buf;
	char *fused_buf;
	int data_len;
	uint64_t offset;
	struct iovec iov[BUFFER_IOVS];
	int iovcnt;
	struct iovec fused_iov[BUFFER_IOVS];
	int fused_iovcnt;
	struct io_target *target;
};

struct io_target *g_io_targets = NULL;
struct io_target *g_current_io_target = NULL;
static void rpc_perform_tests_cb(unsigned num_failures, struct spdk_jsonrpc_request *request);
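/*
 * Cross-thread handshake: the unit-test thread sends work to the dedicated
 * I/O thread and sleeps on g_test_cond. Because g_test_mutex is taken before
 * the message is sent, the pthread_cond_signal() in wake_ut_thread() cannot
 * fire before the sender is parked in pthread_cond_wait(), so the pair
 * behaves like a synchronous remote call.
 */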
static void
execute_spdk_function(spdk_msg_fn fn, void *arg)
{
	pthread_mutex_lock(&g_test_mutex);
	spdk_thread_send_msg(g_thread_io, fn, arg);
	pthread_cond_wait(&g_test_cond, &g_test_mutex);
	pthread_mutex_unlock(&g_test_mutex);
}

static void
wake_ut_thread(void)
{
	pthread_mutex_lock(&g_test_mutex);
	pthread_cond_signal(&g_test_cond);
	pthread_mutex_unlock(&g_test_mutex);
}

static void
__get_io_channel(void *arg)
{
	struct io_target *target = arg;

	target->ch = spdk_bdev_get_io_channel(target->bdev_desc);
	assert(target->ch);
	wake_ut_thread();
}

static void
bdevio_construct_target_open_cb(enum spdk_bdev_event_type type, struct spdk_bdev *bdev,
				void *event_ctx)
{
}

static int
bdevio_construct_target(struct spdk_bdev *bdev)
{
	struct io_target *target;
	int rc;
	uint64_t num_blocks = spdk_bdev_get_num_blocks(bdev);
	uint32_t block_size = spdk_bdev_get_block_size(bdev);

	target = malloc(sizeof(struct io_target));
	if (target == NULL) {
		return -ENOMEM;
	}

	rc = spdk_bdev_open_ext(spdk_bdev_get_name(bdev), true, bdevio_construct_target_open_cb, NULL,
				&target->bdev_desc);
	if (rc != 0) {
		free(target);
		SPDK_ERRLOG("Could not open leaf bdev %s, error=%d\n", spdk_bdev_get_name(bdev), rc);
		return rc;
	}

	printf(" %s: %" PRIu64 " blocks of %" PRIu32 " bytes (%" PRIu64 " MiB)\n",
	       spdk_bdev_get_name(bdev),
	       num_blocks, block_size,
	       (num_blocks * block_size + 1024 * 1024 - 1) / (1024 * 1024));

	target->bdev = bdev;
	target->next = g_io_targets;
	execute_spdk_function(__get_io_channel, target);
	g_io_targets = target;

	return 0;
}

static int
bdevio_construct_targets(void)
{
	struct spdk_bdev *bdev;
	int rc;

	printf("I/O targets:\n");

	bdev = spdk_bdev_first_leaf();
	while (bdev != NULL) {
		rc = bdevio_construct_target(bdev);
		if (rc < 0) {
			SPDK_ERRLOG("Could not construct bdev %s, error=%d\n", spdk_bdev_get_name(bdev), rc);
			return rc;
		}
		bdev = spdk_bdev_next_leaf(bdev);
	}

	if (g_io_targets == NULL) {
		SPDK_ERRLOG("No bdevs to perform tests on\n");
		return -1;
	}

	return 0;
}

static void
__put_io_channel(void *arg)
{
	struct io_target *target = arg;

	spdk_put_io_channel(target->ch);
	wake_ut_thread();
}

static void
bdevio_cleanup_targets(void)
{
	struct io_target *target;

	target = g_io_targets;
	while (target != NULL) {
		execute_spdk_function(__put_io_channel, target);
		spdk_bdev_close(target->bdev_desc);
		g_io_targets = target->next;
		free(target);
		target = g_io_targets;
	}
}

static bool g_completion_success;

static void
initialize_buffer(char **buf, int pattern, int size)
{
	*buf = spdk_zmalloc(size, 0x1000, NULL, SPDK_ENV_LCORE_ID_ANY, SPDK_MALLOC_DMA);
	assert(*buf != NULL); /* guard the memset below against a failed allocation */
	memset(*buf, pattern, size);
}

static void
quick_test_complete(struct spdk_bdev_io *bdev_io, bool success, void *arg)
{
	g_completion_success = success;
	spdk_bdev_free_io(bdev_io);
	wake_ut_thread();
}

static uint64_t
bdev_bytes_to_blocks(struct spdk_bdev *bdev, uint64_t bytes)
{
	uint32_t block_size = spdk_bdev_get_block_size(bdev);

	CU_ASSERT(bytes % block_size == 0);
	return bytes / block_size;
}

static void
__blockdev_write(void *arg)
{
	struct bdevio_request *req = arg;
	struct io_target *target = req->target;
	int rc;

	if (req->iovcnt) {
		rc = spdk_bdev_writev(target->bdev_desc, target->ch, req->iov, req->iovcnt, req->offset,
				      req->data_len, quick_test_complete, NULL);
	} else {
		rc = spdk_bdev_write(target->bdev_desc, target->ch, req->buf, req->offset,
				     req->data_len, quick_test_complete, NULL);
	}

	if (rc) {
		g_completion_success = false;
		wake_ut_thread();
	}
}

static void
__blockdev_write_zeroes(void *arg)
{
	struct bdevio_request *req = arg;
	struct io_target *target = req->target;
	int rc;

	rc = spdk_bdev_write_zeroes(target->bdev_desc, target->ch, req->offset,
				    req->data_len, quick_test_complete, NULL);
	if (rc) {
		g_completion_success = false;
		wake_ut_thread();
	}
}

static void
__blockdev_compare_and_write(void *arg)
{
	struct bdevio_request *req = arg;
	struct io_target *target = req->target;
	struct spdk_bdev *bdev = target->bdev;
	int rc;

	rc = spdk_bdev_comparev_and_writev_blocks(target->bdev_desc, target->ch, req->iov, req->iovcnt,
			req->fused_iov, req->fused_iovcnt, bdev_bytes_to_blocks(bdev, req->offset),
			bdev_bytes_to_blocks(bdev, req->data_len), quick_test_complete, NULL);

	if (rc) {
		g_completion_success = false;
		wake_ut_thread();
	}
}

static void
sgl_chop_buffer(struct bdevio_request *req, int iov_len)
{
	int data_len = req->data_len;
	char *buf = req->buf;

	req->iovcnt = 0;
	if (!iov_len) {
		return;
	}

	for (; data_len > 0 && req->iovcnt < BUFFER_IOVS; req->iovcnt++) {
		if (data_len < iov_len) {
			iov_len = data_len;
		}

		req->iov[req->iovcnt].iov_base = buf;
		req->iov[req->iovcnt].iov_len = iov_len;

		buf += iov_len;
		data_len -= iov_len;
	}

	CU_ASSERT_EQUAL_FATAL(data_len, 0);
}

static void
sgl_chop_fused_buffer(struct bdevio_request *req, int iov_len)
{
	int data_len = req->data_len;
	char *buf = req->fused_buf;

	req->fused_iovcnt = 0;
	if (!iov_len) {
		return;
	}

	for (; data_len > 0 && req->fused_iovcnt < BUFFER_IOVS; req->fused_iovcnt++) {
		if (data_len < iov_len) {
			iov_len = data_len;
		}

		req->fused_iov[req->fused_iovcnt].iov_base = buf;
		req->fused_iov[req->fused_iovcnt].iov_len = iov_len;

		buf += iov_len;
		data_len -= iov_len;
	}

	CU_ASSERT_EQUAL_FATAL(data_len, 0);
}
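/*
 * The helpers below build a bdevio_request on the caller's stack. That is
 * safe only because execute_spdk_function() blocks until the I/O thread
 * signals completion, so the request cannot go out of scope while in flight.
 */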
static void
blockdev_write(struct io_target *target, char *tx_buf,
	       uint64_t offset, int data_len, int iov_len)
{
	struct bdevio_request req;

	req.target = target;
	req.buf = tx_buf;
	req.data_len = data_len;
	req.offset = offset;
	sgl_chop_buffer(&req, iov_len);

	g_completion_success = false;

	execute_spdk_function(__blockdev_write, &req);
}

static void
_blockdev_compare_and_write(struct io_target *target, char *cmp_buf, char *write_buf,
			    uint64_t offset, int data_len, int iov_len)
{
	struct bdevio_request req;

	req.target = target;
	req.buf = cmp_buf;
	req.fused_buf = write_buf;
	req.data_len = data_len;
	req.offset = offset;
	sgl_chop_buffer(&req, iov_len);
	sgl_chop_fused_buffer(&req, iov_len);

	g_completion_success = false;

	execute_spdk_function(__blockdev_compare_and_write, &req);
}

static void
blockdev_write_zeroes(struct io_target *target, char *tx_buf,
		      uint64_t offset, int data_len)
{
	struct bdevio_request req;

	req.target = target;
	req.buf = tx_buf;
	req.data_len = data_len;
	req.offset = offset;

	g_completion_success = false;

	execute_spdk_function(__blockdev_write_zeroes, &req);
}

static void
__blockdev_read(void *arg)
{
	struct bdevio_request *req = arg;
	struct io_target *target = req->target;
	int rc;

	if (req->iovcnt) {
		rc = spdk_bdev_readv(target->bdev_desc, target->ch, req->iov, req->iovcnt, req->offset,
				     req->data_len, quick_test_complete, NULL);
	} else {
		rc = spdk_bdev_read(target->bdev_desc, target->ch, req->buf, req->offset,
				    req->data_len, quick_test_complete, NULL);
	}

	if (rc) {
		g_completion_success = false;
		wake_ut_thread();
	}
}

static void
blockdev_read(struct io_target *target, char *rx_buf,
	      uint64_t offset, int data_len, int iov_len)
{
	struct bdevio_request req;

	req.target = target;
	req.buf = rx_buf;
	req.data_len = data_len;
	req.offset = offset;
	req.iovcnt = 0;
	sgl_chop_buffer(&req, iov_len);

	g_completion_success = false;

	execute_spdk_function(__blockdev_read, &req);
}

static int
blockdev_write_read_data_match(char *rx_buf, char *tx_buf, int data_length)
{
	int rc;

	rc = memcmp(rx_buf, tx_buf, data_length);

	spdk_free(rx_buf);
	spdk_free(tx_buf);

	return rc;
}
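/*
 * Generic write/read/verify sequence shared by most test cases below: write
 * a pattern (or write_zeroes), read the range back and, when the parameters
 * were expected to be valid (expected_rc == 0), compare the two buffers
 * byte for byte.
 */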
static void
blockdev_write_read(uint32_t data_length, uint32_t iov_len, int pattern, uint64_t offset,
		    int expected_rc, bool write_zeroes)
{
	struct io_target *target;
	char *tx_buf = NULL;
	char *rx_buf = NULL;
	int rc;

	target = g_current_io_target;

	if (!write_zeroes) {
		initialize_buffer(&tx_buf, pattern, data_length);
		initialize_buffer(&rx_buf, 0, data_length);

		blockdev_write(target, tx_buf, offset, data_length, iov_len);
	} else {
		initialize_buffer(&tx_buf, 0, data_length);
		initialize_buffer(&rx_buf, pattern, data_length);

		blockdev_write_zeroes(target, tx_buf, offset, data_length);
	}

	if (expected_rc == 0) {
		CU_ASSERT_EQUAL(g_completion_success, true);
	} else {
		CU_ASSERT_EQUAL(g_completion_success, false);
	}
	blockdev_read(target, rx_buf, offset, data_length, iov_len);

	if (expected_rc == 0) {
		CU_ASSERT_EQUAL(g_completion_success, true);
	} else {
		CU_ASSERT_EQUAL(g_completion_success, false);
	}

	if (g_completion_success) {
		rc = blockdev_write_read_data_match(rx_buf, tx_buf, data_length);
		/* Assert the write by comparing it with values read
		 * from each blockdev */
		CU_ASSERT_EQUAL(rc, 0);
	} else {
		/* Failure paths never reach the data-match helper (which frees
		 * the buffers), so release them here to avoid a leak. */
		spdk_free(rx_buf);
		spdk_free(tx_buf);
	}
}

static void
blockdev_compare_and_write(uint32_t data_length, uint32_t iov_len, uint64_t offset)
{
	struct io_target *target;
	char *tx_buf = NULL;
	char *write_buf = NULL;
	char *rx_buf = NULL;
	int rc;

	target = g_current_io_target;

	initialize_buffer(&tx_buf, 0xAA, data_length);
	initialize_buffer(&rx_buf, 0, data_length);
	initialize_buffer(&write_buf, 0xBB, data_length);

	blockdev_write(target, tx_buf, offset, data_length, iov_len);
	CU_ASSERT_EQUAL(g_completion_success, true);

	_blockdev_compare_and_write(target, tx_buf, write_buf, offset, data_length, iov_len);
	CU_ASSERT_EQUAL(g_completion_success, true);

	/* The second compare-and-write must fail: the first one already
	 * overwrote the range with 0xBB, so the compare against 0xAA misses. */
	_blockdev_compare_and_write(target, tx_buf, write_buf, offset, data_length, iov_len);
	CU_ASSERT_EQUAL(g_completion_success, false);

	blockdev_read(target, rx_buf, offset, data_length, iov_len);
	CU_ASSERT_EQUAL(g_completion_success, true);
	rc = blockdev_write_read_data_match(rx_buf, write_buf, data_length);
	/* Assert the write by comparing it with values read
	 * from each blockdev */
	CU_ASSERT_EQUAL(rc, 0);
}

static void
blockdev_write_read_block(void)
{
	uint32_t data_length;
	uint64_t offset;
	int pattern;
	int expected_rc;
	struct io_target *target = g_current_io_target;
	struct spdk_bdev *bdev = target->bdev;

	/* Data size = 1 block */
	data_length = spdk_bdev_get_block_size(bdev);
	CU_ASSERT_TRUE(data_length < BUFFER_SIZE);
	offset = 0;
	pattern = 0xA3;
	/* Params are valid, hence the expected return value
	 * of write and read for all blockdevs is 0. */
	expected_rc = 0;

	blockdev_write_read(data_length, 0, pattern, offset, expected_rc, 0);
}

static void
blockdev_write_zeroes_read_block(void)
{
	uint32_t data_length;
	uint64_t offset;
	int pattern;
	int expected_rc;
	struct io_target *target = g_current_io_target;
	struct spdk_bdev *bdev = target->bdev;

	/* Data size = 1 block */
	data_length = spdk_bdev_get_block_size(bdev);
	offset = 0;
	pattern = 0xA3;
	/* Params are valid, hence the expected return value
	 * of write_zeroes and read for all blockdevs is 0. */
	expected_rc = 0;

	blockdev_write_read(data_length, 0, pattern, offset, expected_rc, 1);
}
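/*
 * The write_zeroes split tests below size their I/O relative to
 * ZERO_BUFFER_SIZE (from bdev_internal.h), the zero buffer the bdev layer
 * uses when it has to emulate write_zeroes with plain writes; an I/O larger
 * than that buffer forces the emulation path to split the request.
 */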
/*
 * This i/o will not have to split at the bdev layer.
 */
static void
blockdev_write_zeroes_read_no_split(void)
{
	uint32_t data_length;
	uint64_t offset;
	int pattern;
	int expected_rc;
	struct io_target *target = g_current_io_target;
	struct spdk_bdev *bdev = target->bdev;

	/* Data size = block size aligned ZERO_BUFFER_SIZE */
	data_length = ZERO_BUFFER_SIZE; /* from bdev_internal.h */
	data_length -= ZERO_BUFFER_SIZE % spdk_bdev_get_block_size(bdev);
	offset = 0;
	pattern = 0xA3;
	/* Params are valid, hence the expected return value
	 * of write_zeroes and read for all blockdevs is 0. */
	expected_rc = 0;

	blockdev_write_read(data_length, 0, pattern, offset, expected_rc, 1);
}

/*
 * This i/o will have to split at the bdev layer if
 * write-zeroes is not supported by the bdev.
 */
static void
blockdev_write_zeroes_read_split(void)
{
	uint32_t data_length;
	uint64_t offset;
	int pattern;
	int expected_rc;
	struct io_target *target = g_current_io_target;
	struct spdk_bdev *bdev = target->bdev;

	/* Data size = block size aligned 3 * ZERO_BUFFER_SIZE */
	data_length = 3 * ZERO_BUFFER_SIZE; /* from bdev_internal.h */
	data_length -= data_length % spdk_bdev_get_block_size(bdev);
	offset = 0;
	pattern = 0xA3;
	/* Params are valid, hence the expected return value
	 * of write_zeroes and read for all blockdevs is 0. */
	expected_rc = 0;

	blockdev_write_read(data_length, 0, pattern, offset, expected_rc, 1);
}

/*
 * This i/o will have to split at the bdev layer if
 * write-zeroes is not supported by the bdev. It also
 * tests a write size that is not an even multiple of
 * the bdev layer zero buffer size.
 */
static void
blockdev_write_zeroes_read_split_partial(void)
{
	uint32_t data_length;
	uint64_t offset;
	int pattern;
	int expected_rc;
	struct io_target *target = g_current_io_target;
	struct spdk_bdev *bdev = target->bdev;
	uint32_t block_size = spdk_bdev_get_block_size(bdev);

	/* Data size = block size aligned 7 * ZERO_BUFFER_SIZE / 2 */
	data_length = ZERO_BUFFER_SIZE * 7 / 2;
	data_length -= data_length % block_size;
	offset = 0;
	pattern = 0xA3;
	/* Params are valid, hence the expected return value
	 * of write_zeroes and read for all blockdevs is 0. */
	expected_rc = 0;

	blockdev_write_read(data_length, 0, pattern, offset, expected_rc, 1);
}

static void
blockdev_writev_readv_block(void)
{
	uint32_t data_length, iov_len;
	uint64_t offset;
	int pattern;
	int expected_rc;
	struct io_target *target = g_current_io_target;
	struct spdk_bdev *bdev = target->bdev;

	/* Data size = 1 block */
	data_length = spdk_bdev_get_block_size(bdev);
	iov_len = data_length;
	CU_ASSERT_TRUE(data_length < BUFFER_SIZE);
	offset = 0;
	pattern = 0xA3;
	/* Params are valid, hence the expected return value
	 * of write and read for all blockdevs is 0. */
	expected_rc = 0;

	blockdev_write_read(data_length, iov_len, pattern, offset, expected_rc, 0);
}
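/*
 * The next test sizes the fused request to the bdev's atomic
 * compare-and-write unit (ACWU, in blocks); the bdev layer rejects fused
 * requests larger than that unit.
 */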
static void
blockdev_comparev_and_writev(void)
{
	uint32_t data_length, iov_len;
	uint64_t offset;
	struct io_target *target = g_current_io_target;
	struct spdk_bdev *bdev = target->bdev;

	if (spdk_bdev_is_md_separate(bdev)) {
		/* TODO: remove this check once bdev layer properly supports
		 * compare and write for bdevs with separate md.
		 */
		SPDK_ERRLOG("skipping comparev_and_writev on bdev %s since it has\n"
			    "separate metadata which is not supported yet.\n",
			    spdk_bdev_get_name(bdev));
		return;
	}

	/* Data size = acwu size */
	data_length = spdk_bdev_get_block_size(bdev) * spdk_bdev_get_acwu(bdev);
	iov_len = data_length;
	CU_ASSERT_TRUE(data_length < BUFFER_SIZE);
	offset = 0;

	blockdev_compare_and_write(data_length, iov_len, offset);
}

static void
blockdev_writev_readv_30x1block(void)
{
	uint32_t data_length, iov_len;
	uint64_t offset;
	int pattern;
	int expected_rc;
	struct io_target *target = g_current_io_target;
	struct spdk_bdev *bdev = target->bdev;
	uint32_t block_size = spdk_bdev_get_block_size(bdev);

	/* Data size = 30 * block size */
	data_length = block_size * 30;
	iov_len = block_size;
	CU_ASSERT_TRUE(data_length < BUFFER_SIZE);
	offset = 0;
	pattern = 0xA3;
	/* Params are valid, hence the expected return value
	 * of write and read for all blockdevs is 0. */
	expected_rc = 0;

	blockdev_write_read(data_length, iov_len, pattern, offset, expected_rc, 0);
}

static void
blockdev_write_read_8blocks(void)
{
	uint32_t data_length;
	uint64_t offset;
	int pattern;
	int expected_rc;
	struct io_target *target = g_current_io_target;
	struct spdk_bdev *bdev = target->bdev;

	/* Data size = 8 * block size */
	data_length = spdk_bdev_get_block_size(bdev) * 8;
	CU_ASSERT_TRUE(data_length < BUFFER_SIZE);
	offset = data_length;
	pattern = 0xA3;
	/* Params are valid, hence the expected return value
	 * of write and read for all blockdevs is 0. */
	expected_rc = 0;

	blockdev_write_read(data_length, 0, pattern, offset, expected_rc, 0);
}

static void
blockdev_writev_readv_8blocks(void)
{
	uint32_t data_length, iov_len;
	uint64_t offset;
	int pattern;
	int expected_rc;
	struct io_target *target = g_current_io_target;
	struct spdk_bdev *bdev = target->bdev;

	/* Data size = 8 * block size */
	data_length = spdk_bdev_get_block_size(bdev) * 8;
	iov_len = data_length;
	CU_ASSERT_TRUE(data_length < BUFFER_SIZE);
	offset = data_length;
	pattern = 0xA3;
	/* Params are valid, hence the expected return value
	 * of write and read for all blockdevs is 0. */
	expected_rc = 0;

	blockdev_write_read(data_length, iov_len, pattern, offset, expected_rc, 0);
}

static void
blockdev_write_read_size_gt_128k(void)
{
	uint32_t data_length;
	uint64_t offset;
	int pattern;
	int expected_rc;
	struct io_target *target = g_current_io_target;
	struct spdk_bdev *bdev = target->bdev;
	uint32_t block_size = spdk_bdev_get_block_size(bdev);

	/* Data size = block size aligned 128K + 1 block */
	data_length = 128 * 1024;
	data_length -= data_length % block_size;
	data_length += block_size;
	CU_ASSERT_TRUE(data_length < BUFFER_SIZE);
	offset = block_size * 2;
	pattern = 0xA3;
	/* Params are valid, hence the expected return value
	 * of write and read for all blockdevs is 0. */
	expected_rc = 0;

	blockdev_write_read(data_length, 0, pattern, offset, expected_rc, 0);
}
static void
blockdev_writev_readv_size_gt_128k(void)
{
	uint32_t data_length, iov_len;
	uint64_t offset;
	int pattern;
	int expected_rc;
	struct io_target *target = g_current_io_target;
	struct spdk_bdev *bdev = target->bdev;
	uint32_t block_size = spdk_bdev_get_block_size(bdev);

	/* Data size = block size aligned 128K + 1 block */
	data_length = 128 * 1024;
	data_length -= data_length % block_size;
	data_length += block_size;
	iov_len = data_length;
	CU_ASSERT_TRUE(data_length < BUFFER_SIZE);
	offset = block_size * 2;
	pattern = 0xA3;
	/* Params are valid, hence the expected return value
	 * of write and read for all blockdevs is 0. */
	expected_rc = 0;

	blockdev_write_read(data_length, iov_len, pattern, offset, expected_rc, 0);
}

static void
blockdev_writev_readv_size_gt_128k_two_iov(void)
{
	uint32_t data_length, iov_len;
	uint64_t offset;
	int pattern;
	int expected_rc;
	struct io_target *target = g_current_io_target;
	struct spdk_bdev *bdev = target->bdev;
	uint32_t block_size = spdk_bdev_get_block_size(bdev);

	/* Data size = block size aligned 128K + 1 block */
	data_length = 128 * 1024;
	data_length -= data_length % block_size;
	iov_len = data_length;
	data_length += block_size;
	CU_ASSERT_TRUE(data_length < BUFFER_SIZE);
	offset = block_size * 2;
	pattern = 0xA3;
	/* Params are valid, hence the expected return value
	 * of write and read for all blockdevs is 0. */
	expected_rc = 0;

	blockdev_write_read(data_length, iov_len, pattern, offset, expected_rc, 0);
}

static void
blockdev_write_read_invalid_size(void)
{
	uint32_t data_length;
	uint64_t offset;
	int pattern;
	int expected_rc;
	struct io_target *target = g_current_io_target;
	struct spdk_bdev *bdev = target->bdev;
	uint32_t block_size = spdk_bdev_get_block_size(bdev);

	/* Data size is not a multiple of the block size */
	data_length = block_size - 1;
	CU_ASSERT_TRUE(data_length < BUFFER_SIZE);
	offset = block_size * 2;
	pattern = 0xA3;
	/* Params are invalid, hence the expected return value
	 * of write and read for all blockdevs is < 0 */
	expected_rc = -1;

	blockdev_write_read(data_length, 0, pattern, offset, expected_rc, 0);
}

static void
blockdev_write_read_offset_plus_nbytes_equals_bdev_size(void)
{
	struct io_target *target;
	struct spdk_bdev *bdev;
	char *tx_buf = NULL;
	char *rx_buf = NULL;
	uint64_t offset;
	uint32_t block_size;
	int rc;

	target = g_current_io_target;
	bdev = target->bdev;

	block_size = spdk_bdev_get_block_size(bdev);

	/* The start offset has been set to a marginal value
	 * such that offset + nbytes == Total size of
	 * blockdev. */
	offset = ((spdk_bdev_get_num_blocks(bdev) - 1) * block_size);

	initialize_buffer(&tx_buf, 0xA3, block_size);
	initialize_buffer(&rx_buf, 0, block_size);

	blockdev_write(target, tx_buf, offset, block_size, 0);
	CU_ASSERT_EQUAL(g_completion_success, true);

	blockdev_read(target, rx_buf, offset, block_size, 0);
	CU_ASSERT_EQUAL(g_completion_success, true);

	rc = blockdev_write_read_data_match(rx_buf, tx_buf, block_size);
	/* Assert the write by comparing it with values read
	 * from each blockdev */
	CU_ASSERT_EQUAL(rc, 0);
}
static void
blockdev_write_read_offset_plus_nbytes_gt_bdev_size(void)
{
	struct io_target *target = g_current_io_target;
	struct spdk_bdev *bdev = target->bdev;
	char *tx_buf = NULL;
	char *rx_buf = NULL;
	int data_length;
	uint64_t offset;
	int pattern;
	uint32_t block_size = spdk_bdev_get_block_size(bdev);

	/* Tests the overflow condition of the blockdevs. */
	data_length = block_size * 2;
	CU_ASSERT_TRUE(data_length < BUFFER_SIZE);
	pattern = 0xA3;

	/* The start offset has been set to a valid value
	 * but offset + nbytes is greater than the Total size
	 * of the blockdev. The test should fail. */
	offset = (spdk_bdev_get_num_blocks(bdev) - 1) * block_size;

	initialize_buffer(&tx_buf, pattern, data_length);
	initialize_buffer(&rx_buf, 0, data_length);

	blockdev_write(target, tx_buf, offset, data_length, 0);
	CU_ASSERT_EQUAL(g_completion_success, false);

	blockdev_read(target, rx_buf, offset, data_length, 0);
	CU_ASSERT_EQUAL(g_completion_success, false);

	/* Both I/Os are expected to fail, so the data-match helper (which
	 * frees the buffers) is never called; free them here. */
	spdk_free(rx_buf);
	spdk_free(tx_buf);
}

static void
blockdev_write_read_max_offset(void)
{
	int data_length;
	uint64_t offset;
	int pattern;
	int expected_rc;
	struct io_target *target = g_current_io_target;
	struct spdk_bdev *bdev = target->bdev;

	data_length = spdk_bdev_get_block_size(bdev);
	CU_ASSERT_TRUE(data_length < BUFFER_SIZE);
	/* The start offset has been set to UINT64_MAX such that
	 * adding nbytes wraps around and points to an invalid address. */
	offset = UINT64_MAX;
	pattern = 0xA3;
	/* Params are invalid, hence the expected return value
	 * of write and read for all blockdevs is < 0 */
	expected_rc = -1;

	blockdev_write_read(data_length, 0, pattern, offset, expected_rc, 0);
}

static void
blockdev_overlapped_write_read_2blocks(void)
{
	int data_length;
	uint64_t offset;
	int pattern;
	int expected_rc;
	struct io_target *target = g_current_io_target;
	struct spdk_bdev *bdev = target->bdev;

	/* Data size = 2 blocks */
	data_length = spdk_bdev_get_block_size(bdev) * 2;
	CU_ASSERT_TRUE(data_length < BUFFER_SIZE);
	offset = 0;
	pattern = 0xA3;
	/* Params are valid, hence the expected return value
	 * of write and read for all blockdevs is 0. */
	expected_rc = 0;
	/* Assert the write by comparing it with values read
	 * from the same offset for each blockdev */
	blockdev_write_read(data_length, 0, pattern, offset, expected_rc, 0);

	/* Overwrite the pattern 0xbb of size 2*block size on an address offset
	 * overlapping with the address written above and assert the new value in
	 * the overlapped address range */
	/* Populate 2*block size with value 0xBB */
	pattern = 0xBB;
	/* Offset = 1 block; Overlap offset addresses and write value 0xbb */
	offset = spdk_bdev_get_block_size(bdev);
	/* Assert the write by comparing it with values read
	 * from the overlapped offset for each blockdev */
	blockdev_write_read(data_length, 0, pattern, offset, expected_rc, 0);
}
static void
__blockdev_reset(void *arg)
{
	struct bdevio_request *req = arg;
	struct io_target *target = req->target;
	int rc;

	rc = spdk_bdev_reset(target->bdev_desc, target->ch, quick_test_complete, NULL);
	if (rc < 0) {
		g_completion_success = false;
		wake_ut_thread();
	}
}

static void
blockdev_test_reset(void)
{
	struct bdevio_request req;
	struct io_target *target;
	bool reset_supported;

	target = g_current_io_target;
	req.target = target;

	reset_supported = spdk_bdev_io_type_supported(target->bdev, SPDK_BDEV_IO_TYPE_RESET);
	g_completion_success = false;

	execute_spdk_function(__blockdev_reset, &req);

	CU_ASSERT_EQUAL(g_completion_success, reset_supported);
}

struct bdevio_passthrough_request {
	struct spdk_nvme_cmd cmd;
	void *buf;
	uint32_t len;
	struct io_target *target;
	int sct;
	int sc;
	uint32_t cdw0;
};

static void
nvme_pt_test_complete(struct spdk_bdev_io *bdev_io, bool success, void *arg)
{
	struct bdevio_passthrough_request *pt_req = arg;

	spdk_bdev_io_get_nvme_status(bdev_io, &pt_req->cdw0, &pt_req->sct, &pt_req->sc);
	spdk_bdev_free_io(bdev_io);
	wake_ut_thread();
}

static void
__blockdev_nvme_passthru(void *arg)
{
	struct bdevio_passthrough_request *pt_req = arg;
	struct io_target *target = pt_req->target;
	int rc;

	rc = spdk_bdev_nvme_io_passthru(target->bdev_desc, target->ch,
					&pt_req->cmd, pt_req->buf, pt_req->len,
					nvme_pt_test_complete, pt_req);
	if (rc) {
		wake_ut_thread();
	}
}
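/*
 * The passthru tests prime sct/sc with values a successful command can never
 * return (e.g. SPDK_NVME_SCT_VENDOR_SPECIFIC); if the submit path fails and
 * the completion callback never runs, the stale sentinels make the
 * assertions below fail instead of passing silently.
 */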
static void
blockdev_test_nvme_passthru_rw(void)
{
	struct bdevio_passthrough_request pt_req;
	void *write_buf, *read_buf;
	struct io_target *target;

	target = g_current_io_target;

	if (!spdk_bdev_io_type_supported(target->bdev, SPDK_BDEV_IO_TYPE_NVME_IO)) {
		return;
	}

	memset(&pt_req, 0, sizeof(pt_req));
	pt_req.target = target;
	pt_req.cmd.opc = SPDK_NVME_OPC_WRITE;
	pt_req.cmd.nsid = 1;
	*(uint64_t *)&pt_req.cmd.cdw10 = 4;	/* SLBA spans cdw10/cdw11 */
	pt_req.cmd.cdw12 = 0;			/* NLB is zero-based, so this is one block */

	pt_req.len = spdk_bdev_get_block_size(target->bdev);
	write_buf = spdk_malloc(pt_req.len, 0, NULL, SPDK_ENV_LCORE_ID_ANY, SPDK_MALLOC_DMA);
	memset(write_buf, 0xA5, pt_req.len);
	pt_req.buf = write_buf;

	pt_req.sct = SPDK_NVME_SCT_VENDOR_SPECIFIC;
	pt_req.sc = SPDK_NVME_SC_INVALID_FIELD;
	execute_spdk_function(__blockdev_nvme_passthru, &pt_req);
	CU_ASSERT(pt_req.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(pt_req.sc == SPDK_NVME_SC_SUCCESS);

	pt_req.cmd.opc = SPDK_NVME_OPC_READ;
	read_buf = spdk_zmalloc(pt_req.len, 0, NULL, SPDK_ENV_LCORE_ID_ANY, SPDK_MALLOC_DMA);
	pt_req.buf = read_buf;

	pt_req.sct = SPDK_NVME_SCT_VENDOR_SPECIFIC;
	pt_req.sc = SPDK_NVME_SC_INVALID_FIELD;
	execute_spdk_function(__blockdev_nvme_passthru, &pt_req);
	CU_ASSERT(pt_req.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(pt_req.sc == SPDK_NVME_SC_SUCCESS);

	CU_ASSERT(!memcmp(read_buf, write_buf, pt_req.len));
	spdk_free(read_buf);
	spdk_free(write_buf);
}

static void
blockdev_test_nvme_passthru_vendor_specific(void)
{
	struct bdevio_passthrough_request pt_req;
	struct io_target *target;

	target = g_current_io_target;

	if (!spdk_bdev_io_type_supported(target->bdev, SPDK_BDEV_IO_TYPE_NVME_IO)) {
		return;
	}

	memset(&pt_req, 0, sizeof(pt_req));
	pt_req.target = target;
	pt_req.cmd.opc = 0x7F;	/* choose known invalid opcode */
	pt_req.cmd.nsid = 1;

	pt_req.sct = SPDK_NVME_SCT_VENDOR_SPECIFIC;
	pt_req.sc = SPDK_NVME_SC_SUCCESS;
	pt_req.cdw0 = 0xbeef;
	execute_spdk_function(__blockdev_nvme_passthru, &pt_req);
	CU_ASSERT(pt_req.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(pt_req.sc == SPDK_NVME_SC_INVALID_OPCODE);
	CU_ASSERT(pt_req.cdw0 == 0x0);
}

static void
__blockdev_nvme_admin_passthru(void *arg)
{
	struct bdevio_passthrough_request *pt_req = arg;
	struct io_target *target = pt_req->target;
	int rc;

	rc = spdk_bdev_nvme_admin_passthru(target->bdev_desc, target->ch,
					   &pt_req->cmd, pt_req->buf, pt_req->len,
					   nvme_pt_test_complete, pt_req);
	if (rc) {
		wake_ut_thread();
	}
}

static void
blockdev_test_nvme_admin_passthru(void)
{
	struct io_target *target;
	struct bdevio_passthrough_request pt_req;

	target = g_current_io_target;

	if (!spdk_bdev_io_type_supported(target->bdev, SPDK_BDEV_IO_TYPE_NVME_ADMIN)) {
		return;
	}

	memset(&pt_req, 0, sizeof(pt_req));
	pt_req.target = target;
	pt_req.cmd.opc = SPDK_NVME_OPC_IDENTIFY;
	pt_req.cmd.nsid = 0;
	*(uint64_t *)&pt_req.cmd.cdw10 = SPDK_NVME_IDENTIFY_CTRLR;

	pt_req.len = sizeof(struct spdk_nvme_ctrlr_data);
	pt_req.buf = spdk_malloc(pt_req.len, 0, NULL, SPDK_ENV_LCORE_ID_ANY, SPDK_MALLOC_DMA);

	pt_req.sct = SPDK_NVME_SCT_GENERIC;
	pt_req.sc = SPDK_NVME_SC_SUCCESS;
	execute_spdk_function(__blockdev_nvme_admin_passthru, &pt_req);
	CU_ASSERT(pt_req.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(pt_req.sc == SPDK_NVME_SC_SUCCESS);

	spdk_free(pt_req.buf); /* identify payload is no longer needed */
}

static void
__stop_init_thread(void *arg)
{
	unsigned num_failures = g_num_failures;
	struct spdk_jsonrpc_request *request = arg;

	g_num_failures = 0;

	bdevio_cleanup_targets();
	if (g_wait_for_tests && !g_shutdown) {
		/* Do not stop the app yet, wait for another RPC */
		rpc_perform_tests_cb(num_failures, request);
		return;
	}
	spdk_app_stop(num_failures);
}

static void
stop_init_thread(unsigned num_failures, struct spdk_jsonrpc_request *request)
{
	g_num_failures = num_failures;

	spdk_thread_send_msg(g_thread_init, __stop_init_thread, request);
}
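/*
 * One CUnit suite is registered per target; suite_init/suite_fini advance
 * g_current_io_target so that each suite's tests run against its own bdev.
 */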
static int
suite_init(void)
{
	if (g_current_io_target == NULL) {
		g_current_io_target = g_io_targets;
	}
	return 0;
}

static int
suite_fini(void)
{
	g_current_io_target = g_current_io_target->next;
	return 0;
}

#define SUITE_NAME_MAX 64

static int
__setup_ut_on_single_target(struct io_target *target)
{
	int rc = 0;
	CU_pSuite suite = NULL;
	char name[SUITE_NAME_MAX];

	snprintf(name, sizeof(name), "bdevio tests on: %s", spdk_bdev_get_name(target->bdev));
	suite = CU_add_suite(name, suite_init, suite_fini);
	if (suite == NULL) {
		CU_cleanup_registry();
		rc = CU_get_error();
		return -rc;
	}

	if (
		CU_add_test(suite, "blockdev write read block",
			    blockdev_write_read_block) == NULL
		|| CU_add_test(suite, "blockdev write zeroes read block",
			       blockdev_write_zeroes_read_block) == NULL
		|| CU_add_test(suite, "blockdev write zeroes read no split",
			       blockdev_write_zeroes_read_no_split) == NULL
		|| CU_add_test(suite, "blockdev write zeroes read split",
			       blockdev_write_zeroes_read_split) == NULL
		|| CU_add_test(suite, "blockdev write zeroes read split partial",
			       blockdev_write_zeroes_read_split_partial) == NULL
		|| CU_add_test(suite, "blockdev reset",
			       blockdev_test_reset) == NULL
		|| CU_add_test(suite, "blockdev write read 8 blocks",
			       blockdev_write_read_8blocks) == NULL
		|| CU_add_test(suite, "blockdev write read size > 128k",
			       blockdev_write_read_size_gt_128k) == NULL
		|| CU_add_test(suite, "blockdev write read invalid size",
			       blockdev_write_read_invalid_size) == NULL
		|| CU_add_test(suite, "blockdev write read offset + nbytes == size of blockdev",
			       blockdev_write_read_offset_plus_nbytes_equals_bdev_size) == NULL
		|| CU_add_test(suite, "blockdev write read offset + nbytes > size of blockdev",
			       blockdev_write_read_offset_plus_nbytes_gt_bdev_size) == NULL
		|| CU_add_test(suite, "blockdev write read max offset",
			       blockdev_write_read_max_offset) == NULL
		|| CU_add_test(suite, "blockdev write read 2 blocks on overlapped address offset",
			       blockdev_overlapped_write_read_2blocks) == NULL
		|| CU_add_test(suite, "blockdev writev readv 8 blocks",
			       blockdev_writev_readv_8blocks) == NULL
		|| CU_add_test(suite, "blockdev writev readv 30 x 1block",
			       blockdev_writev_readv_30x1block) == NULL
		|| CU_add_test(suite, "blockdev writev readv block",
			       blockdev_writev_readv_block) == NULL
		|| CU_add_test(suite, "blockdev writev readv size > 128k",
			       blockdev_writev_readv_size_gt_128k) == NULL
		|| CU_add_test(suite, "blockdev writev readv size > 128k in two iovs",
			       blockdev_writev_readv_size_gt_128k_two_iov) == NULL
		|| CU_add_test(suite, "blockdev comparev and writev",
			       blockdev_comparev_and_writev) == NULL
		|| CU_add_test(suite, "blockdev nvme passthru rw",
			       blockdev_test_nvme_passthru_rw) == NULL
		|| CU_add_test(suite, "blockdev nvme passthru vendor specific",
			       blockdev_test_nvme_passthru_vendor_specific) == NULL
		|| CU_add_test(suite, "blockdev nvme admin passthru",
			       blockdev_test_nvme_admin_passthru) == NULL
	) {
		CU_cleanup_registry();
		rc = CU_get_error();
		return -rc;
	}
	return 0;
}

static void
__run_ut_thread(void *arg)
{
	struct spdk_jsonrpc_request *request = arg;
	int rc = 0;
	struct io_target *target;
	unsigned num_failures;

	if (CU_initialize_registry() != CUE_SUCCESS) {
		/* CUnit error, probably won't recover */
		rc = CU_get_error();
		stop_init_thread(-rc, request);
		return; /* do not fall through and run tests on a broken registry */
	}

	target = g_io_targets;
	while (target != NULL) {
		rc = __setup_ut_on_single_target(target);
		if (rc < 0) {
			/* CUnit error, probably won't recover */
			stop_init_thread(-rc, request);
			return; /* avoid stopping the init thread twice */
		}
		target = target->next;
	}
	CU_basic_set_mode(CU_BRM_VERBOSE);
	CU_basic_run_tests();
	num_failures = CU_get_number_of_failures();
	CU_cleanup_registry();

	stop_init_thread(num_failures, request);
}
static void
__construct_targets(void *arg)
{
	if (bdevio_construct_targets() < 0) {
		spdk_app_stop(-1);
		return;
	}

	spdk_thread_send_msg(g_thread_ut, __run_ut_thread, NULL);
}

static void
test_main(void *arg1)
{
	struct spdk_cpuset tmpmask = {};
	uint32_t i;

	pthread_mutex_init(&g_test_mutex, NULL);
	pthread_cond_init(&g_test_cond, NULL);

	/* This test runs specifically on at least three cores.
	 * g_thread_init is the app_thread on main core from event framework.
	 * Next two are only for the tests and should always be on separate CPU cores. */
	if (spdk_env_get_core_count() < 3) {
		spdk_app_stop(-1);
		return;
	}

	SPDK_ENV_FOREACH_CORE(i) {
		if (i == spdk_env_get_current_core()) {
			g_thread_init = spdk_get_thread();
			continue;
		}
		spdk_cpuset_zero(&tmpmask);
		spdk_cpuset_set_cpu(&tmpmask, i, true);
		if (g_thread_ut == NULL) {
			g_thread_ut = spdk_thread_create("ut_thread", &tmpmask);
		} else if (g_thread_io == NULL) {
			g_thread_io = spdk_thread_create("io_thread", &tmpmask);
		}
	}

	if (g_wait_for_tests) {
		/* Do not perform any tests until RPC is received */
		return;
	}

	spdk_thread_send_msg(g_thread_init, __construct_targets, NULL);
}

static void
bdevio_usage(void)
{
	printf(" -w    start bdevio app and wait for RPC to start the tests\n");
}

static int
bdevio_parse_arg(int ch, char *arg)
{
	switch (ch) {
	case 'w':
		g_wait_for_tests = true;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

struct rpc_perform_tests {
	char *name;
};

static void
free_rpc_perform_tests(struct rpc_perform_tests *r)
{
	free(r->name);
}

static const struct spdk_json_object_decoder rpc_perform_tests_decoders[] = {
	{"name", offsetof(struct rpc_perform_tests, name), spdk_json_decode_string, true},
};

static void
rpc_perform_tests_cb(unsigned num_failures, struct spdk_jsonrpc_request *request)
{
	struct spdk_json_write_ctx *w;

	if (num_failures == 0) {
		w = spdk_jsonrpc_begin_result(request);
		spdk_json_write_uint32(w, num_failures);
		spdk_jsonrpc_end_result(request, w);
	} else {
		spdk_jsonrpc_send_error_response_fmt(request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR,
						     "%d test cases failed", num_failures);
	}
}

static void
rpc_perform_tests(struct spdk_jsonrpc_request *request, const struct spdk_json_val *params)
{
	struct rpc_perform_tests req = {NULL};
	struct spdk_bdev *bdev;
	int rc;

	if (params && spdk_json_decode_object(params, rpc_perform_tests_decoders,
					      SPDK_COUNTOF(rpc_perform_tests_decoders),
					      &req)) {
		SPDK_ERRLOG("spdk_json_decode_object failed\n");
		spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INVALID_PARAMS, "Invalid parameters");
		goto invalid;
	}

	if (req.name) {
		bdev = spdk_bdev_get_by_name(req.name);
		if (bdev == NULL) {
			SPDK_ERRLOG("Bdev '%s' does not exist\n", req.name);
			spdk_jsonrpc_send_error_response_fmt(request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR,
							     "Bdev '%s' does not exist: %s",
							     req.name, spdk_strerror(ENODEV));
			goto invalid;
		}
		rc = bdevio_construct_target(bdev);
		if (rc < 0) {
			SPDK_ERRLOG("Could not construct target for bdev '%s'\n", spdk_bdev_get_name(bdev));
			spdk_jsonrpc_send_error_response_fmt(request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR,
							     "Could not construct target for bdev '%s': %s",
							     spdk_bdev_get_name(bdev), spdk_strerror(-rc));
			goto invalid;
		}
	} else {
		rc = bdevio_construct_targets();
		if (rc < 0) {
			SPDK_ERRLOG("Could not construct targets for all bdevs\n");
			spdk_jsonrpc_send_error_response_fmt(request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR,
							     "Could not construct targets for all bdevs: %s",
							     spdk_strerror(-rc));
			goto invalid;
		}
	}
	free_rpc_perform_tests(&req);

	spdk_thread_send_msg(g_thread_ut, __run_ut_thread, request);

	return;

invalid:
	free_rpc_perform_tests(&req);
}
SPDK_RPC_REGISTER("perform_tests", rpc_perform_tests, SPDK_RPC_RUNTIME)
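/*
 * Example (assuming the app was started with -w and the standard SPDK
 * JSON-RPC socket): a request such as
 *
 *   {"jsonrpc": "2.0", "id": 1, "method": "perform_tests",
 *    "params": {"name": "Malloc0"}}
 *
 * runs the suites against the named bdev only; omit "params" to test every
 * leaf bdev. "Malloc0" is just an illustrative bdev name.
 */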
static void
spdk_bdevio_shutdown_cb(void)
{
	g_shutdown = true;
	spdk_thread_send_msg(g_thread_init, __stop_init_thread, NULL);
}

int
main(int argc, char **argv)
{
	int rc;
	struct spdk_app_opts opts = {};

	spdk_app_opts_init(&opts, sizeof(opts));
	opts.name = "bdevio";
	opts.reactor_mask = "0x7";
	opts.shutdown_cb = spdk_bdevio_shutdown_cb;

	if ((rc = spdk_app_parse_args(argc, argv, &opts, "w", NULL,
				      bdevio_parse_arg, bdevio_usage)) !=
	    SPDK_APP_PARSE_ARGS_SUCCESS) {
		return rc;
	}

	rc = spdk_app_start(&opts, test_main, NULL);
	spdk_app_fini();

	return rc;
}