/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (c) Intel Corporation.
 * All rights reserved.
 * Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 */

#include "spdk/stdinc.h"

#include "spdk/bdev.h"
#include "spdk/accel_engine.h"
#include "spdk/env.h"
#include "spdk/log.h"
#include "spdk/thread.h"
#include "spdk/event.h"
#include "spdk/rpc.h"
#include "spdk/util.h"
#include "spdk/string.h"

#include "bdev_internal.h"
#include "CUnit/Basic.h"

#define BUFFER_IOVS 1024
#define BUFFER_SIZE (260 * 1024)
#define BDEV_TASK_ARRAY_SIZE 2048

pthread_mutex_t g_test_mutex;
pthread_cond_t g_test_cond;

static struct spdk_thread *g_thread_init;
static struct spdk_thread *g_thread_ut;
static struct spdk_thread *g_thread_io;
static bool g_wait_for_tests = false;
static int g_num_failures = 0;
static bool g_shutdown = false;

struct io_target {
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *bdev_desc;
	struct spdk_io_channel *ch;
	struct io_target *next;
};

struct bdevio_request {
	char *buf;
	char *fused_buf;
	int data_len;
	uint64_t offset;
	struct iovec iov[BUFFER_IOVS];
	int iovcnt;
	struct iovec fused_iov[BUFFER_IOVS];
	int fused_iovcnt;
	struct io_target *target;
};

struct io_target *g_io_targets = NULL;
struct io_target *g_current_io_target = NULL;
static void rpc_perform_tests_cb(unsigned num_failures, struct spdk_jsonrpc_request *request);

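/*
 * Cross-thread execution helper: the unit-test thread sends fn to the
 * I/O thread and blocks on g_test_cond until the operation's completion
 * path calls wake_ut_thread(). Only one such operation is outstanding at
 * a time, which is why a single global mutex/condvar pair suffices.
 */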
static void
execute_spdk_function(spdk_msg_fn fn, void *arg)
{
	pthread_mutex_lock(&g_test_mutex);
	spdk_thread_send_msg(g_thread_io, fn, arg);
	pthread_cond_wait(&g_test_cond, &g_test_mutex);
	pthread_mutex_unlock(&g_test_mutex);
}

static void
wake_ut_thread(void)
{
	pthread_mutex_lock(&g_test_mutex);
	pthread_cond_signal(&g_test_cond);
	pthread_mutex_unlock(&g_test_mutex);
}

static void
__get_io_channel(void *arg)
{
	struct io_target *target = arg;

	target->ch = spdk_bdev_get_io_channel(target->bdev_desc);
	assert(target->ch);
	wake_ut_thread();
}

static void
bdevio_construct_target_open_cb(enum spdk_bdev_event_type type, struct spdk_bdev *bdev,
				void *event_ctx)
{
}

static int
bdevio_construct_target(struct spdk_bdev *bdev)
{
	struct io_target *target;
	int rc;
	uint64_t num_blocks = spdk_bdev_get_num_blocks(bdev);
	uint32_t block_size = spdk_bdev_get_block_size(bdev);

	target = malloc(sizeof(struct io_target));
	if (target == NULL) {
		return -ENOMEM;
	}

	rc = spdk_bdev_open_ext(spdk_bdev_get_name(bdev), true, bdevio_construct_target_open_cb, NULL,
				&target->bdev_desc);
	if (rc != 0) {
		free(target);
		SPDK_ERRLOG("Could not open leaf bdev %s, error=%d\n", spdk_bdev_get_name(bdev), rc);
		return rc;
	}

	printf(" %s: %" PRIu64 " blocks of %" PRIu32 " bytes (%" PRIu64 " MiB)\n",
	       spdk_bdev_get_name(bdev),
	       num_blocks, block_size,
	       (num_blocks * block_size + 1024 * 1024 - 1) / (1024 * 1024));

	target->bdev = bdev;
	target->next = g_io_targets;
	execute_spdk_function(__get_io_channel, target);
	g_io_targets = target;

	return 0;
}

static int
bdevio_construct_targets(void)
{
	struct spdk_bdev *bdev;
	int rc;

	printf("I/O targets:\n");

	bdev = spdk_bdev_first_leaf();
	while (bdev != NULL) {
		rc = bdevio_construct_target(bdev);
		if (rc < 0) {
			SPDK_ERRLOG("Could not construct bdev %s, error=%d\n", spdk_bdev_get_name(bdev), rc);
			return rc;
		}
		bdev = spdk_bdev_next_leaf(bdev);
	}

	if (g_io_targets == NULL) {
		SPDK_ERRLOG("No bdevs to perform tests on\n");
		return -1;
	}

	return 0;
}

static void
__put_io_channel(void *arg)
{
	struct io_target *target = arg;

	spdk_put_io_channel(target->ch);
	wake_ut_thread();
}

static void
bdevio_cleanup_targets(void)
{
	struct io_target *target;

	target = g_io_targets;
	while (target != NULL) {
		execute_spdk_function(__put_io_channel, target);
		spdk_bdev_close(target->bdev_desc);
		g_io_targets = target->next;
		free(target);
		target = g_io_targets;
	}
}

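/*
 * Each test issues exactly one I/O at a time through execute_spdk_function(),
 * so a single flag is enough to carry the completion status back from the
 * I/O thread to the unit-test thread.
 */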
static bool g_completion_success;

static void
initialize_buffer(char **buf, int pattern, int size)
{
	*buf = spdk_zmalloc(size, 0x1000, NULL, SPDK_ENV_LCORE_ID_ANY, SPDK_MALLOC_DMA);
	/* Fail fast on allocation failure rather than faulting in memset(). */
	assert(*buf != NULL);
	memset(*buf, pattern, size);
}

static void
quick_test_complete(struct spdk_bdev_io *bdev_io, bool success, void *arg)
{
	g_completion_success = success;
	spdk_bdev_free_io(bdev_io);
	wake_ut_thread();
}

static uint64_t
bdev_bytes_to_blocks(struct spdk_bdev *bdev, uint64_t bytes)
{
	uint32_t block_size = spdk_bdev_get_block_size(bdev);

	CU_ASSERT(bytes % block_size == 0);
	return bytes / block_size;
}

static void
__blockdev_write(void *arg)
{
	struct bdevio_request *req = arg;
	struct io_target *target = req->target;
	int rc;

	if (req->iovcnt) {
		rc = spdk_bdev_writev(target->bdev_desc, target->ch, req->iov, req->iovcnt, req->offset,
				      req->data_len, quick_test_complete, NULL);
	} else {
		rc = spdk_bdev_write(target->bdev_desc, target->ch, req->buf, req->offset,
				     req->data_len, quick_test_complete, NULL);
	}

	if (rc) {
		g_completion_success = false;
		wake_ut_thread();
	}
}

static void
__blockdev_write_zeroes(void *arg)
{
	struct bdevio_request *req = arg;
	struct io_target *target = req->target;
	int rc;

	rc = spdk_bdev_write_zeroes(target->bdev_desc, target->ch, req->offset,
				    req->data_len, quick_test_complete, NULL);
	if (rc) {
		g_completion_success = false;
		wake_ut_thread();
	}
}

static void
__blockdev_compare_and_write(void *arg)
{
	struct bdevio_request *req = arg;
	struct io_target *target = req->target;
	struct spdk_bdev *bdev = target->bdev;
	int rc;

	rc = spdk_bdev_comparev_and_writev_blocks(target->bdev_desc, target->ch, req->iov, req->iovcnt,
			req->fused_iov, req->fused_iovcnt, bdev_bytes_to_blocks(bdev, req->offset),
			bdev_bytes_to_blocks(bdev, req->data_len), quick_test_complete, NULL);

	if (rc) {
		g_completion_success = false;
		wake_ut_thread();
	}
}

static void
sgl_chop_buffer(struct bdevio_request *req, int iov_len)
{
	int data_len = req->data_len;
	char *buf = req->buf;

	req->iovcnt = 0;
	if (!iov_len) {
		return;
	}

	for (; data_len > 0 && req->iovcnt < BUFFER_IOVS; req->iovcnt++) {
		if (data_len < iov_len) {
			iov_len = data_len;
		}

		req->iov[req->iovcnt].iov_base = buf;
		req->iov[req->iovcnt].iov_len = iov_len;

		buf += iov_len;
		data_len -= iov_len;
	}

	CU_ASSERT_EQUAL_FATAL(data_len, 0);
}

static void
sgl_chop_fused_buffer(struct bdevio_request *req, int iov_len)
{
	int data_len = req->data_len;
	char *buf = req->fused_buf;

	req->fused_iovcnt = 0;
	if (!iov_len) {
		return;
	}

	for (; data_len > 0 && req->fused_iovcnt < BUFFER_IOVS; req->fused_iovcnt++) {
		if (data_len < iov_len) {
			iov_len = data_len;
		}

		req->fused_iov[req->fused_iovcnt].iov_base = buf;
		req->fused_iov[req->fused_iovcnt].iov_len = iov_len;

		buf += iov_len;
		data_len -= iov_len;
	}

	CU_ASSERT_EQUAL_FATAL(data_len, 0);
}

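/*
 * iov_len == 0 submits the request as a single contiguous buffer
 * (spdk_bdev_write/read); any other value chops the buffer into
 * iov_len-sized iovecs (capped at BUFFER_IOVS entries) and submits
 * through the vectored spdk_bdev_writev/readv paths.
 */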
static void
blockdev_write(struct io_target *target, char *tx_buf,
	       uint64_t offset, int data_len, int iov_len)
{
	struct bdevio_request req;

	req.target = target;
	req.buf = tx_buf;
	req.data_len = data_len;
	req.offset = offset;
	sgl_chop_buffer(&req, iov_len);

	g_completion_success = false;

	execute_spdk_function(__blockdev_write, &req);
}

static void
_blockdev_compare_and_write(struct io_target *target, char *cmp_buf, char *write_buf,
			    uint64_t offset, int data_len, int iov_len)
{
	struct bdevio_request req;

	req.target = target;
	req.buf = cmp_buf;
	req.fused_buf = write_buf;
	req.data_len = data_len;
	req.offset = offset;
	sgl_chop_buffer(&req, iov_len);
	sgl_chop_fused_buffer(&req, iov_len);

	g_completion_success = false;

	execute_spdk_function(__blockdev_compare_and_write, &req);
}

static void
blockdev_write_zeroes(struct io_target *target, char *tx_buf,
		      uint64_t offset, int data_len)
{
	struct bdevio_request req;

	req.target = target;
	req.buf = tx_buf;
	req.data_len = data_len;
	req.offset = offset;

	g_completion_success = false;

	execute_spdk_function(__blockdev_write_zeroes, &req);
}

static void
__blockdev_read(void *arg)
{
	struct bdevio_request *req = arg;
	struct io_target *target = req->target;
	int rc;

	if (req->iovcnt) {
		rc = spdk_bdev_readv(target->bdev_desc, target->ch, req->iov, req->iovcnt, req->offset,
				     req->data_len, quick_test_complete, NULL);
	} else {
		rc = spdk_bdev_read(target->bdev_desc, target->ch, req->buf, req->offset,
				    req->data_len, quick_test_complete, NULL);
	}

	if (rc) {
		g_completion_success = false;
		wake_ut_thread();
	}
}

static void
blockdev_read(struct io_target *target, char *rx_buf,
	      uint64_t offset, int data_len, int iov_len)
{
	struct bdevio_request req;

	req.target = target;
	req.buf = rx_buf;
	req.data_len = data_len;
	req.offset = offset;
	req.iovcnt = 0;
	sgl_chop_buffer(&req, iov_len);

	g_completion_success = false;

	execute_spdk_function(__blockdev_read, &req);
}

static int
blockdev_write_read_data_match(char *rx_buf, char *tx_buf, int data_length)
{
	int rc;

	rc = memcmp(rx_buf, tx_buf, data_length);

	spdk_free(rx_buf);
	spdk_free(tx_buf);

	return rc;
}

static void
blockdev_write_read(uint32_t data_length, uint32_t iov_len, int pattern, uint64_t offset,
		    int expected_rc, bool write_zeroes)
{
	struct io_target *target;
	char *tx_buf = NULL;
	char *rx_buf = NULL;
	int rc;

	target = g_current_io_target;

	if (!write_zeroes) {
		initialize_buffer(&tx_buf, pattern, data_length);
		initialize_buffer(&rx_buf, 0, data_length);

		blockdev_write(target, tx_buf, offset, data_length, iov_len);
	} else {
		initialize_buffer(&tx_buf, 0, data_length);
		initialize_buffer(&rx_buf, pattern, data_length);

		blockdev_write_zeroes(target, tx_buf, offset, data_length);
	}

	if (expected_rc == 0) {
		CU_ASSERT_EQUAL(g_completion_success, true);
	} else {
		CU_ASSERT_EQUAL(g_completion_success, false);
	}

	blockdev_read(target, rx_buf, offset, data_length, iov_len);

	if (expected_rc == 0) {
		CU_ASSERT_EQUAL(g_completion_success, true);
	} else {
		CU_ASSERT_EQUAL(g_completion_success, false);
	}

	if (g_completion_success) {
		rc = blockdev_write_read_data_match(rx_buf, tx_buf, data_length);
		/* Assert the write by comparing it with values read
		 * from each blockdev */
		CU_ASSERT_EQUAL(rc, 0);
	}
}

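/*
 * Fused compare-and-write: the first attempt compares against the 0xAA
 * pattern just written and succeeds, replacing the data with 0xBB. The
 * second attempt still compares against 0xAA, so it must fail, leaving
 * the 0xBB data intact for the final read-back check.
 */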
static void
blockdev_compare_and_write(uint32_t data_length, uint32_t iov_len, uint64_t offset)
{
	struct io_target *target;
	char *tx_buf = NULL;
	char *write_buf = NULL;
	char *rx_buf = NULL;
	int rc;

	target = g_current_io_target;

	initialize_buffer(&tx_buf, 0xAA, data_length);
	initialize_buffer(&rx_buf, 0, data_length);
	initialize_buffer(&write_buf, 0xBB, data_length);

	blockdev_write(target, tx_buf, offset, data_length, iov_len);
	CU_ASSERT_EQUAL(g_completion_success, true);

	_blockdev_compare_and_write(target, tx_buf, write_buf, offset, data_length, iov_len);
	CU_ASSERT_EQUAL(g_completion_success, true);

	_blockdev_compare_and_write(target, tx_buf, write_buf, offset, data_length, iov_len);
	CU_ASSERT_EQUAL(g_completion_success, false);

	blockdev_read(target, rx_buf, offset, data_length, iov_len);
	CU_ASSERT_EQUAL(g_completion_success, true);
	rc = blockdev_write_read_data_match(rx_buf, write_buf, data_length);
	/* Assert the write by comparing it with values read
	 * from each blockdev */
	CU_ASSERT_EQUAL(rc, 0);
}

static void
blockdev_write_read_block(void)
{
	uint32_t data_length;
	uint64_t offset;
	int pattern;
	int expected_rc;
	struct io_target *target = g_current_io_target;
	struct spdk_bdev *bdev = target->bdev;

	/* Data size = 1 block */
	data_length = spdk_bdev_get_block_size(bdev);
	CU_ASSERT_TRUE(data_length < BUFFER_SIZE);
	offset = 0;
	pattern = 0xA3;
	/* Params are valid, hence the expected return value
	 * of write and read for all blockdevs is 0. */
	expected_rc = 0;

	blockdev_write_read(data_length, 0, pattern, offset, expected_rc, 0);
}

static void
blockdev_write_zeroes_read_block(void)
{
	uint32_t data_length;
	uint64_t offset;
	int pattern;
	int expected_rc;
	struct io_target *target = g_current_io_target;
	struct spdk_bdev *bdev = target->bdev;

	/* Data size = 1 block */
	data_length = spdk_bdev_get_block_size(bdev);
	offset = 0;
	pattern = 0xA3;
	/* Params are valid, hence the expected return value
	 * of write_zeroes and read for all blockdevs is 0. */
	expected_rc = 0;

	blockdev_write_read(data_length, 0, pattern, offset, expected_rc, 1);
}

/*
 * This i/o will not have to split at the bdev layer.
 */
static void
blockdev_write_zeroes_read_no_split(void)
{
	uint32_t data_length;
	uint64_t offset;
	int pattern;
	int expected_rc;
	struct io_target *target = g_current_io_target;
	struct spdk_bdev *bdev = target->bdev;

	/* Data size = block size aligned ZERO_BUFFER_SIZE */
	data_length = ZERO_BUFFER_SIZE; /* from bdev_internal.h */
	data_length -= ZERO_BUFFER_SIZE % spdk_bdev_get_block_size(bdev);
	offset = 0;
	pattern = 0xA3;
	/* Params are valid, hence the expected return value
	 * of write_zeroes and read for all blockdevs is 0. */
	expected_rc = 0;

	blockdev_write_read(data_length, 0, pattern, offset, expected_rc, 1);
}

/*
 * This i/o will have to split at the bdev layer if
 * write-zeroes is not supported by the bdev.
 */
static void
blockdev_write_zeroes_read_split(void)
{
	uint32_t data_length;
	uint64_t offset;
	int pattern;
	int expected_rc;
	struct io_target *target = g_current_io_target;
	struct spdk_bdev *bdev = target->bdev;

	/* Data size = block size aligned 3 * ZERO_BUFFER_SIZE */
	data_length = 3 * ZERO_BUFFER_SIZE; /* from bdev_internal.h */
	data_length -= data_length % spdk_bdev_get_block_size(bdev);
	offset = 0;
	pattern = 0xA3;
	/* Params are valid, hence the expected return value
	 * of write_zeroes and read for all blockdevs is 0. */
	expected_rc = 0;

	blockdev_write_read(data_length, 0, pattern, offset, expected_rc, 1);
}

/*
 * This i/o will have to split at the bdev layer if
 * write-zeroes is not supported by the bdev. It also
 * tests a write size that is not an even multiple of
 * the bdev layer zero buffer size.
 */
static void
blockdev_write_zeroes_read_split_partial(void)
{
	uint32_t data_length;
	uint64_t offset;
	int pattern;
	int expected_rc;
	struct io_target *target = g_current_io_target;
	struct spdk_bdev *bdev = target->bdev;
	uint32_t block_size = spdk_bdev_get_block_size(bdev);

	/* Data size = block size aligned 7 * ZERO_BUFFER_SIZE / 2 */
	data_length = ZERO_BUFFER_SIZE * 7 / 2;
	data_length -= data_length % block_size;
	offset = 0;
	pattern = 0xA3;
	/* Params are valid, hence the expected return value
	 * of write_zeroes and read for all blockdevs is 0. */
	expected_rc = 0;

	blockdev_write_read(data_length, 0, pattern, offset, expected_rc, 1);
}

static void
blockdev_writev_readv_block(void)
{
	uint32_t data_length, iov_len;
	uint64_t offset;
	int pattern;
	int expected_rc;
	struct io_target *target = g_current_io_target;
	struct spdk_bdev *bdev = target->bdev;

	/* Data size = 1 block */
	data_length = spdk_bdev_get_block_size(bdev);
	iov_len = data_length;
	CU_ASSERT_TRUE(data_length < BUFFER_SIZE);
	offset = 0;
	pattern = 0xA3;
	/* Params are valid, hence the expected return value
	 * of write and read for all blockdevs is 0. */
	expected_rc = 0;

	blockdev_write_read(data_length, iov_len, pattern, offset, expected_rc, 0);
}

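/*
 * Compare-and-write transfers are sized to the bdev's reported atomic
 * compare-and-write unit (ACWU, in blocks) rather than a fixed block
 * count, since fused compare-and-write must fit within that unit.
 */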
static void
blockdev_comparev_and_writev(void)
{
	uint32_t data_length, iov_len;
	uint64_t offset;
	struct io_target *target = g_current_io_target;
	struct spdk_bdev *bdev = target->bdev;

	/* Data size = acwu size */
	data_length = spdk_bdev_get_block_size(bdev) * spdk_bdev_get_acwu(bdev);
	iov_len = data_length;
	CU_ASSERT_TRUE(data_length < BUFFER_SIZE);
	offset = 0;

	blockdev_compare_and_write(data_length, iov_len, offset);
}

static void
blockdev_writev_readv_30x1block(void)
{
	uint32_t data_length, iov_len;
	uint64_t offset;
	int pattern;
	int expected_rc;
	struct io_target *target = g_current_io_target;
	struct spdk_bdev *bdev = target->bdev;
	uint32_t block_size = spdk_bdev_get_block_size(bdev);

	/* Data size = 30 * block size */
	data_length = block_size * 30;
	iov_len = block_size;
	CU_ASSERT_TRUE(data_length < BUFFER_SIZE);
	offset = 0;
	pattern = 0xA3;
	/* Params are valid, hence the expected return value
	 * of write and read for all blockdevs is 0. */
	expected_rc = 0;

	blockdev_write_read(data_length, iov_len, pattern, offset, expected_rc, 0);
}

static void
blockdev_write_read_8blocks(void)
{
	uint32_t data_length;
	uint64_t offset;
	int pattern;
	int expected_rc;
	struct io_target *target = g_current_io_target;
	struct spdk_bdev *bdev = target->bdev;

	/* Data size = 8 * block size */
	data_length = spdk_bdev_get_block_size(bdev) * 8;
	CU_ASSERT_TRUE(data_length < BUFFER_SIZE);
	offset = data_length;
	pattern = 0xA3;
	/* Params are valid, hence the expected return value
	 * of write and read for all blockdevs is 0. */
	expected_rc = 0;

	blockdev_write_read(data_length, 0, pattern, offset, expected_rc, 0);
}

static void
blockdev_writev_readv_8blocks(void)
{
	uint32_t data_length, iov_len;
	uint64_t offset;
	int pattern;
	int expected_rc;
	struct io_target *target = g_current_io_target;
	struct spdk_bdev *bdev = target->bdev;

	/* Data size = 8 * block size */
	data_length = spdk_bdev_get_block_size(bdev) * 8;
	iov_len = data_length;
	CU_ASSERT_TRUE(data_length < BUFFER_SIZE);
	offset = data_length;
	pattern = 0xA3;
	/* Params are valid, hence the expected return value
	 * of write and read for all blockdevs is 0. */
	expected_rc = 0;

	blockdev_write_read(data_length, iov_len, pattern, offset, expected_rc, 0);
}

static void
blockdev_write_read_size_gt_128k(void)
{
	uint32_t data_length;
	uint64_t offset;
	int pattern;
	int expected_rc;
	struct io_target *target = g_current_io_target;
	struct spdk_bdev *bdev = target->bdev;
	uint32_t block_size = spdk_bdev_get_block_size(bdev);

	/* Data size = block size aligned 128K + 1 block */
	data_length = 128 * 1024;
	data_length -= data_length % block_size;
	data_length += block_size;
	CU_ASSERT_TRUE(data_length < BUFFER_SIZE);
	offset = block_size * 2;
	pattern = 0xA3;
	/* Params are valid, hence the expected return value
	 * of write and read for all blockdevs is 0. */
	expected_rc = 0;

	blockdev_write_read(data_length, 0, pattern, offset, expected_rc, 0);
}

static void
blockdev_writev_readv_size_gt_128k(void)
{
	uint32_t data_length, iov_len;
	uint64_t offset;
	int pattern;
	int expected_rc;
	struct io_target *target = g_current_io_target;
	struct spdk_bdev *bdev = target->bdev;
	uint32_t block_size = spdk_bdev_get_block_size(bdev);

	/* Data size = block size aligned 128K + 1 block */
	data_length = 128 * 1024;
	data_length -= data_length % block_size;
	data_length += block_size;
	iov_len = data_length;
	CU_ASSERT_TRUE(data_length < BUFFER_SIZE);
	offset = block_size * 2;
	pattern = 0xA3;
	/* Params are valid, hence the expected return value
	 * of write and read for all blockdevs is 0. */
	expected_rc = 0;

	blockdev_write_read(data_length, iov_len, pattern, offset, expected_rc, 0);
}

static void
blockdev_writev_readv_size_gt_128k_two_iov(void)
{
	uint32_t data_length, iov_len;
	uint64_t offset;
	int pattern;
	int expected_rc;
	struct io_target *target = g_current_io_target;
	struct spdk_bdev *bdev = target->bdev;
	uint32_t block_size = spdk_bdev_get_block_size(bdev);

	/* Data size = block size aligned 128K + 1 block */
	data_length = 128 * 1024;
	data_length -= data_length % block_size;
	iov_len = data_length;
	data_length += block_size;
	CU_ASSERT_TRUE(data_length < BUFFER_SIZE);
	offset = block_size * 2;
	pattern = 0xA3;
	/* Params are valid, hence the expected return value
	 * of write and read for all blockdevs is 0. */
	expected_rc = 0;

	blockdev_write_read(data_length, iov_len, pattern, offset, expected_rc, 0);
}

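/*
 * Negative cases follow: the submission is expected to fail
 * (expected_rc < 0), so g_completion_success must remain false for
 * both the write and the read attempt.
 */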
static void
blockdev_write_read_invalid_size(void)
{
	uint32_t data_length;
	uint64_t offset;
	int pattern;
	int expected_rc;
	struct io_target *target = g_current_io_target;
	struct spdk_bdev *bdev = target->bdev;
	uint32_t block_size = spdk_bdev_get_block_size(bdev);

	/* Data size is not a multiple of the block size */
	data_length = block_size - 1;
	CU_ASSERT_TRUE(data_length < BUFFER_SIZE);
	offset = block_size * 2;
	pattern = 0xA3;
	/* Params are invalid, hence the expected return value
	 * of write and read for all blockdevs is < 0 */
	expected_rc = -1;

	blockdev_write_read(data_length, 0, pattern, offset, expected_rc, 0);
}

static void
blockdev_write_read_offset_plus_nbytes_equals_bdev_size(void)
{
	struct io_target *target;
	struct spdk_bdev *bdev;
	char *tx_buf = NULL;
	char *rx_buf = NULL;
	uint64_t offset;
	uint32_t block_size;
	int rc;

	target = g_current_io_target;
	bdev = target->bdev;

	block_size = spdk_bdev_get_block_size(bdev);

	/* The start offset has been set to a marginal value
	 * such that offset + nbytes == Total size of
	 * blockdev. */
	offset = ((spdk_bdev_get_num_blocks(bdev) - 1) * block_size);

	initialize_buffer(&tx_buf, 0xA3, block_size);
	initialize_buffer(&rx_buf, 0, block_size);

	blockdev_write(target, tx_buf, offset, block_size, 0);
	CU_ASSERT_EQUAL(g_completion_success, true);

	blockdev_read(target, rx_buf, offset, block_size, 0);
	CU_ASSERT_EQUAL(g_completion_success, true);

	rc = blockdev_write_read_data_match(rx_buf, tx_buf, block_size);
	/* Assert the write by comparing it with values read
	 * from each blockdev */
	CU_ASSERT_EQUAL(rc, 0);
}

static void
blockdev_write_read_offset_plus_nbytes_gt_bdev_size(void)
{
	struct io_target *target = g_current_io_target;
	struct spdk_bdev *bdev = target->bdev;
	char *tx_buf = NULL;
	char *rx_buf = NULL;
	int data_length;
	uint64_t offset;
	int pattern;
	uint32_t block_size = spdk_bdev_get_block_size(bdev);

	/* Tests the overflow condition of the blockdevs. */
	data_length = block_size * 2;
	CU_ASSERT_TRUE(data_length < BUFFER_SIZE);
	pattern = 0xA3;

	/* The start offset has been set to a valid value
	 * but offset + nbytes is greater than the Total size
	 * of the blockdev. The test should fail. */
	offset = (spdk_bdev_get_num_blocks(bdev) - 1) * block_size;

	initialize_buffer(&tx_buf, pattern, data_length);
	initialize_buffer(&rx_buf, 0, data_length);

	blockdev_write(target, tx_buf, offset, data_length, 0);
	CU_ASSERT_EQUAL(g_completion_success, false);

	blockdev_read(target, rx_buf, offset, data_length, 0);
	CU_ASSERT_EQUAL(g_completion_success, false);
}

static void
blockdev_write_read_max_offset(void)
{
	int data_length;
	uint64_t offset;
	int pattern;
	int expected_rc;
	struct io_target *target = g_current_io_target;
	struct spdk_bdev *bdev = target->bdev;

	data_length = spdk_bdev_get_block_size(bdev);
	CU_ASSERT_TRUE(data_length < BUFFER_SIZE);
	/* The start offset has been set to UINT64_MAX such that
	 * adding nbytes wraps around and points to an invalid address. */
	offset = UINT64_MAX;
	pattern = 0xA3;
	/* Params are invalid, hence the expected return value
	 * of write and read for all blockdevs is < 0 */
	expected_rc = -1;

	blockdev_write_read(data_length, 0, pattern, offset, expected_rc, 0);
}

static void
blockdev_overlapped_write_read_2blocks(void)
{
	int data_length;
	uint64_t offset;
	int pattern;
	int expected_rc;
	struct io_target *target = g_current_io_target;
	struct spdk_bdev *bdev = target->bdev;

	/* Data size = 2 blocks */
	data_length = spdk_bdev_get_block_size(bdev) * 2;
	CU_ASSERT_TRUE(data_length < BUFFER_SIZE);
	offset = 0;
	pattern = 0xA3;
	/* Params are valid, hence the expected return value
	 * of write and read for all blockdevs is 0. */
	expected_rc = 0;
	/* Assert the write by comparing it with values read
	 * from the same offset for each blockdev */
	blockdev_write_read(data_length, 0, pattern, offset, expected_rc, 0);

	/* Overwrite the pattern 0xbb of size 2*block size on an address offset
	 * overlapping with the address written above and assert the new value in
	 * the overlapped address range */
	/* Populate 2*block size with value 0xBB */
	pattern = 0xBB;
	/* Offset = 1 block; Overlap offset addresses and write value 0xbb */
	offset = spdk_bdev_get_block_size(bdev);
	/* Assert the write by comparing it with values read
	 * from the overlapped offset for each blockdev */
	blockdev_write_read(data_length, 0, pattern, offset, expected_rc, 0);
}

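/*
 * Reset support is optional; the test below passes when the completion
 * status matches what spdk_bdev_io_type_supported() reports for the
 * target.
 */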
static void
__blockdev_reset(void *arg)
{
	struct bdevio_request *req = arg;
	struct io_target *target = req->target;
	int rc;

	rc = spdk_bdev_reset(target->bdev_desc, target->ch, quick_test_complete, NULL);
	if (rc < 0) {
		g_completion_success = false;
		wake_ut_thread();
	}
}

static void
blockdev_test_reset(void)
{
	struct bdevio_request req;
	struct io_target *target;
	bool reset_supported;

	target = g_current_io_target;
	req.target = target;

	reset_supported = spdk_bdev_io_type_supported(target->bdev, SPDK_BDEV_IO_TYPE_RESET);
	g_completion_success = false;

	execute_spdk_function(__blockdev_reset, &req);

	CU_ASSERT_EQUAL(g_completion_success, reset_supported);
}

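/*
 * NVMe passthrough requests carry their own status: sct/sc are preloaded
 * with sentinel values that the completion callback overwrites from
 * spdk_bdev_io_get_nvme_status(), so a request that never reaches the
 * device is detected as a failure.
 */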
struct bdevio_passthrough_request {
	struct spdk_nvme_cmd cmd;
	void *buf;
	uint32_t len;
	struct io_target *target;
	int sct;
	int sc;
	uint32_t cdw0;
};

static void
nvme_pt_test_complete(struct spdk_bdev_io *bdev_io, bool success, void *arg)
{
	struct bdevio_passthrough_request *pt_req = arg;

	spdk_bdev_io_get_nvme_status(bdev_io, &pt_req->cdw0, &pt_req->sct, &pt_req->sc);
	spdk_bdev_free_io(bdev_io);
	wake_ut_thread();
}

static void
__blockdev_nvme_passthru(void *arg)
{
	struct bdevio_passthrough_request *pt_req = arg;
	struct io_target *target = pt_req->target;
	int rc;

	rc = spdk_bdev_nvme_io_passthru(target->bdev_desc, target->ch,
					&pt_req->cmd, pt_req->buf, pt_req->len,
					nvme_pt_test_complete, pt_req);
	if (rc) {
		wake_ut_thread();
	}
}

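/*
 * Raw NVMe I/O round trip: per the NVMe command format, cdw10/11 hold
 * the starting LBA (4 here) and the low 16 bits of cdw12 hold the
 * zero-based block count, so cdw12 = 0 means one logical block. A write
 * of 0xA5 is read back and compared.
 */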
static void
blockdev_test_nvme_passthru_rw(void)
{
	struct bdevio_passthrough_request pt_req;
	void *write_buf, *read_buf;
	struct io_target *target;

	target = g_current_io_target;

	if (!spdk_bdev_io_type_supported(target->bdev, SPDK_BDEV_IO_TYPE_NVME_IO)) {
		return;
	}

	memset(&pt_req, 0, sizeof(pt_req));
	pt_req.target = target;
	pt_req.cmd.opc = SPDK_NVME_OPC_WRITE;
	pt_req.cmd.nsid = 1;
	*(uint64_t *)&pt_req.cmd.cdw10 = 4;
	pt_req.cmd.cdw12 = 0;

	pt_req.len = spdk_bdev_get_block_size(target->bdev);
	write_buf = spdk_malloc(pt_req.len, 0, NULL, SPDK_ENV_LCORE_ID_ANY, SPDK_MALLOC_DMA);
	memset(write_buf, 0xA5, pt_req.len);
	pt_req.buf = write_buf;

	pt_req.sct = SPDK_NVME_SCT_VENDOR_SPECIFIC;
	pt_req.sc = SPDK_NVME_SC_INVALID_FIELD;
	execute_spdk_function(__blockdev_nvme_passthru, &pt_req);
	CU_ASSERT(pt_req.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(pt_req.sc == SPDK_NVME_SC_SUCCESS);

	pt_req.cmd.opc = SPDK_NVME_OPC_READ;
	read_buf = spdk_zmalloc(pt_req.len, 0, NULL, SPDK_ENV_LCORE_ID_ANY, SPDK_MALLOC_DMA);
	pt_req.buf = read_buf;

	pt_req.sct = SPDK_NVME_SCT_VENDOR_SPECIFIC;
	pt_req.sc = SPDK_NVME_SC_INVALID_FIELD;
	execute_spdk_function(__blockdev_nvme_passthru, &pt_req);
	CU_ASSERT(pt_req.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(pt_req.sc == SPDK_NVME_SC_SUCCESS);

	CU_ASSERT(!memcmp(read_buf, write_buf, pt_req.len));
	spdk_free(read_buf);
	spdk_free(write_buf);
}

static void
blockdev_test_nvme_passthru_vendor_specific(void)
{
	struct bdevio_passthrough_request pt_req;
	struct io_target *target;

	target = g_current_io_target;

	if (!spdk_bdev_io_type_supported(target->bdev, SPDK_BDEV_IO_TYPE_NVME_IO)) {
		return;
	}

	memset(&pt_req, 0, sizeof(pt_req));
	pt_req.target = target;
	pt_req.cmd.opc = 0x7F; /* choose known invalid opcode */
	pt_req.cmd.nsid = 1;

	pt_req.sct = SPDK_NVME_SCT_VENDOR_SPECIFIC;
	pt_req.sc = SPDK_NVME_SC_SUCCESS;
	pt_req.cdw0 = 0xbeef;
	execute_spdk_function(__blockdev_nvme_passthru, &pt_req);
	CU_ASSERT(pt_req.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(pt_req.sc == SPDK_NVME_SC_INVALID_OPCODE);
	CU_ASSERT(pt_req.cdw0 == 0x0);
}

static void
__blockdev_nvme_admin_passthru(void *arg)
{
	struct bdevio_passthrough_request *pt_req = arg;
	struct io_target *target = pt_req->target;
	int rc;

	rc = spdk_bdev_nvme_admin_passthru(target->bdev_desc, target->ch,
					   &pt_req->cmd, pt_req->buf, pt_req->len,
					   nvme_pt_test_complete, pt_req);
	if (rc) {
		wake_ut_thread();
	}
}

static void
blockdev_test_nvme_admin_passthru(void)
{
	struct io_target *target;
	struct bdevio_passthrough_request pt_req;

	target = g_current_io_target;

	if (!spdk_bdev_io_type_supported(target->bdev, SPDK_BDEV_IO_TYPE_NVME_ADMIN)) {
		return;
	}

	memset(&pt_req, 0, sizeof(pt_req));
	pt_req.target = target;
	pt_req.cmd.opc = SPDK_NVME_OPC_IDENTIFY;
	pt_req.cmd.nsid = 0;
	*(uint64_t *)&pt_req.cmd.cdw10 = SPDK_NVME_IDENTIFY_CTRLR;

	pt_req.len = sizeof(struct spdk_nvme_ctrlr_data);
	pt_req.buf = spdk_malloc(pt_req.len, 0, NULL, SPDK_ENV_LCORE_ID_ANY, SPDK_MALLOC_DMA);

	pt_req.sct = SPDK_NVME_SCT_GENERIC;
	pt_req.sc = SPDK_NVME_SC_SUCCESS;
	execute_spdk_function(__blockdev_nvme_admin_passthru, &pt_req);
	CU_ASSERT(pt_req.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(pt_req.sc == SPDK_NVME_SC_SUCCESS);
}

static void
__stop_init_thread(void *arg)
{
	unsigned num_failures = g_num_failures;
	struct spdk_jsonrpc_request *request = arg;

	g_num_failures = 0;

	bdevio_cleanup_targets();
	if (g_wait_for_tests && !g_shutdown) {
		/* Do not stop the app yet, wait for another RPC */
		rpc_perform_tests_cb(num_failures, request);
		return;
	}
	spdk_app_stop(num_failures);
}

static void
stop_init_thread(unsigned num_failures, struct spdk_jsonrpc_request *request)
{
	g_num_failures = num_failures;

	spdk_thread_send_msg(g_thread_init, __stop_init_thread, request);
}

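/*
 * Suites are registered in target-list order and run sequentially:
 * suite_init seeds g_current_io_target with the first target, and
 * suite_fini advances it after each suite, so the Nth registered suite
 * exercises the Nth constructed target.
 */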
static int
suite_init(void)
{
	if (g_current_io_target == NULL) {
		g_current_io_target = g_io_targets;
	}
	return 0;
}

static int
suite_fini(void)
{
	g_current_io_target = g_current_io_target->next;
	return 0;
}

#define SUITE_NAME_MAX 64

static int
__setup_ut_on_single_target(struct io_target *target)
{
	unsigned rc = 0;
	CU_pSuite suite = NULL;
	char name[SUITE_NAME_MAX];

	snprintf(name, sizeof(name), "bdevio tests on: %s", spdk_bdev_get_name(target->bdev));
	suite = CU_add_suite(name, suite_init, suite_fini);
	if (suite == NULL) {
		CU_cleanup_registry();
		rc = CU_get_error();
		return -rc;
	}

	if (
		CU_add_test(suite, "blockdev write read block",
			    blockdev_write_read_block) == NULL
		|| CU_add_test(suite, "blockdev write zeroes read block",
			       blockdev_write_zeroes_read_block) == NULL
		|| CU_add_test(suite, "blockdev write zeroes read no split",
			       blockdev_write_zeroes_read_no_split) == NULL
		|| CU_add_test(suite, "blockdev write zeroes read split",
			       blockdev_write_zeroes_read_split) == NULL
		|| CU_add_test(suite, "blockdev write zeroes read split partial",
			       blockdev_write_zeroes_read_split_partial) == NULL
		|| CU_add_test(suite, "blockdev reset",
			       blockdev_test_reset) == NULL
		|| CU_add_test(suite, "blockdev write read 8 blocks",
			       blockdev_write_read_8blocks) == NULL
		|| CU_add_test(suite, "blockdev write read size > 128k",
			       blockdev_write_read_size_gt_128k) == NULL
		|| CU_add_test(suite, "blockdev write read invalid size",
			       blockdev_write_read_invalid_size) == NULL
		|| CU_add_test(suite, "blockdev write read offset + nbytes == size of blockdev",
			       blockdev_write_read_offset_plus_nbytes_equals_bdev_size) == NULL
		|| CU_add_test(suite, "blockdev write read offset + nbytes > size of blockdev",
			       blockdev_write_read_offset_plus_nbytes_gt_bdev_size) == NULL
		|| CU_add_test(suite, "blockdev write read max offset",
			       blockdev_write_read_max_offset) == NULL
		|| CU_add_test(suite, "blockdev write read 2 blocks on overlapped address offset",
			       blockdev_overlapped_write_read_2blocks) == NULL
		|| CU_add_test(suite, "blockdev writev readv 8 blocks",
			       blockdev_writev_readv_8blocks) == NULL
		|| CU_add_test(suite, "blockdev writev readv 30 x 1block",
			       blockdev_writev_readv_30x1block) == NULL
		|| CU_add_test(suite, "blockdev writev readv block",
			       blockdev_writev_readv_block) == NULL
		|| CU_add_test(suite, "blockdev writev readv size > 128k",
			       blockdev_writev_readv_size_gt_128k) == NULL
		|| CU_add_test(suite, "blockdev writev readv size > 128k in two iovs",
			       blockdev_writev_readv_size_gt_128k_two_iov) == NULL
		|| CU_add_test(suite, "blockdev comparev and writev",
			       blockdev_comparev_and_writev) == NULL
		|| CU_add_test(suite, "blockdev nvme passthru rw",
			       blockdev_test_nvme_passthru_rw) == NULL
		|| CU_add_test(suite, "blockdev nvme passthru vendor specific",
			       blockdev_test_nvme_passthru_vendor_specific) == NULL
		|| CU_add_test(suite, "blockdev nvme admin passthru",
			       blockdev_test_nvme_admin_passthru) == NULL
	) {
		CU_cleanup_registry();
		rc = CU_get_error();
		return -rc;
	}
	return 0;
}

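/*
 * Runs on g_thread_ut: registers one CUnit suite per constructed target,
 * executes the full registry, and reports the failure count back to the
 * init thread, which either answers the pending RPC or stops the app.
 */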
static void
__run_ut_thread(void *arg)
{
	struct spdk_jsonrpc_request *request = arg;
	int rc = 0;
	struct io_target *target;
	unsigned num_failures;

	if (CU_initialize_registry() != CUE_SUCCESS) {
		/* CUnit error, probably won't recover */
		rc = CU_get_error();
		stop_init_thread(-rc, request);
		/* Do not run tests on a registry that failed to initialize. */
		return;
	}

	target = g_io_targets;
	while (target != NULL) {
		rc = __setup_ut_on_single_target(target);
		if (rc < 0) {
			/* CUnit error, probably won't recover; the registry
			 * has already been cleaned up, so bail out. */
			stop_init_thread(-rc, request);
			return;
		}
		target = target->next;
	}
	CU_basic_set_mode(CU_BRM_VERBOSE);
	CU_basic_run_tests();
	num_failures = CU_get_number_of_failures();
	CU_cleanup_registry();

	stop_init_thread(num_failures, request);
}

static void
__construct_targets(void *arg)
{
	if (bdevio_construct_targets() < 0) {
		spdk_app_stop(-1);
		return;
	}

	spdk_thread_send_msg(g_thread_ut, __run_ut_thread, NULL);
}

static void
test_main(void *arg1)
{
	struct spdk_cpuset tmpmask = {};
	uint32_t i;

	pthread_mutex_init(&g_test_mutex, NULL);
	pthread_cond_init(&g_test_cond, NULL);

	/* This test requires at least three cores.
	 * g_thread_init is the app_thread on the main core from the event framework.
	 * The next two threads are only for the tests and should always be on separate CPU cores. */
	if (spdk_env_get_core_count() < 3) {
		spdk_app_stop(-1);
		return;
	}

	SPDK_ENV_FOREACH_CORE(i) {
		if (i == spdk_env_get_current_core()) {
			g_thread_init = spdk_get_thread();
			continue;
		}
		spdk_cpuset_zero(&tmpmask);
		spdk_cpuset_set_cpu(&tmpmask, i, true);
		if (g_thread_ut == NULL) {
			g_thread_ut = spdk_thread_create("ut_thread", &tmpmask);
		} else if (g_thread_io == NULL) {
			g_thread_io = spdk_thread_create("io_thread", &tmpmask);
		}
	}

	if (g_wait_for_tests) {
		/* Do not perform any tests until RPC is received */
		return;
	}

	spdk_thread_send_msg(g_thread_init, __construct_targets, NULL);
}

static void
bdevio_usage(void)
{
	printf(" -w start bdevio app and wait for RPC to start the tests\n");
}

static int
bdevio_parse_arg(int ch, char *arg)
{
	switch (ch) {
	case 'w':
		g_wait_for_tests = true;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

struct rpc_perform_tests {
	char *name;
};

static void
free_rpc_perform_tests(struct rpc_perform_tests *r)
{
	free(r->name);
}

static const struct spdk_json_object_decoder rpc_perform_tests_decoders[] = {
	{"name", offsetof(struct rpc_perform_tests, name), spdk_json_decode_string, true},
};

static void
rpc_perform_tests_cb(unsigned num_failures, struct spdk_jsonrpc_request *request)
{
	struct spdk_json_write_ctx *w;

	if (num_failures == 0) {
		w = spdk_jsonrpc_begin_result(request);
		spdk_json_write_uint32(w, num_failures);
		spdk_jsonrpc_end_result(request, w);
	} else {
		spdk_jsonrpc_send_error_response_fmt(request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR,
						     "%u test cases failed", num_failures);
	}
}

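/*
 * RPC entry point: an optional "name" parameter restricts the run to a
 * single bdev; without it, targets are constructed for every leaf bdev.
 * The JSON-RPC request is answered from rpc_perform_tests_cb once the
 * unit-test thread finishes.
 */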
static void
rpc_perform_tests(struct spdk_jsonrpc_request *request, const struct spdk_json_val *params)
{
	struct rpc_perform_tests req = {NULL};
	struct spdk_bdev *bdev;
	int rc;

	if (params && spdk_json_decode_object(params, rpc_perform_tests_decoders,
					      SPDK_COUNTOF(rpc_perform_tests_decoders),
					      &req)) {
		SPDK_ERRLOG("spdk_json_decode_object failed\n");
		spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INVALID_PARAMS, "Invalid parameters");
		goto invalid;
	}

	if (req.name) {
		bdev = spdk_bdev_get_by_name(req.name);
		if (bdev == NULL) {
			SPDK_ERRLOG("Bdev '%s' does not exist\n", req.name);
			spdk_jsonrpc_send_error_response_fmt(request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR,
							     "Bdev '%s' does not exist: %s",
							     req.name, spdk_strerror(ENODEV));
			goto invalid;
		}
		rc = bdevio_construct_target(bdev);
		if (rc < 0) {
			SPDK_ERRLOG("Could not construct target for bdev '%s'\n", spdk_bdev_get_name(bdev));
			spdk_jsonrpc_send_error_response_fmt(request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR,
							     "Could not construct target for bdev '%s': %s",
							     spdk_bdev_get_name(bdev), spdk_strerror(-rc));
			goto invalid;
		}
	} else {
		rc = bdevio_construct_targets();
		if (rc < 0) {
			SPDK_ERRLOG("Could not construct targets for all bdevs\n");
			spdk_jsonrpc_send_error_response_fmt(request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR,
							     "Could not construct targets for all bdevs: %s",
							     spdk_strerror(-rc));
			goto invalid;
		}
	}
	free_rpc_perform_tests(&req);

	spdk_thread_send_msg(g_thread_ut, __run_ut_thread, request);

	return;

invalid:
	free_rpc_perform_tests(&req);
}
SPDK_RPC_REGISTER("perform_tests", rpc_perform_tests, SPDK_RPC_RUNTIME)

static void
spdk_bdevio_shutdown_cb(void)
{
	g_shutdown = true;
	spdk_thread_send_msg(g_thread_init, __stop_init_thread, NULL);
}

int
main(int argc, char **argv)
{
	int rc;
	struct spdk_app_opts opts = {};

	spdk_app_opts_init(&opts, sizeof(opts));
	opts.name = "bdevio";
	opts.reactor_mask = "0x7";
	opts.shutdown_cb = spdk_bdevio_shutdown_cb;

	if ((rc = spdk_app_parse_args(argc, argv, &opts, "w", NULL,
				      bdevio_parse_arg, bdevio_usage)) !=
	    SPDK_APP_PARSE_ARGS_SUCCESS) {
		return rc;
	}

	rc = spdk_app_start(&opts, test_main, NULL);
	spdk_app_fini();

	return rc;
}