1 /* SPDX-License-Identifier: BSD-3-Clause 2 * Copyright (c) Intel Corporation. 3 * All rights reserved. 4 * Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 5 */ 6 7 #include "spdk/stdinc.h" 8 9 #include "spdk/bdev.h" 10 #include "spdk/accel.h" 11 #include "spdk/env.h" 12 #include "spdk/log.h" 13 #include "spdk/thread.h" 14 #include "spdk/event.h" 15 #include "spdk/rpc.h" 16 #include "spdk/util.h" 17 #include "spdk/string.h" 18 19 #include "bdev_internal.h" 20 #include "CUnit/Basic.h" 21 22 #define BUFFER_IOVS 1024 23 #define BUFFER_SIZE 260 * 1024 24 #define BDEV_TASK_ARRAY_SIZE 2048 25 26 pthread_mutex_t g_test_mutex; 27 pthread_cond_t g_test_cond; 28 29 static struct spdk_thread *g_thread_init; 30 static struct spdk_thread *g_thread_ut; 31 static struct spdk_thread *g_thread_io; 32 static bool g_wait_for_tests = false; 33 static int g_num_failures = 0; 34 static bool g_shutdown = false; 35 36 struct io_target { 37 struct spdk_bdev *bdev; 38 struct spdk_bdev_desc *bdev_desc; 39 struct spdk_io_channel *ch; 40 struct io_target *next; 41 }; 42 43 struct bdevio_request { 44 char *buf; 45 char *fused_buf; 46 int data_len; 47 uint64_t offset; 48 struct iovec iov[BUFFER_IOVS]; 49 int iovcnt; 50 struct iovec fused_iov[BUFFER_IOVS]; 51 int fused_iovcnt; 52 struct io_target *target; 53 }; 54 55 struct io_target *g_io_targets = NULL; 56 struct io_target *g_current_io_target = NULL; 57 static void rpc_perform_tests_cb(unsigned num_failures, struct spdk_jsonrpc_request *request); 58 59 static void 60 execute_spdk_function(spdk_msg_fn fn, void *arg) 61 { 62 pthread_mutex_lock(&g_test_mutex); 63 spdk_thread_send_msg(g_thread_io, fn, arg); 64 pthread_cond_wait(&g_test_cond, &g_test_mutex); 65 pthread_mutex_unlock(&g_test_mutex); 66 } 67 68 static void 69 wake_ut_thread(void) 70 { 71 pthread_mutex_lock(&g_test_mutex); 72 pthread_cond_signal(&g_test_cond); 73 pthread_mutex_unlock(&g_test_mutex); 74 } 75 76 static void 77 __get_io_channel(void *arg) 78 { 79 struct io_target *target = arg; 80 81 target->ch = spdk_bdev_get_io_channel(target->bdev_desc); 82 assert(target->ch); 83 wake_ut_thread(); 84 } 85 86 static void 87 bdevio_construct_target_open_cb(enum spdk_bdev_event_type type, struct spdk_bdev *bdev, 88 void *event_ctx) 89 { 90 } 91 92 static int 93 bdevio_construct_target(struct spdk_bdev *bdev) 94 { 95 struct io_target *target; 96 int rc; 97 uint64_t num_blocks = spdk_bdev_get_num_blocks(bdev); 98 uint32_t block_size = spdk_bdev_get_block_size(bdev); 99 100 target = malloc(sizeof(struct io_target)); 101 if (target == NULL) { 102 return -ENOMEM; 103 } 104 105 rc = spdk_bdev_open_ext(spdk_bdev_get_name(bdev), true, bdevio_construct_target_open_cb, NULL, 106 &target->bdev_desc); 107 if (rc != 0) { 108 free(target); 109 SPDK_ERRLOG("Could not open leaf bdev %s, error=%d\n", spdk_bdev_get_name(bdev), rc); 110 return rc; 111 } 112 113 printf(" %s: %" PRIu64 " blocks of %" PRIu32 " bytes (%" PRIu64 " MiB)\n", 114 spdk_bdev_get_name(bdev), 115 num_blocks, block_size, 116 (num_blocks * block_size + 1024 * 1024 - 1) / (1024 * 1024)); 117 118 target->bdev = bdev; 119 target->next = g_io_targets; 120 execute_spdk_function(__get_io_channel, target); 121 g_io_targets = target; 122 123 return 0; 124 } 125 126 static int 127 bdevio_construct_targets(void) 128 { 129 struct spdk_bdev *bdev; 130 int rc; 131 132 printf("I/O targets:\n"); 133 134 bdev = spdk_bdev_first_leaf(); 135 while (bdev != NULL) { 136 rc = bdevio_construct_target(bdev); 137 if (rc < 0) { 138 SPDK_ERRLOG("Could not construct bdev %s, error=%d\n", spdk_bdev_get_name(bdev), rc); 139 return rc; 140 } 141 bdev = spdk_bdev_next_leaf(bdev); 142 } 143 144 if (g_io_targets == NULL) { 145 SPDK_ERRLOG("No bdevs to perform tests on\n"); 146 return -1; 147 } 148 149 return 0; 150 } 151 152 static void 153 __put_io_channel(void *arg) 154 { 155 struct io_target *target = arg; 156 157 spdk_put_io_channel(target->ch); 158 wake_ut_thread(); 159 } 160 161 static void 162 bdevio_cleanup_targets(void) 163 { 164 struct io_target *target; 165 166 target = g_io_targets; 167 while (target != NULL) { 168 execute_spdk_function(__put_io_channel, target); 169 spdk_bdev_close(target->bdev_desc); 170 g_io_targets = target->next; 171 free(target); 172 target = g_io_targets; 173 } 174 } 175 176 static bool g_completion_success; 177 178 static void 179 initialize_buffer(char **buf, int pattern, int size) 180 { 181 *buf = spdk_zmalloc(size, 0x1000, NULL, SPDK_ENV_LCORE_ID_ANY, SPDK_MALLOC_DMA); 182 memset(*buf, pattern, size); 183 } 184 185 static void 186 quick_test_complete(struct spdk_bdev_io *bdev_io, bool success, void *arg) 187 { 188 g_completion_success = success; 189 spdk_bdev_free_io(bdev_io); 190 wake_ut_thread(); 191 } 192 193 static uint64_t 194 bdev_bytes_to_blocks(struct spdk_bdev *bdev, uint64_t bytes) 195 { 196 uint32_t block_size = spdk_bdev_get_block_size(bdev); 197 198 CU_ASSERT(bytes % block_size == 0); 199 return bytes / block_size; 200 } 201 202 static void 203 __blockdev_write(void *arg) 204 { 205 struct bdevio_request *req = arg; 206 struct io_target *target = req->target; 207 int rc; 208 209 if (req->iovcnt) { 210 rc = spdk_bdev_writev(target->bdev_desc, target->ch, req->iov, req->iovcnt, req->offset, 211 req->data_len, quick_test_complete, NULL); 212 } else { 213 rc = spdk_bdev_write(target->bdev_desc, target->ch, req->buf, req->offset, 214 req->data_len, quick_test_complete, NULL); 215 } 216 217 if (rc) { 218 g_completion_success = false; 219 wake_ut_thread(); 220 } 221 } 222 223 static void 224 __blockdev_write_zeroes(void *arg) 225 { 226 struct bdevio_request *req = arg; 227 struct io_target *target = req->target; 228 int rc; 229 230 rc = spdk_bdev_write_zeroes(target->bdev_desc, target->ch, req->offset, 231 req->data_len, quick_test_complete, NULL); 232 if (rc) { 233 g_completion_success = false; 234 wake_ut_thread(); 235 } 236 } 237 238 static void 239 __blockdev_compare_and_write(void *arg) 240 { 241 struct bdevio_request *req = arg; 242 struct io_target *target = req->target; 243 struct spdk_bdev *bdev = target->bdev; 244 int rc; 245 246 rc = spdk_bdev_comparev_and_writev_blocks(target->bdev_desc, target->ch, req->iov, req->iovcnt, 247 req->fused_iov, req->fused_iovcnt, bdev_bytes_to_blocks(bdev, req->offset), 248 bdev_bytes_to_blocks(bdev, req->data_len), quick_test_complete, NULL); 249 250 if (rc) { 251 g_completion_success = false; 252 wake_ut_thread(); 253 } 254 } 255 256 static void 257 sgl_chop_buffer(struct bdevio_request *req, int iov_len) 258 { 259 int data_len = req->data_len; 260 char *buf = req->buf; 261 262 req->iovcnt = 0; 263 if (!iov_len) { 264 return; 265 } 266 267 for (; data_len > 0 && req->iovcnt < BUFFER_IOVS; req->iovcnt++) { 268 if (data_len < iov_len) { 269 iov_len = data_len; 270 } 271 272 req->iov[req->iovcnt].iov_base = buf; 273 req->iov[req->iovcnt].iov_len = iov_len; 274 275 buf += iov_len; 276 data_len -= iov_len; 277 } 278 279 CU_ASSERT_EQUAL_FATAL(data_len, 0); 280 } 281 282 static void 283 sgl_chop_fused_buffer(struct bdevio_request *req, int iov_len) 284 { 285 int data_len = req->data_len; 286 char *buf = req->fused_buf; 287 288 req->fused_iovcnt = 0; 289 if (!iov_len) { 290 return; 291 } 292 293 for (; data_len > 0 && req->fused_iovcnt < BUFFER_IOVS; req->fused_iovcnt++) { 294 if (data_len < iov_len) { 295 iov_len = data_len; 296 } 297 298 req->fused_iov[req->fused_iovcnt].iov_base = buf; 299 req->fused_iov[req->fused_iovcnt].iov_len = iov_len; 300 301 buf += iov_len; 302 data_len -= iov_len; 303 } 304 305 CU_ASSERT_EQUAL_FATAL(data_len, 0); 306 } 307 308 static void 309 blockdev_write(struct io_target *target, char *tx_buf, 310 uint64_t offset, int data_len, int iov_len) 311 { 312 struct bdevio_request req; 313 314 req.target = target; 315 req.buf = tx_buf; 316 req.data_len = data_len; 317 req.offset = offset; 318 sgl_chop_buffer(&req, iov_len); 319 320 g_completion_success = false; 321 322 execute_spdk_function(__blockdev_write, &req); 323 } 324 325 static void 326 _blockdev_compare_and_write(struct io_target *target, char *cmp_buf, char *write_buf, 327 uint64_t offset, int data_len, int iov_len) 328 { 329 struct bdevio_request req; 330 331 req.target = target; 332 req.buf = cmp_buf; 333 req.fused_buf = write_buf; 334 req.data_len = data_len; 335 req.offset = offset; 336 sgl_chop_buffer(&req, iov_len); 337 sgl_chop_fused_buffer(&req, iov_len); 338 339 g_completion_success = false; 340 341 execute_spdk_function(__blockdev_compare_and_write, &req); 342 } 343 344 static void 345 blockdev_write_zeroes(struct io_target *target, char *tx_buf, 346 uint64_t offset, int data_len) 347 { 348 struct bdevio_request req; 349 350 req.target = target; 351 req.buf = tx_buf; 352 req.data_len = data_len; 353 req.offset = offset; 354 355 g_completion_success = false; 356 357 execute_spdk_function(__blockdev_write_zeroes, &req); 358 } 359 360 static void 361 __blockdev_read(void *arg) 362 { 363 struct bdevio_request *req = arg; 364 struct io_target *target = req->target; 365 int rc; 366 367 if (req->iovcnt) { 368 rc = spdk_bdev_readv(target->bdev_desc, target->ch, req->iov, req->iovcnt, req->offset, 369 req->data_len, quick_test_complete, NULL); 370 } else { 371 rc = spdk_bdev_read(target->bdev_desc, target->ch, req->buf, req->offset, 372 req->data_len, quick_test_complete, NULL); 373 } 374 375 if (rc) { 376 g_completion_success = false; 377 wake_ut_thread(); 378 } 379 } 380 381 static void 382 blockdev_read(struct io_target *target, char *rx_buf, 383 uint64_t offset, int data_len, int iov_len) 384 { 385 struct bdevio_request req; 386 387 req.target = target; 388 req.buf = rx_buf; 389 req.data_len = data_len; 390 req.offset = offset; 391 req.iovcnt = 0; 392 sgl_chop_buffer(&req, iov_len); 393 394 g_completion_success = false; 395 396 execute_spdk_function(__blockdev_read, &req); 397 } 398 399 static int 400 blockdev_write_read_data_match(char *rx_buf, char *tx_buf, int data_length) 401 { 402 return memcmp(rx_buf, tx_buf, data_length); 403 } 404 405 static void 406 blockdev_write_read(uint32_t data_length, uint32_t iov_len, int pattern, uint64_t offset, 407 int expected_rc, bool write_zeroes) 408 { 409 struct io_target *target; 410 char *tx_buf = NULL; 411 char *rx_buf = NULL; 412 int rc; 413 uint64_t write_offset = offset; 414 uint32_t write_data_len = data_length; 415 416 target = g_current_io_target; 417 418 if (spdk_bdev_get_write_unit_size(target->bdev) > 1 && expected_rc == 0) { 419 uint32_t write_unit_bytes; 420 421 write_unit_bytes = spdk_bdev_get_write_unit_size(target->bdev) * 422 spdk_bdev_get_block_size(target->bdev); 423 write_offset -= offset % write_unit_bytes; 424 write_data_len += (offset - write_offset); 425 426 if (write_data_len % write_unit_bytes) { 427 write_data_len += write_unit_bytes - write_data_len % write_unit_bytes; 428 } 429 } 430 431 if (!write_zeroes) { 432 initialize_buffer(&tx_buf, pattern, write_data_len); 433 initialize_buffer(&rx_buf, 0, data_length); 434 435 blockdev_write(target, tx_buf, write_offset, write_data_len, iov_len); 436 } else { 437 initialize_buffer(&tx_buf, 0, write_data_len); 438 initialize_buffer(&rx_buf, pattern, data_length); 439 440 blockdev_write_zeroes(target, tx_buf, write_offset, write_data_len); 441 } 442 443 444 if (expected_rc == 0) { 445 CU_ASSERT_EQUAL(g_completion_success, true); 446 } else { 447 CU_ASSERT_EQUAL(g_completion_success, false); 448 } 449 blockdev_read(target, rx_buf, offset, data_length, iov_len); 450 451 if (expected_rc == 0) { 452 CU_ASSERT_EQUAL(g_completion_success, true); 453 } else { 454 CU_ASSERT_EQUAL(g_completion_success, false); 455 } 456 457 if (g_completion_success) { 458 rc = blockdev_write_read_data_match(rx_buf, tx_buf + (offset - write_offset), data_length); 459 /* Assert the write by comparing it with values read 460 * from each blockdev */ 461 CU_ASSERT_EQUAL(rc, 0); 462 } 463 464 spdk_free(rx_buf); 465 spdk_free(tx_buf); 466 } 467 468 static void 469 blockdev_compare_and_write(uint32_t data_length, uint32_t iov_len, uint64_t offset) 470 { 471 struct io_target *target; 472 char *tx_buf = NULL; 473 char *write_buf = NULL; 474 char *rx_buf = NULL; 475 int rc; 476 477 target = g_current_io_target; 478 479 initialize_buffer(&tx_buf, 0xAA, data_length); 480 initialize_buffer(&rx_buf, 0, data_length); 481 initialize_buffer(&write_buf, 0xBB, data_length); 482 483 blockdev_write(target, tx_buf, offset, data_length, iov_len); 484 CU_ASSERT_EQUAL(g_completion_success, true); 485 486 _blockdev_compare_and_write(target, tx_buf, write_buf, offset, data_length, iov_len); 487 CU_ASSERT_EQUAL(g_completion_success, true); 488 489 _blockdev_compare_and_write(target, tx_buf, write_buf, offset, data_length, iov_len); 490 CU_ASSERT_EQUAL(g_completion_success, false); 491 492 blockdev_read(target, rx_buf, offset, data_length, iov_len); 493 CU_ASSERT_EQUAL(g_completion_success, true); 494 rc = blockdev_write_read_data_match(rx_buf, write_buf, data_length); 495 /* Assert the write by comparing it with values read 496 * from each blockdev */ 497 CU_ASSERT_EQUAL(rc, 0); 498 499 spdk_free(rx_buf); 500 spdk_free(tx_buf); 501 spdk_free(write_buf); 502 } 503 504 static void 505 blockdev_write_read_block(void) 506 { 507 uint32_t data_length; 508 uint64_t offset; 509 int pattern; 510 int expected_rc; 511 struct io_target *target = g_current_io_target; 512 struct spdk_bdev *bdev = target->bdev; 513 514 /* Data size = 1 block */ 515 data_length = spdk_bdev_get_block_size(bdev); 516 CU_ASSERT_TRUE(data_length < BUFFER_SIZE); 517 offset = 0; 518 pattern = 0xA3; 519 /* Params are valid, hence the expected return value 520 * of write and read for all blockdevs is 0. */ 521 expected_rc = 0; 522 523 blockdev_write_read(data_length, 0, pattern, offset, expected_rc, 0); 524 } 525 526 static void 527 blockdev_write_zeroes_read_block(void) 528 { 529 uint32_t data_length; 530 uint64_t offset; 531 int pattern; 532 int expected_rc; 533 struct io_target *target = g_current_io_target; 534 struct spdk_bdev *bdev = target->bdev; 535 536 /* Data size = 1 block */ 537 data_length = spdk_bdev_get_block_size(bdev); 538 offset = 0; 539 pattern = 0xA3; 540 /* Params are valid, hence the expected return value 541 * of write_zeroes and read for all blockdevs is 0. */ 542 expected_rc = 0; 543 544 blockdev_write_read(data_length, 0, pattern, offset, expected_rc, 1); 545 } 546 547 /* 548 * This i/o will not have to split at the bdev layer. 549 */ 550 static void 551 blockdev_write_zeroes_read_no_split(void) 552 { 553 uint32_t data_length; 554 uint64_t offset; 555 int pattern; 556 int expected_rc; 557 struct io_target *target = g_current_io_target; 558 struct spdk_bdev *bdev = target->bdev; 559 560 /* Data size = block size aligned ZERO_BUFFER_SIZE */ 561 data_length = ZERO_BUFFER_SIZE; /* from bdev_internal.h */ 562 data_length -= ZERO_BUFFER_SIZE % spdk_bdev_get_block_size(bdev); 563 offset = 0; 564 pattern = 0xA3; 565 /* Params are valid, hence the expected return value 566 * of write_zeroes and read for all blockdevs is 0. */ 567 expected_rc = 0; 568 569 blockdev_write_read(data_length, 0, pattern, offset, expected_rc, 1); 570 } 571 572 /* 573 * This i/o will have to split at the bdev layer if 574 * write-zeroes is not supported by the bdev. 575 */ 576 static void 577 blockdev_write_zeroes_read_split(void) 578 { 579 uint32_t data_length; 580 uint64_t offset; 581 int pattern; 582 int expected_rc; 583 struct io_target *target = g_current_io_target; 584 struct spdk_bdev *bdev = target->bdev; 585 586 /* Data size = block size aligned 3 * ZERO_BUFFER_SIZE */ 587 data_length = 3 * ZERO_BUFFER_SIZE; /* from bdev_internal.h */ 588 data_length -= data_length % spdk_bdev_get_block_size(bdev); 589 offset = 0; 590 pattern = 0xA3; 591 /* Params are valid, hence the expected return value 592 * of write_zeroes and read for all blockdevs is 0. */ 593 expected_rc = 0; 594 595 blockdev_write_read(data_length, 0, pattern, offset, expected_rc, 1); 596 } 597 598 /* 599 * This i/o will have to split at the bdev layer if 600 * write-zeroes is not supported by the bdev. It also 601 * tests a write size that is not an even multiple of 602 * the bdev layer zero buffer size. 603 */ 604 static void 605 blockdev_write_zeroes_read_split_partial(void) 606 { 607 uint32_t data_length; 608 uint64_t offset; 609 int pattern; 610 int expected_rc; 611 struct io_target *target = g_current_io_target; 612 struct spdk_bdev *bdev = target->bdev; 613 uint32_t block_size = spdk_bdev_get_block_size(bdev); 614 615 /* Data size = block size aligned 7 * ZERO_BUFFER_SIZE / 2 */ 616 data_length = ZERO_BUFFER_SIZE * 7 / 2; 617 data_length -= data_length % block_size; 618 offset = 0; 619 pattern = 0xA3; 620 /* Params are valid, hence the expected return value 621 * of write_zeroes and read for all blockdevs is 0. */ 622 expected_rc = 0; 623 624 blockdev_write_read(data_length, 0, pattern, offset, expected_rc, 1); 625 } 626 627 static void 628 blockdev_writev_readv_block(void) 629 { 630 uint32_t data_length, iov_len; 631 uint64_t offset; 632 int pattern; 633 int expected_rc; 634 struct io_target *target = g_current_io_target; 635 struct spdk_bdev *bdev = target->bdev; 636 637 /* Data size = 1 block */ 638 data_length = spdk_bdev_get_block_size(bdev); 639 iov_len = data_length; 640 CU_ASSERT_TRUE(data_length < BUFFER_SIZE); 641 offset = 0; 642 pattern = 0xA3; 643 /* Params are valid, hence the expected return value 644 * of write and read for all blockdevs is 0. */ 645 expected_rc = 0; 646 647 blockdev_write_read(data_length, iov_len, pattern, offset, expected_rc, 0); 648 } 649 650 static void 651 blockdev_comparev_and_writev(void) 652 { 653 uint32_t data_length, iov_len; 654 uint64_t offset; 655 struct io_target *target = g_current_io_target; 656 struct spdk_bdev *bdev = target->bdev; 657 658 if (spdk_bdev_is_md_separate(bdev)) { 659 /* TODO: remove this check once bdev layer properly supports 660 * compare and write for bdevs with separate md. 661 */ 662 SPDK_ERRLOG("skipping comparev_and_writev on bdev %s since it has\n" 663 "separate metadata which is not supported yet.\n", 664 spdk_bdev_get_name(bdev)); 665 return; 666 } 667 668 /* Data size = acwu size */ 669 data_length = spdk_bdev_get_block_size(bdev) * spdk_bdev_get_acwu(bdev); 670 iov_len = data_length; 671 CU_ASSERT_TRUE(data_length < BUFFER_SIZE); 672 offset = 0; 673 674 blockdev_compare_and_write(data_length, iov_len, offset); 675 } 676 677 static void 678 blockdev_writev_readv_30x1block(void) 679 { 680 uint32_t data_length, iov_len; 681 uint64_t offset; 682 int pattern; 683 int expected_rc; 684 struct io_target *target = g_current_io_target; 685 struct spdk_bdev *bdev = target->bdev; 686 uint32_t block_size = spdk_bdev_get_block_size(bdev); 687 688 /* Data size = 30 * block size */ 689 data_length = block_size * 30; 690 iov_len = block_size; 691 CU_ASSERT_TRUE(data_length < BUFFER_SIZE); 692 offset = 0; 693 pattern = 0xA3; 694 /* Params are valid, hence the expected return value 695 * of write and read for all blockdevs is 0. */ 696 expected_rc = 0; 697 698 blockdev_write_read(data_length, iov_len, pattern, offset, expected_rc, 0); 699 } 700 701 static void 702 blockdev_write_read_8blocks(void) 703 { 704 uint32_t data_length; 705 uint64_t offset; 706 int pattern; 707 int expected_rc; 708 struct io_target *target = g_current_io_target; 709 struct spdk_bdev *bdev = target->bdev; 710 711 /* Data size = 8 * block size */ 712 data_length = spdk_bdev_get_block_size(bdev) * 8; 713 CU_ASSERT_TRUE(data_length < BUFFER_SIZE); 714 offset = data_length; 715 pattern = 0xA3; 716 /* Params are valid, hence the expected return value 717 * of write and read for all blockdevs is 0. */ 718 expected_rc = 0; 719 720 blockdev_write_read(data_length, 0, pattern, offset, expected_rc, 0); 721 } 722 723 static void 724 blockdev_writev_readv_8blocks(void) 725 { 726 uint32_t data_length, iov_len; 727 uint64_t offset; 728 int pattern; 729 int expected_rc; 730 struct io_target *target = g_current_io_target; 731 struct spdk_bdev *bdev = target->bdev; 732 733 /* Data size = 8 * block size */ 734 data_length = spdk_bdev_get_block_size(bdev) * 8; 735 iov_len = data_length; 736 CU_ASSERT_TRUE(data_length < BUFFER_SIZE); 737 offset = data_length; 738 pattern = 0xA3; 739 /* Params are valid, hence the expected return value 740 * of write and read for all blockdevs is 0. */ 741 expected_rc = 0; 742 743 blockdev_write_read(data_length, iov_len, pattern, offset, expected_rc, 0); 744 } 745 746 static void 747 blockdev_write_read_size_gt_128k(void) 748 { 749 uint32_t data_length; 750 uint64_t offset; 751 int pattern; 752 int expected_rc; 753 struct io_target *target = g_current_io_target; 754 struct spdk_bdev *bdev = target->bdev; 755 uint32_t block_size = spdk_bdev_get_block_size(bdev); 756 757 /* Data size = block size aligned 128K + 1 block */ 758 data_length = 128 * 1024; 759 data_length -= data_length % block_size; 760 data_length += block_size; 761 CU_ASSERT_TRUE(data_length < BUFFER_SIZE); 762 offset = block_size * 2; 763 pattern = 0xA3; 764 /* Params are valid, hence the expected return value 765 * of write and read for all blockdevs is 0. */ 766 expected_rc = 0; 767 768 blockdev_write_read(data_length, 0, pattern, offset, expected_rc, 0); 769 } 770 771 static void 772 blockdev_writev_readv_size_gt_128k(void) 773 { 774 uint32_t data_length, iov_len; 775 uint64_t offset; 776 int pattern; 777 int expected_rc; 778 struct io_target *target = g_current_io_target; 779 struct spdk_bdev *bdev = target->bdev; 780 uint32_t block_size = spdk_bdev_get_block_size(bdev); 781 782 /* Data size = block size aligned 128K + 1 block */ 783 data_length = 128 * 1024; 784 data_length -= data_length % block_size; 785 data_length += block_size; 786 iov_len = data_length; 787 CU_ASSERT_TRUE(data_length < BUFFER_SIZE); 788 offset = block_size * 2; 789 pattern = 0xA3; 790 /* Params are valid, hence the expected return value 791 * of write and read for all blockdevs is 0. */ 792 expected_rc = 0; 793 794 blockdev_write_read(data_length, iov_len, pattern, offset, expected_rc, 0); 795 } 796 797 static void 798 blockdev_writev_readv_size_gt_128k_two_iov(void) 799 { 800 uint32_t data_length, iov_len; 801 uint64_t offset; 802 int pattern; 803 int expected_rc; 804 struct io_target *target = g_current_io_target; 805 struct spdk_bdev *bdev = target->bdev; 806 uint32_t block_size = spdk_bdev_get_block_size(bdev); 807 808 /* Data size = block size aligned 128K + 1 block */ 809 data_length = 128 * 1024; 810 data_length -= data_length % block_size; 811 iov_len = data_length; 812 data_length += block_size; 813 CU_ASSERT_TRUE(data_length < BUFFER_SIZE); 814 offset = block_size * 2; 815 pattern = 0xA3; 816 /* Params are valid, hence the expected return value 817 * of write and read for all blockdevs is 0. */ 818 expected_rc = 0; 819 820 blockdev_write_read(data_length, iov_len, pattern, offset, expected_rc, 0); 821 } 822 823 static void 824 blockdev_write_read_invalid_size(void) 825 { 826 uint32_t data_length; 827 uint64_t offset; 828 int pattern; 829 int expected_rc; 830 struct io_target *target = g_current_io_target; 831 struct spdk_bdev *bdev = target->bdev; 832 uint32_t block_size = spdk_bdev_get_block_size(bdev); 833 834 /* Data size is not a multiple of the block size */ 835 data_length = block_size - 1; 836 CU_ASSERT_TRUE(data_length < BUFFER_SIZE); 837 offset = block_size * 2; 838 pattern = 0xA3; 839 /* Params are invalid, hence the expected return value 840 * of write and read for all blockdevs is < 0 */ 841 expected_rc = -1; 842 843 blockdev_write_read(data_length, 0, pattern, offset, expected_rc, 0); 844 } 845 846 static void 847 blockdev_write_read_offset_plus_nbytes_equals_bdev_size(void) 848 { 849 uint32_t data_length; 850 uint64_t offset; 851 int pattern; 852 int expected_rc; 853 struct io_target *target = g_current_io_target; 854 struct spdk_bdev *bdev = target->bdev; 855 uint32_t block_size = spdk_bdev_get_block_size(bdev); 856 857 data_length = block_size; 858 CU_ASSERT_TRUE(data_length < BUFFER_SIZE); 859 /* The start offset has been set to a marginal value 860 * such that offset + nbytes == Total size of 861 * blockdev. */ 862 offset = ((spdk_bdev_get_num_blocks(bdev) - 1) * block_size); 863 pattern = 0xA3; 864 /* Params are valid, hence the expected return value 865 * of write and read for all blockdevs is 0. */ 866 expected_rc = 0; 867 868 blockdev_write_read(data_length, 0, pattern, offset, expected_rc, 0); 869 } 870 871 static void 872 blockdev_write_read_offset_plus_nbytes_gt_bdev_size(void) 873 { 874 uint32_t data_length; 875 uint64_t offset; 876 int pattern; 877 int expected_rc; 878 struct io_target *target = g_current_io_target; 879 struct spdk_bdev *bdev = target->bdev; 880 uint32_t block_size = spdk_bdev_get_block_size(bdev); 881 882 /* Tests the overflow condition of the blockdevs. */ 883 data_length = block_size * 2; 884 CU_ASSERT_TRUE(data_length < BUFFER_SIZE); 885 pattern = 0xA3; 886 887 /* The start offset has been set to a valid value 888 * but offset + nbytes is greater than the Total size 889 * of the blockdev. The test should fail. */ 890 offset = (spdk_bdev_get_num_blocks(bdev) - 1) * block_size; 891 /* Params are invalid, hence the expected return value 892 * of write and read for all blockdevs is < 0 */ 893 expected_rc = -1; 894 895 blockdev_write_read(data_length, 0, pattern, offset, expected_rc, 0); 896 } 897 898 static void 899 blockdev_write_read_max_offset(void) 900 { 901 int data_length; 902 uint64_t offset; 903 int pattern; 904 int expected_rc; 905 struct io_target *target = g_current_io_target; 906 struct spdk_bdev *bdev = target->bdev; 907 908 data_length = spdk_bdev_get_block_size(bdev); 909 CU_ASSERT_TRUE(data_length < BUFFER_SIZE); 910 /* The start offset has been set to UINT64_MAX such that 911 * adding nbytes wraps around and points to an invalid address. */ 912 offset = UINT64_MAX; 913 pattern = 0xA3; 914 /* Params are invalid, hence the expected return value 915 * of write and read for all blockdevs is < 0 */ 916 expected_rc = -1; 917 918 blockdev_write_read(data_length, 0, pattern, offset, expected_rc, 0); 919 } 920 921 static void 922 blockdev_overlapped_write_read_2blocks(void) 923 { 924 int data_length; 925 uint64_t offset; 926 int pattern; 927 int expected_rc; 928 struct io_target *target = g_current_io_target; 929 struct spdk_bdev *bdev = target->bdev; 930 931 /* Data size = 2 blocks */ 932 data_length = spdk_bdev_get_block_size(bdev) * 2; 933 CU_ASSERT_TRUE(data_length < BUFFER_SIZE); 934 offset = 0; 935 pattern = 0xA3; 936 /* Params are valid, hence the expected return value 937 * of write and read for all blockdevs is 0. */ 938 expected_rc = 0; 939 /* Assert the write by comparing it with values read 940 * from the same offset for each blockdev */ 941 blockdev_write_read(data_length, 0, pattern, offset, expected_rc, 0); 942 943 /* Overwrite the pattern 0xbb of size 2*block size on an address offset 944 * overlapping with the address written above and assert the new value in 945 * the overlapped address range */ 946 /* Populate 2*block size with value 0xBB */ 947 pattern = 0xBB; 948 /* Offset = 1 block; Overlap offset addresses and write value 0xbb */ 949 offset = spdk_bdev_get_block_size(bdev); 950 /* Assert the write by comparing it with values read 951 * from the overlapped offset for each blockdev */ 952 blockdev_write_read(data_length, 0, pattern, offset, expected_rc, 0); 953 } 954 955 static void 956 __blockdev_reset(void *arg) 957 { 958 struct bdevio_request *req = arg; 959 struct io_target *target = req->target; 960 int rc; 961 962 rc = spdk_bdev_reset(target->bdev_desc, target->ch, quick_test_complete, NULL); 963 if (rc < 0) { 964 g_completion_success = false; 965 wake_ut_thread(); 966 } 967 } 968 969 static void 970 blockdev_test_reset(void) 971 { 972 struct bdevio_request req; 973 struct io_target *target; 974 bool reset_supported; 975 976 target = g_current_io_target; 977 req.target = target; 978 979 reset_supported = spdk_bdev_io_type_supported(target->bdev, SPDK_BDEV_IO_TYPE_RESET); 980 g_completion_success = false; 981 982 execute_spdk_function(__blockdev_reset, &req); 983 984 CU_ASSERT_EQUAL(g_completion_success, reset_supported); 985 } 986 987 struct bdevio_passthrough_request { 988 struct spdk_nvme_cmd cmd; 989 void *buf; 990 uint32_t len; 991 struct io_target *target; 992 int sct; 993 int sc; 994 uint32_t cdw0; 995 }; 996 997 static void 998 nvme_pt_test_complete(struct spdk_bdev_io *bdev_io, bool success, void *arg) 999 { 1000 struct bdevio_passthrough_request *pt_req = arg; 1001 1002 spdk_bdev_io_get_nvme_status(bdev_io, &pt_req->cdw0, &pt_req->sct, &pt_req->sc); 1003 spdk_bdev_free_io(bdev_io); 1004 wake_ut_thread(); 1005 } 1006 1007 static void 1008 __blockdev_nvme_passthru(void *arg) 1009 { 1010 struct bdevio_passthrough_request *pt_req = arg; 1011 struct io_target *target = pt_req->target; 1012 int rc; 1013 1014 rc = spdk_bdev_nvme_io_passthru(target->bdev_desc, target->ch, 1015 &pt_req->cmd, pt_req->buf, pt_req->len, 1016 nvme_pt_test_complete, pt_req); 1017 if (rc) { 1018 wake_ut_thread(); 1019 } 1020 } 1021 1022 static void 1023 blockdev_test_nvme_passthru_rw(void) 1024 { 1025 struct bdevio_passthrough_request pt_req; 1026 void *write_buf, *read_buf; 1027 struct io_target *target; 1028 1029 target = g_current_io_target; 1030 1031 if (!spdk_bdev_io_type_supported(target->bdev, SPDK_BDEV_IO_TYPE_NVME_IO)) { 1032 return; 1033 } 1034 1035 memset(&pt_req, 0, sizeof(pt_req)); 1036 pt_req.target = target; 1037 pt_req.cmd.opc = SPDK_NVME_OPC_WRITE; 1038 pt_req.cmd.nsid = 1; 1039 *(uint64_t *)&pt_req.cmd.cdw10 = 4; 1040 pt_req.cmd.cdw12 = 0; 1041 1042 pt_req.len = spdk_bdev_get_block_size(target->bdev); 1043 write_buf = spdk_malloc(pt_req.len, 0, NULL, SPDK_ENV_LCORE_ID_ANY, SPDK_MALLOC_DMA); 1044 memset(write_buf, 0xA5, pt_req.len); 1045 pt_req.buf = write_buf; 1046 1047 pt_req.sct = SPDK_NVME_SCT_VENDOR_SPECIFIC; 1048 pt_req.sc = SPDK_NVME_SC_INVALID_FIELD; 1049 execute_spdk_function(__blockdev_nvme_passthru, &pt_req); 1050 CU_ASSERT(pt_req.sct == SPDK_NVME_SCT_GENERIC); 1051 CU_ASSERT(pt_req.sc == SPDK_NVME_SC_SUCCESS); 1052 1053 pt_req.cmd.opc = SPDK_NVME_OPC_READ; 1054 read_buf = spdk_zmalloc(pt_req.len, 0, NULL, SPDK_ENV_LCORE_ID_ANY, SPDK_MALLOC_DMA); 1055 pt_req.buf = read_buf; 1056 1057 pt_req.sct = SPDK_NVME_SCT_VENDOR_SPECIFIC; 1058 pt_req.sc = SPDK_NVME_SC_INVALID_FIELD; 1059 execute_spdk_function(__blockdev_nvme_passthru, &pt_req); 1060 CU_ASSERT(pt_req.sct == SPDK_NVME_SCT_GENERIC); 1061 CU_ASSERT(pt_req.sc == SPDK_NVME_SC_SUCCESS); 1062 1063 CU_ASSERT(!memcmp(read_buf, write_buf, pt_req.len)); 1064 spdk_free(read_buf); 1065 spdk_free(write_buf); 1066 } 1067 1068 static void 1069 blockdev_test_nvme_passthru_vendor_specific(void) 1070 { 1071 struct bdevio_passthrough_request pt_req; 1072 struct io_target *target; 1073 1074 target = g_current_io_target; 1075 1076 if (!spdk_bdev_io_type_supported(target->bdev, SPDK_BDEV_IO_TYPE_NVME_IO)) { 1077 return; 1078 } 1079 1080 memset(&pt_req, 0, sizeof(pt_req)); 1081 pt_req.target = target; 1082 pt_req.cmd.opc = 0x7F; /* choose known invalid opcode */ 1083 pt_req.cmd.nsid = 1; 1084 1085 pt_req.sct = SPDK_NVME_SCT_VENDOR_SPECIFIC; 1086 pt_req.sc = SPDK_NVME_SC_SUCCESS; 1087 pt_req.cdw0 = 0xbeef; 1088 execute_spdk_function(__blockdev_nvme_passthru, &pt_req); 1089 CU_ASSERT(pt_req.sct == SPDK_NVME_SCT_GENERIC); 1090 CU_ASSERT(pt_req.sc == SPDK_NVME_SC_INVALID_OPCODE); 1091 CU_ASSERT(pt_req.cdw0 == 0x0); 1092 } 1093 1094 static void 1095 __blockdev_nvme_admin_passthru(void *arg) 1096 { 1097 struct bdevio_passthrough_request *pt_req = arg; 1098 struct io_target *target = pt_req->target; 1099 int rc; 1100 1101 rc = spdk_bdev_nvme_admin_passthru(target->bdev_desc, target->ch, 1102 &pt_req->cmd, pt_req->buf, pt_req->len, 1103 nvme_pt_test_complete, pt_req); 1104 if (rc) { 1105 wake_ut_thread(); 1106 } 1107 } 1108 1109 static void 1110 blockdev_test_nvme_admin_passthru(void) 1111 { 1112 struct io_target *target; 1113 struct bdevio_passthrough_request pt_req; 1114 1115 target = g_current_io_target; 1116 1117 if (!spdk_bdev_io_type_supported(target->bdev, SPDK_BDEV_IO_TYPE_NVME_ADMIN)) { 1118 return; 1119 } 1120 1121 memset(&pt_req, 0, sizeof(pt_req)); 1122 pt_req.target = target; 1123 pt_req.cmd.opc = SPDK_NVME_OPC_IDENTIFY; 1124 pt_req.cmd.nsid = 0; 1125 *(uint64_t *)&pt_req.cmd.cdw10 = SPDK_NVME_IDENTIFY_CTRLR; 1126 1127 pt_req.len = sizeof(struct spdk_nvme_ctrlr_data); 1128 pt_req.buf = spdk_malloc(pt_req.len, 0, NULL, SPDK_ENV_LCORE_ID_ANY, SPDK_MALLOC_DMA); 1129 1130 pt_req.sct = SPDK_NVME_SCT_GENERIC; 1131 pt_req.sc = SPDK_NVME_SC_SUCCESS; 1132 execute_spdk_function(__blockdev_nvme_admin_passthru, &pt_req); 1133 CU_ASSERT(pt_req.sct == SPDK_NVME_SCT_GENERIC); 1134 CU_ASSERT(pt_req.sc == SPDK_NVME_SC_SUCCESS); 1135 } 1136 1137 static void 1138 __stop_init_thread(void *arg) 1139 { 1140 unsigned num_failures = g_num_failures; 1141 struct spdk_jsonrpc_request *request = arg; 1142 1143 g_num_failures = 0; 1144 1145 bdevio_cleanup_targets(); 1146 if (g_wait_for_tests && !g_shutdown) { 1147 /* Do not stop the app yet, wait for another RPC */ 1148 rpc_perform_tests_cb(num_failures, request); 1149 return; 1150 } 1151 spdk_app_stop(num_failures); 1152 } 1153 1154 static void 1155 stop_init_thread(unsigned num_failures, struct spdk_jsonrpc_request *request) 1156 { 1157 g_num_failures = num_failures; 1158 1159 spdk_thread_send_msg(g_thread_init, __stop_init_thread, request); 1160 } 1161 1162 static int 1163 suite_init(void) 1164 { 1165 if (g_current_io_target == NULL) { 1166 g_current_io_target = g_io_targets; 1167 } 1168 return 0; 1169 } 1170 1171 static int 1172 suite_fini(void) 1173 { 1174 g_current_io_target = g_current_io_target->next; 1175 return 0; 1176 } 1177 1178 #define SUITE_NAME_MAX 64 1179 1180 static int 1181 __setup_ut_on_single_target(struct io_target *target) 1182 { 1183 unsigned rc = 0; 1184 CU_pSuite suite = NULL; 1185 char name[SUITE_NAME_MAX]; 1186 1187 snprintf(name, sizeof(name), "bdevio tests on: %s", spdk_bdev_get_name(target->bdev)); 1188 suite = CU_add_suite(name, suite_init, suite_fini); 1189 if (suite == NULL) { 1190 CU_cleanup_registry(); 1191 rc = CU_get_error(); 1192 return -rc; 1193 } 1194 1195 if ( 1196 CU_add_test(suite, "blockdev write read block", 1197 blockdev_write_read_block) == NULL 1198 || CU_add_test(suite, "blockdev write zeroes read block", 1199 blockdev_write_zeroes_read_block) == NULL 1200 || CU_add_test(suite, "blockdev write zeroes read no split", 1201 blockdev_write_zeroes_read_no_split) == NULL 1202 || CU_add_test(suite, "blockdev write zeroes read split", 1203 blockdev_write_zeroes_read_split) == NULL 1204 || CU_add_test(suite, "blockdev write zeroes read split partial", 1205 blockdev_write_zeroes_read_split_partial) == NULL 1206 || CU_add_test(suite, "blockdev reset", 1207 blockdev_test_reset) == NULL 1208 || CU_add_test(suite, "blockdev write read 8 blocks", 1209 blockdev_write_read_8blocks) == NULL 1210 || CU_add_test(suite, "blockdev write read size > 128k", 1211 blockdev_write_read_size_gt_128k) == NULL 1212 || CU_add_test(suite, "blockdev write read invalid size", 1213 blockdev_write_read_invalid_size) == NULL 1214 || CU_add_test(suite, "blockdev write read offset + nbytes == size of blockdev", 1215 blockdev_write_read_offset_plus_nbytes_equals_bdev_size) == NULL 1216 || CU_add_test(suite, "blockdev write read offset + nbytes > size of blockdev", 1217 blockdev_write_read_offset_plus_nbytes_gt_bdev_size) == NULL 1218 || CU_add_test(suite, "blockdev write read max offset", 1219 blockdev_write_read_max_offset) == NULL 1220 || CU_add_test(suite, "blockdev write read 2 blocks on overlapped address offset", 1221 blockdev_overlapped_write_read_2blocks) == NULL 1222 || CU_add_test(suite, "blockdev writev readv 8 blocks", 1223 blockdev_writev_readv_8blocks) == NULL 1224 || CU_add_test(suite, "blockdev writev readv 30 x 1block", 1225 blockdev_writev_readv_30x1block) == NULL 1226 || CU_add_test(suite, "blockdev writev readv block", 1227 blockdev_writev_readv_block) == NULL 1228 || CU_add_test(suite, "blockdev writev readv size > 128k", 1229 blockdev_writev_readv_size_gt_128k) == NULL 1230 || CU_add_test(suite, "blockdev writev readv size > 128k in two iovs", 1231 blockdev_writev_readv_size_gt_128k_two_iov) == NULL 1232 || CU_add_test(suite, "blockdev comparev and writev", 1233 blockdev_comparev_and_writev) == NULL 1234 || CU_add_test(suite, "blockdev nvme passthru rw", 1235 blockdev_test_nvme_passthru_rw) == NULL 1236 || CU_add_test(suite, "blockdev nvme passthru vendor specific", 1237 blockdev_test_nvme_passthru_vendor_specific) == NULL 1238 || CU_add_test(suite, "blockdev nvme admin passthru", 1239 blockdev_test_nvme_admin_passthru) == NULL 1240 ) { 1241 CU_cleanup_registry(); 1242 rc = CU_get_error(); 1243 return -rc; 1244 } 1245 return 0; 1246 } 1247 1248 static void 1249 __run_ut_thread(void *arg) 1250 { 1251 struct spdk_jsonrpc_request *request = arg; 1252 int rc = 0; 1253 struct io_target *target; 1254 unsigned num_failures; 1255 1256 if (CU_initialize_registry() != CUE_SUCCESS) { 1257 /* CUnit error, probably won't recover */ 1258 rc = CU_get_error(); 1259 stop_init_thread(-rc, request); 1260 } 1261 1262 target = g_io_targets; 1263 while (target != NULL) { 1264 rc = __setup_ut_on_single_target(target); 1265 if (rc < 0) { 1266 /* CUnit error, probably won't recover */ 1267 stop_init_thread(-rc, request); 1268 } 1269 target = target->next; 1270 } 1271 CU_basic_set_mode(CU_BRM_VERBOSE); 1272 CU_basic_run_tests(); 1273 num_failures = CU_get_number_of_failures(); 1274 CU_cleanup_registry(); 1275 1276 stop_init_thread(num_failures, request); 1277 } 1278 1279 static void 1280 __construct_targets(void *arg) 1281 { 1282 if (bdevio_construct_targets() < 0) { 1283 spdk_app_stop(-1); 1284 return; 1285 } 1286 1287 spdk_thread_send_msg(g_thread_ut, __run_ut_thread, NULL); 1288 } 1289 1290 static void 1291 test_main(void *arg1) 1292 { 1293 struct spdk_cpuset tmpmask = {}; 1294 uint32_t i; 1295 1296 pthread_mutex_init(&g_test_mutex, NULL); 1297 pthread_cond_init(&g_test_cond, NULL); 1298 1299 /* This test runs specifically on at least three cores. 1300 * g_thread_init is the app_thread on main core from event framework. 1301 * Next two are only for the tests and should always be on separate CPU cores. */ 1302 if (spdk_env_get_core_count() < 3) { 1303 spdk_app_stop(-1); 1304 return; 1305 } 1306 1307 SPDK_ENV_FOREACH_CORE(i) { 1308 if (i == spdk_env_get_current_core()) { 1309 g_thread_init = spdk_get_thread(); 1310 continue; 1311 } 1312 spdk_cpuset_zero(&tmpmask); 1313 spdk_cpuset_set_cpu(&tmpmask, i, true); 1314 if (g_thread_ut == NULL) { 1315 g_thread_ut = spdk_thread_create("ut_thread", &tmpmask); 1316 } else if (g_thread_io == NULL) { 1317 g_thread_io = spdk_thread_create("io_thread", &tmpmask); 1318 } 1319 1320 } 1321 1322 if (g_wait_for_tests) { 1323 /* Do not perform any tests until RPC is received */ 1324 return; 1325 } 1326 1327 spdk_thread_send_msg(g_thread_init, __construct_targets, NULL); 1328 } 1329 1330 static void 1331 bdevio_usage(void) 1332 { 1333 printf(" -w start bdevio app and wait for RPC to start the tests\n"); 1334 } 1335 1336 static int 1337 bdevio_parse_arg(int ch, char *arg) 1338 { 1339 switch (ch) { 1340 case 'w': 1341 g_wait_for_tests = true; 1342 break; 1343 default: 1344 return -EINVAL; 1345 } 1346 return 0; 1347 } 1348 1349 struct rpc_perform_tests { 1350 char *name; 1351 }; 1352 1353 static void 1354 free_rpc_perform_tests(struct rpc_perform_tests *r) 1355 { 1356 free(r->name); 1357 } 1358 1359 static const struct spdk_json_object_decoder rpc_perform_tests_decoders[] = { 1360 {"name", offsetof(struct rpc_perform_tests, name), spdk_json_decode_string, true}, 1361 }; 1362 1363 static void 1364 rpc_perform_tests_cb(unsigned num_failures, struct spdk_jsonrpc_request *request) 1365 { 1366 struct spdk_json_write_ctx *w; 1367 1368 if (num_failures == 0) { 1369 w = spdk_jsonrpc_begin_result(request); 1370 spdk_json_write_uint32(w, num_failures); 1371 spdk_jsonrpc_end_result(request, w); 1372 } else { 1373 spdk_jsonrpc_send_error_response_fmt(request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR, 1374 "%d test cases failed", num_failures); 1375 } 1376 } 1377 1378 static void 1379 rpc_perform_tests(struct spdk_jsonrpc_request *request, const struct spdk_json_val *params) 1380 { 1381 struct rpc_perform_tests req = {NULL}; 1382 struct spdk_bdev *bdev; 1383 int rc; 1384 1385 if (params && spdk_json_decode_object(params, rpc_perform_tests_decoders, 1386 SPDK_COUNTOF(rpc_perform_tests_decoders), 1387 &req)) { 1388 SPDK_ERRLOG("spdk_json_decode_object failed\n"); 1389 spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INVALID_PARAMS, "Invalid parameters"); 1390 goto invalid; 1391 } 1392 1393 if (req.name) { 1394 bdev = spdk_bdev_get_by_name(req.name); 1395 if (bdev == NULL) { 1396 SPDK_ERRLOG("Bdev '%s' does not exist\n", req.name); 1397 spdk_jsonrpc_send_error_response_fmt(request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR, 1398 "Bdev '%s' does not exist: %s", 1399 req.name, spdk_strerror(ENODEV)); 1400 goto invalid; 1401 } 1402 rc = bdevio_construct_target(bdev); 1403 if (rc < 0) { 1404 SPDK_ERRLOG("Could not construct target for bdev '%s'\n", spdk_bdev_get_name(bdev)); 1405 spdk_jsonrpc_send_error_response_fmt(request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR, 1406 "Could not construct target for bdev '%s': %s", 1407 spdk_bdev_get_name(bdev), spdk_strerror(-rc)); 1408 goto invalid; 1409 } 1410 } else { 1411 rc = bdevio_construct_targets(); 1412 if (rc < 0) { 1413 SPDK_ERRLOG("Could not construct targets for all bdevs\n"); 1414 spdk_jsonrpc_send_error_response_fmt(request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR, 1415 "Could not construct targets for all bdevs: %s", 1416 spdk_strerror(-rc)); 1417 goto invalid; 1418 } 1419 } 1420 free_rpc_perform_tests(&req); 1421 1422 spdk_thread_send_msg(g_thread_ut, __run_ut_thread, request); 1423 1424 return; 1425 1426 invalid: 1427 free_rpc_perform_tests(&req); 1428 } 1429 SPDK_RPC_REGISTER("perform_tests", rpc_perform_tests, SPDK_RPC_RUNTIME) 1430 1431 static void 1432 spdk_bdevio_shutdown_cb(void) 1433 { 1434 g_shutdown = true; 1435 spdk_thread_send_msg(g_thread_init, __stop_init_thread, NULL); 1436 } 1437 1438 int 1439 main(int argc, char **argv) 1440 { 1441 int rc; 1442 struct spdk_app_opts opts = {}; 1443 1444 spdk_app_opts_init(&opts, sizeof(opts)); 1445 opts.name = "bdevio"; 1446 opts.reactor_mask = "0x7"; 1447 opts.shutdown_cb = spdk_bdevio_shutdown_cb; 1448 1449 if ((rc = spdk_app_parse_args(argc, argv, &opts, "w", NULL, 1450 bdevio_parse_arg, bdevio_usage)) != 1451 SPDK_APP_PARSE_ARGS_SUCCESS) { 1452 return rc; 1453 } 1454 1455 rc = spdk_app_start(&opts, test_main, NULL); 1456 spdk_app_fini(); 1457 1458 return rc; 1459 } 1460