1 /* SPDX-License-Identifier: BSD-3-Clause 2 * Copyright (C) 2017 Intel Corporation. 3 * Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. 4 * All rights reserved. 5 */ 6 7 #include "spdk_internal/cunit.h" 8 9 #include "common/lib/ut_multithread.c" 10 #include "unit/lib/json_mock.c" 11 12 #include "spdk/config.h" 13 /* HACK: disable VTune integration so the unit test doesn't need VTune headers and libs to build */ 14 #undef SPDK_CONFIG_VTUNE 15 16 #include "bdev/bdev.c" 17 18 #include "common/lib/bdev/common_stubs.h" 19 20 #define BDEV_UT_NUM_THREADS 3 21 22 DEFINE_RETURN_MOCK(spdk_memory_domain_pull_data, int); 23 int 24 spdk_memory_domain_pull_data(struct spdk_memory_domain *src_domain, void *src_domain_ctx, 25 struct iovec *src_iov, uint32_t src_iov_cnt, struct iovec *dst_iov, uint32_t dst_iov_cnt, 26 spdk_memory_domain_data_cpl_cb cpl_cb, void *cpl_cb_arg) 27 { 28 HANDLE_RETURN_MOCK(spdk_memory_domain_pull_data); 29 30 cpl_cb(cpl_cb_arg, 0); 31 return 0; 32 } 33 34 DEFINE_RETURN_MOCK(spdk_memory_domain_push_data, int); 35 int 36 spdk_memory_domain_push_data(struct spdk_memory_domain *dst_domain, void *dst_domain_ctx, 37 struct iovec *dst_iov, uint32_t dst_iovcnt, struct iovec *src_iov, uint32_t src_iovcnt, 38 spdk_memory_domain_data_cpl_cb cpl_cb, void *cpl_cb_arg) 39 { 40 HANDLE_RETURN_MOCK(spdk_memory_domain_push_data); 41 42 cpl_cb(cpl_cb_arg, 0); 43 return 0; 44 } 45 46 static int g_accel_io_device; 47 48 struct spdk_io_channel * 49 spdk_accel_get_io_channel(void) 50 { 51 return spdk_get_io_channel(&g_accel_io_device); 52 } 53 54 struct ut_bdev { 55 struct spdk_bdev bdev; 56 void *io_target; 57 }; 58 59 struct ut_bdev_io { 60 TAILQ_ENTRY(ut_bdev_io) link; 61 }; 62 63 struct ut_bdev_channel { 64 TAILQ_HEAD(, ut_bdev_io) outstanding_io; 65 uint32_t outstanding_cnt; 66 uint32_t avail_cnt; 67 struct spdk_thread *thread; 68 TAILQ_ENTRY(ut_bdev_channel) link; 69 }; 70 71 int g_io_device; 72 struct ut_bdev g_bdev; 73 struct spdk_bdev_desc *g_desc; 74 bool g_teardown_done = false; 75 bool g_get_io_channel = true; 76 bool g_create_ch = true; 77 bool g_init_complete_called = false; 78 bool g_fini_start_called = true; 79 int g_status = 0; 80 int g_count = 0; 81 struct spdk_histogram_data *g_histogram = NULL; 82 TAILQ_HEAD(, ut_bdev_channel) g_ut_channels; 83 84 static int 85 ut_accel_ch_create_cb(void *io_device, void *ctx) 86 { 87 return 0; 88 } 89 90 static void 91 ut_accel_ch_destroy_cb(void *io_device, void *ctx) 92 { 93 } 94 95 static int 96 stub_create_ch(void *io_device, void *ctx_buf) 97 { 98 struct ut_bdev_channel *ch = ctx_buf; 99 100 if (g_create_ch == false) { 101 return -1; 102 } 103 104 TAILQ_INIT(&ch->outstanding_io); 105 ch->outstanding_cnt = 0; 106 /* 107 * When avail gets to 0, the submit_request function will return ENOMEM. 108 * Most tests to not want ENOMEM to occur, so by default set this to a 109 * big value that won't get hit. The ENOMEM tests can then override this 110 * value to something much smaller to induce ENOMEM conditions. 
111 */ 112 ch->avail_cnt = 2048; 113 ch->thread = spdk_get_thread(); 114 115 TAILQ_INSERT_TAIL(&g_ut_channels, ch, link); 116 117 return 0; 118 } 119 120 static void 121 stub_destroy_ch(void *io_device, void *ctx_buf) 122 { 123 struct ut_bdev_channel *ch = ctx_buf; 124 125 TAILQ_REMOVE(&g_ut_channels, ch, link); 126 } 127 128 static struct spdk_io_channel * 129 stub_get_io_channel(void *ctx) 130 { 131 struct ut_bdev *ut_bdev = ctx; 132 133 if (g_get_io_channel == true) { 134 return spdk_get_io_channel(ut_bdev->io_target); 135 } else { 136 return NULL; 137 } 138 } 139 140 static int 141 stub_destruct(void *ctx) 142 { 143 return 0; 144 } 145 146 static void 147 stub_reset_channel(void *ctx) 148 { 149 struct ut_bdev_channel *ch = ctx; 150 struct ut_bdev_io *bio; 151 152 while (!TAILQ_EMPTY(&ch->outstanding_io)) { 153 bio = TAILQ_FIRST(&ch->outstanding_io); 154 TAILQ_REMOVE(&ch->outstanding_io, bio, link); 155 ch->outstanding_cnt--; 156 spdk_bdev_io_complete(spdk_bdev_io_from_ctx(bio), SPDK_BDEV_IO_STATUS_ABORTED); 157 ch->avail_cnt++; 158 } 159 } 160 161 static void 162 stub_submit_request(struct spdk_io_channel *_ch, struct spdk_bdev_io *bdev_io) 163 { 164 struct ut_bdev_channel *ch = spdk_io_channel_get_ctx(_ch), *tmp_ch; 165 struct spdk_bdev_io *io; 166 struct ut_bdev_io *bio; 167 168 if (bdev_io->type == SPDK_BDEV_IO_TYPE_RESET) { 169 TAILQ_FOREACH(tmp_ch, &g_ut_channels, link) { 170 if (spdk_get_thread() == tmp_ch->thread) { 171 stub_reset_channel(tmp_ch); 172 } else { 173 spdk_thread_send_msg(tmp_ch->thread, stub_reset_channel, tmp_ch); 174 } 175 } 176 } else if (bdev_io->type == SPDK_BDEV_IO_TYPE_ABORT) { 177 TAILQ_FOREACH(bio, &ch->outstanding_io, link) { 178 io = spdk_bdev_io_from_ctx(bio); 179 if (io == bdev_io->u.abort.bio_to_abort) { 180 TAILQ_REMOVE(&ch->outstanding_io, bio, link); 181 ch->outstanding_cnt--; 182 spdk_bdev_io_complete(io, SPDK_BDEV_IO_STATUS_ABORTED); 183 ch->avail_cnt++; 184 185 spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_SUCCESS); 186 return; 187 } 188 } 189 190 spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_FAILED); 191 return; 192 } 193 194 if (ch->avail_cnt > 0) { 195 TAILQ_INSERT_TAIL(&ch->outstanding_io, (struct ut_bdev_io *)bdev_io->driver_ctx, link); 196 ch->outstanding_cnt++; 197 ch->avail_cnt--; 198 } else { 199 spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_NOMEM); 200 } 201 } 202 203 static uint32_t 204 stub_complete_io(void *io_target, uint32_t num_to_complete) 205 { 206 struct spdk_io_channel *_ch = spdk_get_io_channel(io_target); 207 struct ut_bdev_channel *ch = spdk_io_channel_get_ctx(_ch); 208 struct ut_bdev_io *bio; 209 struct spdk_bdev_io *io; 210 bool complete_all = (num_to_complete == 0); 211 uint32_t num_completed = 0; 212 213 while (complete_all || num_completed < num_to_complete) { 214 if (TAILQ_EMPTY(&ch->outstanding_io)) { 215 break; 216 } 217 bio = TAILQ_FIRST(&ch->outstanding_io); 218 TAILQ_REMOVE(&ch->outstanding_io, bio, link); 219 io = spdk_bdev_io_from_ctx(bio); 220 ch->outstanding_cnt--; 221 spdk_bdev_io_complete(io, SPDK_BDEV_IO_STATUS_SUCCESS); 222 ch->avail_cnt++; 223 num_completed++; 224 } 225 spdk_put_io_channel(_ch); 226 return num_completed; 227 } 228 229 static bool 230 stub_io_type_supported(void *ctx, enum spdk_bdev_io_type type) 231 { 232 return true; 233 } 234 235 static struct spdk_bdev_fn_table fn_table = { 236 .get_io_channel = stub_get_io_channel, 237 .destruct = stub_destruct, 238 .submit_request = stub_submit_request, 239 .io_type_supported = stub_io_type_supported, 240 }; 241 242 struct 
spdk_bdev_module bdev_ut_if; 243 244 static int 245 module_init(void) 246 { 247 spdk_bdev_module_init_done(&bdev_ut_if); 248 return 0; 249 } 250 251 static void 252 module_fini(void) 253 { 254 } 255 256 static void 257 init_complete(void) 258 { 259 g_init_complete_called = true; 260 } 261 262 static void 263 fini_start(void) 264 { 265 g_fini_start_called = true; 266 } 267 268 static int 269 get_ctx_size(void) 270 { 271 return sizeof(struct ut_bdev_io); 272 } 273 274 struct spdk_bdev_module bdev_ut_if = { 275 .name = "bdev_ut", 276 .module_init = module_init, 277 .module_fini = module_fini, 278 .async_init = true, 279 .init_complete = init_complete, 280 .fini_start = fini_start, 281 .get_ctx_size = get_ctx_size, 282 }; 283 284 SPDK_BDEV_MODULE_REGISTER(bdev_ut, &bdev_ut_if) 285 286 static void 287 register_bdev(struct ut_bdev *ut_bdev, char *name, void *io_target) 288 { 289 memset(ut_bdev, 0, sizeof(*ut_bdev)); 290 291 ut_bdev->io_target = io_target; 292 ut_bdev->bdev.ctxt = ut_bdev; 293 ut_bdev->bdev.name = name; 294 ut_bdev->bdev.fn_table = &fn_table; 295 ut_bdev->bdev.module = &bdev_ut_if; 296 ut_bdev->bdev.blocklen = 4096; 297 ut_bdev->bdev.blockcnt = 1024; 298 299 spdk_bdev_register(&ut_bdev->bdev); 300 } 301 302 static void 303 unregister_bdev(struct ut_bdev *ut_bdev) 304 { 305 /* Handle any deferred messages. */ 306 poll_threads(); 307 spdk_bdev_unregister(&ut_bdev->bdev, NULL, NULL); 308 /* Handle the async bdev unregister. */ 309 poll_threads(); 310 } 311 312 static void 313 bdev_init_cb(void *done, int rc) 314 { 315 CU_ASSERT(rc == 0); 316 *(bool *)done = true; 317 } 318 319 static void 320 _bdev_event_cb(enum spdk_bdev_event_type type, struct spdk_bdev *bdev, 321 void *event_ctx) 322 { 323 switch (type) { 324 case SPDK_BDEV_EVENT_REMOVE: 325 if (event_ctx != NULL) { 326 *(bool *)event_ctx = true; 327 } 328 break; 329 case SPDK_BDEV_EVENT_RESIZE: 330 if (event_ctx != NULL) { 331 *(int *)event_ctx += 1; 332 } 333 break; 334 default: 335 CU_ASSERT(false); 336 break; 337 } 338 } 339 340 static void 341 setup_test(void) 342 { 343 bool done = false; 344 int rc; 345 346 TAILQ_INIT(&g_ut_channels); 347 348 allocate_cores(BDEV_UT_NUM_THREADS); 349 allocate_threads(BDEV_UT_NUM_THREADS); 350 set_thread(0); 351 352 rc = spdk_iobuf_initialize(); 353 CU_ASSERT(rc == 0); 354 spdk_bdev_initialize(bdev_init_cb, &done); 355 spdk_io_device_register(&g_io_device, stub_create_ch, stub_destroy_ch, 356 sizeof(struct ut_bdev_channel), NULL); 357 spdk_io_device_register(&g_accel_io_device, ut_accel_ch_create_cb, 358 ut_accel_ch_destroy_cb, 0, NULL); 359 register_bdev(&g_bdev, "ut_bdev", &g_io_device); 360 spdk_bdev_open_ext("ut_bdev", true, _bdev_event_cb, NULL, &g_desc); 361 } 362 363 static void 364 finish_cb(void *cb_arg) 365 { 366 g_teardown_done = true; 367 } 368 369 static void 370 teardown_test(void) 371 { 372 set_thread(0); 373 g_teardown_done = false; 374 spdk_bdev_close(g_desc); 375 g_desc = NULL; 376 unregister_bdev(&g_bdev); 377 spdk_io_device_unregister(&g_io_device, NULL); 378 spdk_bdev_finish(finish_cb, NULL); 379 spdk_io_device_unregister(&g_accel_io_device, NULL); 380 spdk_iobuf_finish(finish_cb, NULL); 381 poll_threads(); 382 memset(&g_bdev, 0, sizeof(g_bdev)); 383 CU_ASSERT(g_teardown_done == true); 384 g_teardown_done = false; 385 free_threads(); 386 free_cores(); 387 CU_ASSERT(TAILQ_EMPTY(&g_ut_channels)) 388 } 389 390 static uint32_t 391 bdev_io_tailq_cnt(bdev_io_tailq_t *tailq) 392 { 393 struct spdk_bdev_io *io; 394 uint32_t cnt = 0; 395 396 TAILQ_FOREACH(io, tailq, internal.link) 
{ 397 cnt++; 398 } 399 400 return cnt; 401 } 402 403 static void 404 basic(void) 405 { 406 g_init_complete_called = false; 407 setup_test(); 408 CU_ASSERT(g_init_complete_called == true); 409 410 set_thread(0); 411 412 g_get_io_channel = false; 413 g_ut_threads[0].ch = spdk_bdev_get_io_channel(g_desc); 414 CU_ASSERT(g_ut_threads[0].ch == NULL); 415 416 g_get_io_channel = true; 417 g_create_ch = false; 418 g_ut_threads[0].ch = spdk_bdev_get_io_channel(g_desc); 419 CU_ASSERT(g_ut_threads[0].ch == NULL); 420 421 g_get_io_channel = true; 422 g_create_ch = true; 423 g_ut_threads[0].ch = spdk_bdev_get_io_channel(g_desc); 424 CU_ASSERT(g_ut_threads[0].ch != NULL); 425 spdk_put_io_channel(g_ut_threads[0].ch); 426 427 g_fini_start_called = false; 428 teardown_test(); 429 CU_ASSERT(g_fini_start_called == true); 430 } 431 432 static void 433 _bdev_unregistered(void *done, int rc) 434 { 435 CU_ASSERT(rc == 0); 436 *(bool *)done = true; 437 } 438 439 static void 440 unregister_and_close(void) 441 { 442 bool done, remove_notify; 443 struct spdk_bdev_desc *desc = NULL; 444 445 setup_test(); 446 set_thread(0); 447 448 /* setup_test() automatically opens the bdev, 449 * but this test needs to do that in a different 450 * way. */ 451 spdk_bdev_close(g_desc); 452 poll_threads(); 453 454 /* Try hotremoving a bdev with descriptors which don't provide 455 * any context to the notification callback */ 456 spdk_bdev_open_ext("ut_bdev", true, _bdev_event_cb, NULL, &desc); 457 SPDK_CU_ASSERT_FATAL(desc != NULL); 458 459 /* There is an open descriptor on the device. Unregister it, 460 * which can't proceed until the descriptor is closed. */ 461 done = false; 462 spdk_bdev_unregister(&g_bdev.bdev, _bdev_unregistered, &done); 463 464 /* Poll the threads to allow all events to be processed */ 465 poll_threads(); 466 467 /* Make sure the bdev was not unregistered. We still have a 468 * descriptor open */ 469 CU_ASSERT(done == false); 470 471 spdk_bdev_close(desc); 472 poll_threads(); 473 desc = NULL; 474 475 /* The unregister should have completed */ 476 CU_ASSERT(done == true); 477 478 479 /* Register the bdev again */ 480 register_bdev(&g_bdev, "ut_bdev", &g_io_device); 481 482 remove_notify = false; 483 spdk_bdev_open_ext("ut_bdev", true, _bdev_event_cb, &remove_notify, &desc); 484 SPDK_CU_ASSERT_FATAL(desc != NULL); 485 CU_ASSERT(remove_notify == false); 486 487 /* There is an open descriptor on the device. Unregister it, 488 * which can't proceed until the descriptor is closed. */ 489 done = false; 490 spdk_bdev_unregister(&g_bdev.bdev, _bdev_unregistered, &done); 491 /* No polling has occurred, so neither of these should execute */ 492 CU_ASSERT(remove_notify == false); 493 CU_ASSERT(done == false); 494 495 /* Prior to the unregister completing, close the descriptor */ 496 spdk_bdev_close(desc); 497 498 /* Poll the threads to allow all events to be processed */ 499 poll_threads(); 500 501 /* Remove notify should not have been called because the 502 * descriptor is already closed. */ 503 CU_ASSERT(remove_notify == false); 504 505 /* The unregister should have completed */ 506 CU_ASSERT(done == true); 507 508 /* Restore the original g_bdev so that we can use teardown_test(). 
*/ 509 register_bdev(&g_bdev, "ut_bdev", &g_io_device); 510 spdk_bdev_open_ext("ut_bdev", true, _bdev_event_cb, NULL, &g_desc); 511 teardown_test(); 512 } 513 514 static void 515 unregister_and_close_different_threads(void) 516 { 517 bool done; 518 struct spdk_bdev_desc *desc = NULL; 519 520 setup_test(); 521 set_thread(0); 522 523 /* setup_test() automatically opens the bdev, 524 * but this test needs to do that in a different 525 * way. */ 526 spdk_bdev_close(g_desc); 527 poll_threads(); 528 529 set_thread(1); 530 spdk_bdev_open_ext("ut_bdev", true, _bdev_event_cb, NULL, &desc); 531 SPDK_CU_ASSERT_FATAL(desc != NULL); 532 done = false; 533 534 set_thread(0); 535 spdk_bdev_unregister(&g_bdev.bdev, _bdev_unregistered, &done); 536 537 /* Poll the threads to allow all events to be processed */ 538 poll_threads(); 539 540 /* Make sure the bdev was not unregistered. We still have a 541 * descriptor open */ 542 CU_ASSERT(done == false); 543 544 /* Close the descriptor on thread 1. Poll the thread and confirm the 545 * unregister did not complete, since it was unregistered on thread 0. 546 */ 547 set_thread(1); 548 spdk_bdev_close(desc); 549 poll_thread(1); 550 CU_ASSERT(done == false); 551 552 /* Now poll thread 0 and confirm the unregister completed. */ 553 set_thread(0); 554 poll_thread(0); 555 CU_ASSERT(done == true); 556 557 /* Restore the original g_bdev so that we can use teardown_test(). */ 558 register_bdev(&g_bdev, "ut_bdev", &g_io_device); 559 spdk_bdev_open_ext("ut_bdev", true, _bdev_event_cb, NULL, &g_desc); 560 teardown_test(); 561 } 562 563 static void 564 reset_done(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg) 565 { 566 bool *done = cb_arg; 567 568 CU_ASSERT(success == true); 569 *done = true; 570 spdk_bdev_free_io(bdev_io); 571 } 572 573 static void 574 put_channel_during_reset(void) 575 { 576 struct spdk_io_channel *io_ch; 577 bool done = false; 578 uint32_t num_completed; 579 580 setup_test(); 581 582 set_thread(0); 583 io_ch = spdk_bdev_get_io_channel(g_desc); 584 CU_ASSERT(io_ch != NULL); 585 586 /* 587 * Start a reset, but then put the I/O channel before 588 * the deferred messages for the reset get a chance to 589 * execute. 590 */ 591 spdk_bdev_reset(g_desc, io_ch, reset_done, &done); 592 spdk_put_io_channel(io_ch); 593 poll_threads(); 594 595 /* Complete the reset. */ 596 num_completed = stub_complete_io(g_bdev.io_target, 0); 597 CU_ASSERT(num_completed == 1); 598 poll_threads(); 599 CU_ASSERT(done == true); 600 601 teardown_test(); 602 } 603 604 static void 605 aborted_reset_done(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg) 606 { 607 enum spdk_bdev_io_status *status = cb_arg; 608 609 *status = success ? SPDK_BDEV_IO_STATUS_SUCCESS : SPDK_BDEV_IO_STATUS_FAILED; 610 spdk_bdev_free_io(bdev_io); 611 } 612 613 static void io_done(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg); 614 615 static void 616 aborted_reset(void) 617 { 618 struct spdk_io_channel *io_ch[2]; 619 enum spdk_bdev_io_status status1 = SPDK_BDEV_IO_STATUS_PENDING, 620 status2 = SPDK_BDEV_IO_STATUS_PENDING; 621 622 setup_test(); 623 624 set_thread(0); 625 io_ch[0] = spdk_bdev_get_io_channel(g_desc); 626 CU_ASSERT(io_ch[0] != NULL); 627 spdk_bdev_reset(g_desc, io_ch[0], aborted_reset_done, &status1); 628 poll_threads(); 629 CU_ASSERT(g_bdev.bdev.internal.reset_in_progress != NULL); 630 631 /* 632 * First reset has been submitted on ch0. Now submit a second 633 * reset on ch1 which will get queued since there is already a 634 * reset in progress. 
635 */ 636 set_thread(1); 637 io_ch[1] = spdk_bdev_get_io_channel(g_desc); 638 CU_ASSERT(io_ch[1] != NULL); 639 spdk_bdev_reset(g_desc, io_ch[1], aborted_reset_done, &status2); 640 poll_threads(); 641 CU_ASSERT(g_bdev.bdev.internal.reset_in_progress != NULL); 642 643 /* 644 * Now destroy ch1. Nothing would really happen because the pending second reset 645 * is still holding a reference of ch1. 646 */ 647 set_thread(1); 648 spdk_put_io_channel(io_ch[1]); 649 poll_threads(); 650 CU_ASSERT(status2 == SPDK_BDEV_IO_STATUS_PENDING); 651 CU_ASSERT(g_bdev.bdev.internal.reset_in_progress != NULL); 652 653 /* 654 * Now complete the first reset, verify that both resets completed with SUCCESS 655 * status and that bdev->internal.reset_in_progress is also set back to NULL. 656 */ 657 set_thread(0); 658 spdk_put_io_channel(io_ch[0]); 659 stub_complete_io(g_bdev.io_target, 0); 660 poll_threads(); 661 CU_ASSERT(status1 == SPDK_BDEV_IO_STATUS_SUCCESS); 662 CU_ASSERT(status2 == SPDK_BDEV_IO_STATUS_SUCCESS); 663 CU_ASSERT(g_bdev.bdev.internal.reset_in_progress == NULL); 664 665 /* 666 * Teardown should succeed. 667 */ 668 teardown_test(); 669 } 670 671 static void 672 aborted_reset_no_outstanding_io(void) 673 { 674 struct spdk_io_channel *io_ch[2]; 675 struct spdk_bdev_channel *bdev_ch[2]; 676 struct spdk_bdev *bdev[2]; 677 enum spdk_bdev_io_status status1 = SPDK_BDEV_IO_STATUS_PENDING, 678 status2 = SPDK_BDEV_IO_STATUS_PENDING; 679 680 setup_test(); 681 682 /* 683 * This time we test the reset without any outstanding IO 684 * present on the bdev channel, so both resets should finish 685 * immediately. 686 */ 687 688 set_thread(0); 689 /* Set reset_io_drain_timeout to allow bdev 690 * reset to stay pending until we call abort. */ 691 io_ch[0] = spdk_bdev_get_io_channel(g_desc); 692 bdev_ch[0] = spdk_io_channel_get_ctx(io_ch[0]); 693 bdev[0] = bdev_ch[0]->bdev; 694 bdev[0]->reset_io_drain_timeout = SPDK_BDEV_RESET_IO_DRAIN_RECOMMENDED_VALUE; 695 CU_ASSERT(io_ch[0] != NULL); 696 spdk_bdev_reset(g_desc, io_ch[0], aborted_reset_done, &status1); 697 poll_threads(); 698 CU_ASSERT(g_bdev.bdev.internal.reset_in_progress == NULL); 699 CU_ASSERT(status1 == SPDK_BDEV_IO_STATUS_SUCCESS); 700 spdk_put_io_channel(io_ch[0]); 701 702 set_thread(1); 703 /* Set reset_io_drain_timeout to allow bdev 704 * reset to stay pending until we call abort. */ 705 io_ch[1] = spdk_bdev_get_io_channel(g_desc); 706 bdev_ch[1] = spdk_io_channel_get_ctx(io_ch[1]); 707 bdev[1] = bdev_ch[1]->bdev; 708 bdev[1]->reset_io_drain_timeout = SPDK_BDEV_RESET_IO_DRAIN_RECOMMENDED_VALUE; 709 CU_ASSERT(io_ch[1] != NULL); 710 spdk_bdev_reset(g_desc, io_ch[1], aborted_reset_done, &status2); 711 poll_threads(); 712 CU_ASSERT(g_bdev.bdev.internal.reset_in_progress == NULL); 713 CU_ASSERT(status2 == SPDK_BDEV_IO_STATUS_SUCCESS); 714 spdk_put_io_channel(io_ch[1]); 715 716 stub_complete_io(g_bdev.io_target, 0); 717 poll_threads(); 718 719 teardown_test(); 720 } 721 722 723 static void 724 io_during_io_done(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg) 725 { 726 enum spdk_bdev_io_status *status = cb_arg; 727 728 *status = bdev_io->internal.status; 729 spdk_bdev_free_io(bdev_io); 730 } 731 732 static void 733 io_during_reset(void) 734 { 735 struct spdk_io_channel *io_ch[2]; 736 struct spdk_bdev_channel *bdev_ch[2]; 737 enum spdk_bdev_io_status status0, status1, status_reset; 738 int rc; 739 740 setup_test(); 741 742 /* 743 * First test normal case - submit an I/O on each of two channels (with no resets) 744 * and verify they complete successfully. 
745 */ 746 set_thread(0); 747 io_ch[0] = spdk_bdev_get_io_channel(g_desc); 748 bdev_ch[0] = spdk_io_channel_get_ctx(io_ch[0]); 749 CU_ASSERT(bdev_ch[0]->flags == 0); 750 status0 = SPDK_BDEV_IO_STATUS_PENDING; 751 rc = spdk_bdev_read_blocks(g_desc, io_ch[0], NULL, 0, 1, io_during_io_done, &status0); 752 CU_ASSERT(rc == 0); 753 754 set_thread(1); 755 io_ch[1] = spdk_bdev_get_io_channel(g_desc); 756 bdev_ch[1] = spdk_io_channel_get_ctx(io_ch[1]); 757 CU_ASSERT(bdev_ch[1]->flags == 0); 758 status1 = SPDK_BDEV_IO_STATUS_PENDING; 759 rc = spdk_bdev_read_blocks(g_desc, io_ch[1], NULL, 0, 1, io_during_io_done, &status1); 760 CU_ASSERT(rc == 0); 761 762 poll_threads(); 763 CU_ASSERT(status0 == SPDK_BDEV_IO_STATUS_PENDING); 764 CU_ASSERT(status1 == SPDK_BDEV_IO_STATUS_PENDING); 765 766 set_thread(0); 767 stub_complete_io(g_bdev.io_target, 0); 768 CU_ASSERT(status0 == SPDK_BDEV_IO_STATUS_SUCCESS); 769 770 set_thread(1); 771 stub_complete_io(g_bdev.io_target, 0); 772 CU_ASSERT(status1 == SPDK_BDEV_IO_STATUS_SUCCESS); 773 774 /* 775 * Now submit a reset, and leave it pending while we submit I/O on two different 776 * channels. These I/O should be failed by the bdev layer since the reset is in 777 * progress. 778 */ 779 set_thread(0); 780 status_reset = SPDK_BDEV_IO_STATUS_PENDING; 781 rc = spdk_bdev_reset(g_desc, io_ch[0], io_during_io_done, &status_reset); 782 CU_ASSERT(rc == 0); 783 784 CU_ASSERT(bdev_ch[0]->flags == 0); 785 CU_ASSERT(bdev_ch[1]->flags == 0); 786 poll_threads(); 787 CU_ASSERT(bdev_ch[0]->flags == BDEV_CH_RESET_IN_PROGRESS); 788 CU_ASSERT(bdev_ch[1]->flags == BDEV_CH_RESET_IN_PROGRESS); 789 790 set_thread(0); 791 status0 = SPDK_BDEV_IO_STATUS_PENDING; 792 rc = spdk_bdev_read_blocks(g_desc, io_ch[0], NULL, 0, 1, io_during_io_done, &status0); 793 CU_ASSERT(rc == 0); 794 795 set_thread(1); 796 status1 = SPDK_BDEV_IO_STATUS_PENDING; 797 rc = spdk_bdev_read_blocks(g_desc, io_ch[1], NULL, 0, 1, io_during_io_done, &status1); 798 CU_ASSERT(rc == 0); 799 800 /* 801 * A reset is in progress so these read I/O should complete with aborted. Note that we 802 * need to poll_threads() since I/O completed inline have their completion deferred. 803 */ 804 poll_threads(); 805 CU_ASSERT(status_reset == SPDK_BDEV_IO_STATUS_PENDING); 806 CU_ASSERT(status0 == SPDK_BDEV_IO_STATUS_ABORTED); 807 CU_ASSERT(status1 == SPDK_BDEV_IO_STATUS_ABORTED); 808 809 /* 810 * Complete the reset 811 */ 812 set_thread(0); 813 stub_complete_io(g_bdev.io_target, 0); 814 815 /* 816 * Only poll thread 0. We should not get a completion. 817 */ 818 poll_thread(0); 819 CU_ASSERT(status_reset == SPDK_BDEV_IO_STATUS_PENDING); 820 821 /* 822 * Poll both thread 0 and 1 so the messages can propagate and we 823 * get a completion. 
824 */ 825 poll_threads(); 826 CU_ASSERT(status_reset == SPDK_BDEV_IO_STATUS_SUCCESS); 827 828 spdk_put_io_channel(io_ch[0]); 829 set_thread(1); 830 spdk_put_io_channel(io_ch[1]); 831 poll_threads(); 832 833 teardown_test(); 834 } 835 836 static uint32_t 837 count_queued_resets(void *io_target) 838 { 839 struct spdk_io_channel *_ch = spdk_get_io_channel(io_target); 840 struct ut_bdev_channel *ch = spdk_io_channel_get_ctx(_ch); 841 struct ut_bdev_io *bio; 842 struct spdk_bdev_io *io; 843 uint32_t submitted_resets = 0; 844 845 TAILQ_FOREACH(bio, &ch->outstanding_io, link) { 846 io = spdk_bdev_io_from_ctx(bio); 847 if (io->type == SPDK_BDEV_IO_TYPE_RESET) { 848 submitted_resets++; 849 } 850 } 851 852 spdk_put_io_channel(_ch); 853 854 return submitted_resets; 855 } 856 857 static void 858 reset_completions(void) 859 { 860 struct spdk_io_channel *io_ch; 861 struct spdk_bdev_channel *bdev_ch; 862 struct spdk_bdev *bdev; 863 enum spdk_bdev_io_status status0, status_reset; 864 int rc, iter; 865 866 setup_test(); 867 868 /* This test covers four test cases: 869 * 1) reset_io_drain_timeout of a bdev is greater than 0 870 * 2) No outstandind IO are present on any bdev channel 871 * 3) Outstanding IO finish during bdev reset 872 * 4) Outstanding IO do not finish before reset is done waiting 873 * for them. 874 * 875 * Above conditions mainly affect the timing of bdev reset completion 876 * and whether a reset should be skipped via spdk_bdev_io_complete() 877 * or sent down to the underlying bdev module via bdev_io_submit_reset(). */ 878 879 /* Test preparation */ 880 set_thread(0); 881 io_ch = spdk_bdev_get_io_channel(g_desc); 882 bdev_ch = spdk_io_channel_get_ctx(io_ch); 883 CU_ASSERT(bdev_ch->flags == 0); 884 885 886 /* Test case 1) reset_io_drain_timeout set to 0. Reset should be sent down immediately. */ 887 bdev = &g_bdev.bdev; 888 bdev->reset_io_drain_timeout = 0; 889 890 status_reset = SPDK_BDEV_IO_STATUS_PENDING; 891 rc = spdk_bdev_reset(g_desc, io_ch, io_during_io_done, &status_reset); 892 CU_ASSERT(rc == 0); 893 poll_threads(); 894 CU_ASSERT(count_queued_resets(g_bdev.io_target) == 1); 895 896 /* Call reset completion inside bdev module. */ 897 stub_complete_io(g_bdev.io_target, 0); 898 poll_threads(); 899 CU_ASSERT(count_queued_resets(g_bdev.io_target) == 0); 900 CU_ASSERT(status_reset == SPDK_BDEV_IO_STATUS_SUCCESS); 901 CU_ASSERT(g_bdev.bdev.internal.reset_in_progress == NULL); 902 903 904 /* Test case 2) no outstanding IO are present. Reset should perform one iteration over 905 * channels and then be skipped. */ 906 bdev->reset_io_drain_timeout = SPDK_BDEV_RESET_IO_DRAIN_RECOMMENDED_VALUE; 907 status_reset = SPDK_BDEV_IO_STATUS_PENDING; 908 909 rc = spdk_bdev_reset(g_desc, io_ch, io_during_io_done, &status_reset); 910 CU_ASSERT(rc == 0); 911 poll_threads(); 912 /* Reset was never submitted to the bdev module. */ 913 CU_ASSERT(count_queued_resets(g_bdev.io_target) == 0); 914 CU_ASSERT(status_reset == SPDK_BDEV_IO_STATUS_SUCCESS); 915 CU_ASSERT(g_bdev.bdev.internal.reset_in_progress == NULL); 916 917 918 /* Test case 3) outstanding IO finish during bdev reset procedure. Reset should initiate 919 * wait poller to check for IO completions every second, until reset_io_drain_timeout is 920 * reached, but finish earlier than this threshold. 
*/ 921 status0 = SPDK_BDEV_IO_STATUS_PENDING; 922 status_reset = SPDK_BDEV_IO_STATUS_PENDING; 923 rc = spdk_bdev_read_blocks(g_desc, io_ch, NULL, 0, 1, io_during_io_done, &status0); 924 CU_ASSERT(rc == 0); 925 926 rc = spdk_bdev_reset(g_desc, io_ch, io_during_io_done, &status_reset); 927 CU_ASSERT(rc == 0); 928 poll_threads(); 929 /* The reset just started and should not have been submitted yet. */ 930 CU_ASSERT(count_queued_resets(g_bdev.io_target) == 0); 931 932 poll_threads(); 933 CU_ASSERT(status_reset == SPDK_BDEV_IO_STATUS_PENDING); 934 /* Let the poller wait for about half the time then complete outstanding IO. */ 935 for (iter = 0; iter < 2; iter++) { 936 /* Reset is still processing and not submitted at this point. */ 937 CU_ASSERT(count_queued_resets(g_bdev.io_target) == 0); 938 spdk_delay_us(1000 * 1000); 939 poll_threads(); 940 poll_threads(); 941 } 942 CU_ASSERT(status_reset == SPDK_BDEV_IO_STATUS_PENDING); 943 stub_complete_io(g_bdev.io_target, 0); 944 poll_threads(); 945 spdk_delay_us(BDEV_RESET_CHECK_OUTSTANDING_IO_PERIOD); 946 poll_threads(); 947 poll_threads(); 948 CU_ASSERT(status_reset == SPDK_BDEV_IO_STATUS_SUCCESS); 949 /* Sending reset to the bdev module has been skipped. */ 950 CU_ASSERT(count_queued_resets(g_bdev.io_target) == 0); 951 CU_ASSERT(g_bdev.bdev.internal.reset_in_progress == NULL); 952 953 954 /* Test case 4) outstanding IO are still present after reset_io_drain_timeout 955 * seconds have passed. */ 956 status0 = SPDK_BDEV_IO_STATUS_PENDING; 957 status_reset = SPDK_BDEV_IO_STATUS_PENDING; 958 rc = spdk_bdev_read_blocks(g_desc, io_ch, NULL, 0, 1, io_during_io_done, &status0); 959 CU_ASSERT(rc == 0); 960 961 rc = spdk_bdev_reset(g_desc, io_ch, io_during_io_done, &status_reset); 962 CU_ASSERT(rc == 0); 963 poll_threads(); 964 /* The reset just started and should not have been submitted yet. */ 965 CU_ASSERT(count_queued_resets(g_bdev.io_target) == 0); 966 967 poll_threads(); 968 CU_ASSERT(status_reset == SPDK_BDEV_IO_STATUS_PENDING); 969 /* Let the poller wait for reset_io_drain_timeout seconds. */ 970 for (iter = 0; iter < bdev->reset_io_drain_timeout; iter++) { 971 CU_ASSERT(count_queued_resets(g_bdev.io_target) == 0); 972 spdk_delay_us(BDEV_RESET_CHECK_OUTSTANDING_IO_PERIOD); 973 poll_threads(); 974 poll_threads(); 975 } 976 977 /* After timing out, the reset should have been sent to the module. */ 978 CU_ASSERT(count_queued_resets(g_bdev.io_target) == 1); 979 /* Complete reset submitted to the module and the read IO. */ 980 stub_complete_io(g_bdev.io_target, 0); 981 poll_threads(); 982 CU_ASSERT(status_reset == SPDK_BDEV_IO_STATUS_SUCCESS); 983 CU_ASSERT(g_bdev.bdev.internal.reset_in_progress == NULL); 984 985 986 /* Destroy the channel and end the test. */ 987 spdk_put_io_channel(io_ch); 988 poll_threads(); 989 990 teardown_test(); 991 } 992 993 994 static void 995 basic_qos(void) 996 { 997 struct spdk_io_channel *io_ch[2]; 998 struct spdk_bdev_channel *bdev_ch[2]; 999 struct spdk_bdev *bdev; 1000 enum spdk_bdev_io_status status, abort_status; 1001 int rc; 1002 1003 setup_test(); 1004 1005 /* Enable QoS */ 1006 bdev = &g_bdev.bdev; 1007 bdev->internal.qos = calloc(1, sizeof(*bdev->internal.qos)); 1008 SPDK_CU_ASSERT_FATAL(bdev->internal.qos != NULL); 1009 /* 1010 * Enable read/write IOPS, read only byte per second and 1011 * read/write byte per second rate limits. 1012 * In this case, all rate limits will take equal effect. 
1013 */ 1014 /* 2000 read/write I/O per second, or 2 per millisecond */ 1015 bdev->internal.qos->rate_limits[SPDK_BDEV_QOS_RW_IOPS_RATE_LIMIT].limit = 2000; 1016 /* 8K read/write byte per millisecond with 4K block size */ 1017 bdev->internal.qos->rate_limits[SPDK_BDEV_QOS_RW_BPS_RATE_LIMIT].limit = 8192000; 1018 /* 8K read only byte per millisecond with 4K block size */ 1019 bdev->internal.qos->rate_limits[SPDK_BDEV_QOS_R_BPS_RATE_LIMIT].limit = 8192000; 1020 1021 g_get_io_channel = true; 1022 1023 set_thread(0); 1024 io_ch[0] = spdk_bdev_get_io_channel(g_desc); 1025 bdev_ch[0] = spdk_io_channel_get_ctx(io_ch[0]); 1026 CU_ASSERT(bdev_ch[0]->flags == BDEV_CH_QOS_ENABLED); 1027 1028 set_thread(1); 1029 io_ch[1] = spdk_bdev_get_io_channel(g_desc); 1030 bdev_ch[1] = spdk_io_channel_get_ctx(io_ch[1]); 1031 CU_ASSERT(bdev_ch[1]->flags == BDEV_CH_QOS_ENABLED); 1032 1033 /* 1034 * Send an I/O on thread 0, which is where the QoS thread is running. 1035 */ 1036 set_thread(0); 1037 status = SPDK_BDEV_IO_STATUS_PENDING; 1038 rc = spdk_bdev_read_blocks(g_desc, io_ch[0], NULL, 0, 1, io_during_io_done, &status); 1039 CU_ASSERT(rc == 0); 1040 CU_ASSERT(status == SPDK_BDEV_IO_STATUS_PENDING); 1041 poll_threads(); 1042 stub_complete_io(g_bdev.io_target, 0); 1043 poll_threads(); 1044 CU_ASSERT(status == SPDK_BDEV_IO_STATUS_SUCCESS); 1045 1046 /* Send an I/O on thread 1. The QoS thread is not running here. */ 1047 status = SPDK_BDEV_IO_STATUS_PENDING; 1048 set_thread(1); 1049 rc = spdk_bdev_read_blocks(g_desc, io_ch[1], NULL, 0, 1, io_during_io_done, &status); 1050 CU_ASSERT(rc == 0); 1051 CU_ASSERT(status == SPDK_BDEV_IO_STATUS_PENDING); 1052 poll_threads(); 1053 /* Complete I/O on thread 0. This should not complete the I/O we submitted. */ 1054 set_thread(0); 1055 stub_complete_io(g_bdev.io_target, 0); 1056 poll_threads(); 1057 CU_ASSERT(status == SPDK_BDEV_IO_STATUS_PENDING); 1058 /* Now complete I/O on original thread 1. */ 1059 set_thread(1); 1060 poll_threads(); 1061 stub_complete_io(g_bdev.io_target, 0); 1062 poll_threads(); 1063 CU_ASSERT(status == SPDK_BDEV_IO_STATUS_SUCCESS); 1064 1065 /* Reset rate limit for the next test cases. */ 1066 spdk_delay_us(SPDK_BDEV_QOS_TIMESLICE_IN_USEC); 1067 poll_threads(); 1068 1069 /* 1070 * Test abort request when QoS is enabled. 1071 */ 1072 1073 /* Send an I/O on thread 0, which is where the QoS thread is running. */ 1074 set_thread(0); 1075 status = SPDK_BDEV_IO_STATUS_PENDING; 1076 rc = spdk_bdev_read_blocks(g_desc, io_ch[0], NULL, 0, 1, io_during_io_done, &status); 1077 CU_ASSERT(rc == 0); 1078 CU_ASSERT(status == SPDK_BDEV_IO_STATUS_PENDING); 1079 /* Send an abort to the I/O on the same thread. */ 1080 abort_status = SPDK_BDEV_IO_STATUS_PENDING; 1081 rc = spdk_bdev_abort(g_desc, io_ch[0], &status, io_during_io_done, &abort_status); 1082 CU_ASSERT(rc == 0); 1083 CU_ASSERT(abort_status == SPDK_BDEV_IO_STATUS_PENDING); 1084 poll_threads(); 1085 CU_ASSERT(abort_status == SPDK_BDEV_IO_STATUS_SUCCESS); 1086 CU_ASSERT(status == SPDK_BDEV_IO_STATUS_ABORTED); 1087 1088 /* Send an I/O on thread 1. The QoS thread is not running here. */ 1089 status = SPDK_BDEV_IO_STATUS_PENDING; 1090 set_thread(1); 1091 rc = spdk_bdev_read_blocks(g_desc, io_ch[1], NULL, 0, 1, io_during_io_done, &status); 1092 CU_ASSERT(rc == 0); 1093 CU_ASSERT(status == SPDK_BDEV_IO_STATUS_PENDING); 1094 poll_threads(); 1095 /* Send an abort to the I/O on the same thread. 
*/ 1096 abort_status = SPDK_BDEV_IO_STATUS_PENDING; 1097 rc = spdk_bdev_abort(g_desc, io_ch[1], &status, io_during_io_done, &abort_status); 1098 CU_ASSERT(rc == 0); 1099 CU_ASSERT(abort_status == SPDK_BDEV_IO_STATUS_PENDING); 1100 poll_threads(); 1101 /* Complete the I/O with failure and the abort with success on thread 1. */ 1102 CU_ASSERT(abort_status == SPDK_BDEV_IO_STATUS_SUCCESS); 1103 CU_ASSERT(status == SPDK_BDEV_IO_STATUS_ABORTED); 1104 1105 set_thread(0); 1106 1107 /* 1108 * Close the descriptor only, which should stop the qos channel as 1109 * the last descriptor removed. 1110 */ 1111 spdk_bdev_close(g_desc); 1112 poll_threads(); 1113 CU_ASSERT(bdev->internal.qos->ch == NULL); 1114 1115 /* 1116 * Open the bdev again which shall setup the qos channel as the 1117 * channels are valid. 1118 */ 1119 spdk_bdev_open_ext("ut_bdev", true, _bdev_event_cb, NULL, &g_desc); 1120 poll_threads(); 1121 CU_ASSERT(bdev->internal.qos->ch != NULL); 1122 1123 /* Tear down the channels */ 1124 set_thread(0); 1125 spdk_put_io_channel(io_ch[0]); 1126 set_thread(1); 1127 spdk_put_io_channel(io_ch[1]); 1128 poll_threads(); 1129 set_thread(0); 1130 1131 /* Close the descriptor, which should stop the qos channel */ 1132 spdk_bdev_close(g_desc); 1133 poll_threads(); 1134 CU_ASSERT(bdev->internal.qos->ch == NULL); 1135 1136 /* Open the bdev again, no qos channel setup without valid channels. */ 1137 spdk_bdev_open_ext("ut_bdev", true, _bdev_event_cb, NULL, &g_desc); 1138 poll_threads(); 1139 CU_ASSERT(bdev->internal.qos->ch == NULL); 1140 1141 /* Create the channels in reverse order. */ 1142 set_thread(1); 1143 io_ch[1] = spdk_bdev_get_io_channel(g_desc); 1144 bdev_ch[1] = spdk_io_channel_get_ctx(io_ch[1]); 1145 CU_ASSERT(bdev_ch[1]->flags == BDEV_CH_QOS_ENABLED); 1146 1147 set_thread(0); 1148 io_ch[0] = spdk_bdev_get_io_channel(g_desc); 1149 bdev_ch[0] = spdk_io_channel_get_ctx(io_ch[0]); 1150 CU_ASSERT(bdev_ch[0]->flags == BDEV_CH_QOS_ENABLED); 1151 1152 /* Confirm that the qos thread is now thread 1 */ 1153 CU_ASSERT(bdev->internal.qos->ch == bdev_ch[1]); 1154 1155 /* Tear down the channels */ 1156 set_thread(0); 1157 spdk_put_io_channel(io_ch[0]); 1158 set_thread(1); 1159 spdk_put_io_channel(io_ch[1]); 1160 poll_threads(); 1161 1162 set_thread(0); 1163 1164 teardown_test(); 1165 } 1166 1167 static void 1168 io_during_qos_queue(void) 1169 { 1170 struct spdk_io_channel *io_ch[2]; 1171 struct spdk_bdev_channel *bdev_ch[2]; 1172 struct spdk_bdev *bdev; 1173 enum spdk_bdev_io_status status0, status1, status2; 1174 int rc; 1175 1176 setup_test(); 1177 MOCK_SET(spdk_get_ticks, 0); 1178 1179 /* Enable QoS */ 1180 bdev = &g_bdev.bdev; 1181 bdev->internal.qos = calloc(1, sizeof(*bdev->internal.qos)); 1182 SPDK_CU_ASSERT_FATAL(bdev->internal.qos != NULL); 1183 1184 /* 1185 * Enable read/write IOPS, read only byte per sec, write only 1186 * byte per sec and read/write byte per sec rate limits. 1187 * In this case, both read only and write only byte per sec 1188 * rate limit will take effect. 
1189 */ 1190 /* 4000 read/write I/O per second, or 4 per millisecond */ 1191 bdev->internal.qos->rate_limits[SPDK_BDEV_QOS_RW_IOPS_RATE_LIMIT].limit = 4000; 1192 /* 8K byte per millisecond with 4K block size */ 1193 bdev->internal.qos->rate_limits[SPDK_BDEV_QOS_RW_BPS_RATE_LIMIT].limit = 8192000; 1194 /* 4K byte per millisecond with 4K block size */ 1195 bdev->internal.qos->rate_limits[SPDK_BDEV_QOS_R_BPS_RATE_LIMIT].limit = 4096000; 1196 /* 4K byte per millisecond with 4K block size */ 1197 bdev->internal.qos->rate_limits[SPDK_BDEV_QOS_W_BPS_RATE_LIMIT].limit = 4096000; 1198 1199 g_get_io_channel = true; 1200 1201 /* Create channels */ 1202 set_thread(0); 1203 io_ch[0] = spdk_bdev_get_io_channel(g_desc); 1204 bdev_ch[0] = spdk_io_channel_get_ctx(io_ch[0]); 1205 CU_ASSERT(bdev_ch[0]->flags == BDEV_CH_QOS_ENABLED); 1206 1207 set_thread(1); 1208 io_ch[1] = spdk_bdev_get_io_channel(g_desc); 1209 bdev_ch[1] = spdk_io_channel_get_ctx(io_ch[1]); 1210 CU_ASSERT(bdev_ch[1]->flags == BDEV_CH_QOS_ENABLED); 1211 1212 /* Send two read I/Os */ 1213 status1 = SPDK_BDEV_IO_STATUS_PENDING; 1214 rc = spdk_bdev_read_blocks(g_desc, io_ch[1], NULL, 0, 1, io_during_io_done, &status1); 1215 CU_ASSERT(rc == 0); 1216 CU_ASSERT(status1 == SPDK_BDEV_IO_STATUS_PENDING); 1217 set_thread(0); 1218 status0 = SPDK_BDEV_IO_STATUS_PENDING; 1219 rc = spdk_bdev_read_blocks(g_desc, io_ch[0], NULL, 0, 1, io_during_io_done, &status0); 1220 CU_ASSERT(rc == 0); 1221 CU_ASSERT(status0 == SPDK_BDEV_IO_STATUS_PENDING); 1222 /* Send one write I/O */ 1223 status2 = SPDK_BDEV_IO_STATUS_PENDING; 1224 rc = spdk_bdev_write_blocks(g_desc, io_ch[0], NULL, 0, 1, io_during_io_done, &status2); 1225 CU_ASSERT(rc == 0); 1226 CU_ASSERT(status2 == SPDK_BDEV_IO_STATUS_PENDING); 1227 1228 /* Complete any I/O that arrived at the disk */ 1229 poll_threads(); 1230 set_thread(1); 1231 stub_complete_io(g_bdev.io_target, 0); 1232 set_thread(0); 1233 stub_complete_io(g_bdev.io_target, 0); 1234 poll_threads(); 1235 1236 /* Only one of the two read I/Os should complete. (logical XOR) */ 1237 if (status0 == SPDK_BDEV_IO_STATUS_SUCCESS) { 1238 CU_ASSERT(status1 == SPDK_BDEV_IO_STATUS_PENDING); 1239 } else { 1240 CU_ASSERT(status1 == SPDK_BDEV_IO_STATUS_SUCCESS); 1241 } 1242 /* The write I/O should complete. 
*/ 1243 CU_ASSERT(status2 == SPDK_BDEV_IO_STATUS_SUCCESS); 1244 1245 /* Advance in time by a millisecond */ 1246 spdk_delay_us(1000); 1247 1248 /* Complete more I/O */ 1249 poll_threads(); 1250 set_thread(1); 1251 stub_complete_io(g_bdev.io_target, 0); 1252 set_thread(0); 1253 stub_complete_io(g_bdev.io_target, 0); 1254 poll_threads(); 1255 1256 /* Now the second read I/O should be done */ 1257 CU_ASSERT(status0 == SPDK_BDEV_IO_STATUS_SUCCESS); 1258 CU_ASSERT(status1 == SPDK_BDEV_IO_STATUS_SUCCESS); 1259 1260 /* Tear down the channels */ 1261 set_thread(1); 1262 spdk_put_io_channel(io_ch[1]); 1263 set_thread(0); 1264 spdk_put_io_channel(io_ch[0]); 1265 poll_threads(); 1266 1267 teardown_test(); 1268 } 1269 1270 static void 1271 io_during_qos_reset(void) 1272 { 1273 struct spdk_io_channel *io_ch[2]; 1274 struct spdk_bdev_channel *bdev_ch[2]; 1275 struct spdk_bdev *bdev; 1276 enum spdk_bdev_io_status status0, status1, reset_status; 1277 int rc; 1278 1279 setup_test(); 1280 MOCK_SET(spdk_get_ticks, 0); 1281 1282 /* Enable QoS */ 1283 bdev = &g_bdev.bdev; 1284 bdev->internal.qos = calloc(1, sizeof(*bdev->internal.qos)); 1285 SPDK_CU_ASSERT_FATAL(bdev->internal.qos != NULL); 1286 1287 /* 1288 * Enable read/write IOPS, write only byte per sec and 1289 * read/write byte per second rate limits. 1290 * In this case, read/write byte per second rate limit will 1291 * take effect first. 1292 */ 1293 /* 2000 read/write I/O per second, or 2 per millisecond */ 1294 bdev->internal.qos->rate_limits[SPDK_BDEV_QOS_RW_IOPS_RATE_LIMIT].limit = 2000; 1295 /* 4K byte per millisecond with 4K block size */ 1296 bdev->internal.qos->rate_limits[SPDK_BDEV_QOS_RW_BPS_RATE_LIMIT].limit = 4096000; 1297 /* 8K byte per millisecond with 4K block size */ 1298 bdev->internal.qos->rate_limits[SPDK_BDEV_QOS_W_BPS_RATE_LIMIT].limit = 8192000; 1299 1300 g_get_io_channel = true; 1301 1302 /* Create channels */ 1303 set_thread(0); 1304 io_ch[0] = spdk_bdev_get_io_channel(g_desc); 1305 bdev_ch[0] = spdk_io_channel_get_ctx(io_ch[0]); 1306 CU_ASSERT(bdev_ch[0]->flags == BDEV_CH_QOS_ENABLED); 1307 1308 set_thread(1); 1309 io_ch[1] = spdk_bdev_get_io_channel(g_desc); 1310 bdev_ch[1] = spdk_io_channel_get_ctx(io_ch[1]); 1311 CU_ASSERT(bdev_ch[1]->flags == BDEV_CH_QOS_ENABLED); 1312 1313 /* Send two I/O. One of these gets queued by QoS. The other is sitting at the disk. */ 1314 status1 = SPDK_BDEV_IO_STATUS_PENDING; 1315 rc = spdk_bdev_write_blocks(g_desc, io_ch[1], NULL, 0, 1, io_during_io_done, &status1); 1316 CU_ASSERT(rc == 0); 1317 set_thread(0); 1318 status0 = SPDK_BDEV_IO_STATUS_PENDING; 1319 rc = spdk_bdev_write_blocks(g_desc, io_ch[0], NULL, 0, 1, io_during_io_done, &status0); 1320 CU_ASSERT(rc == 0); 1321 1322 poll_threads(); 1323 CU_ASSERT(status1 == SPDK_BDEV_IO_STATUS_PENDING); 1324 CU_ASSERT(status0 == SPDK_BDEV_IO_STATUS_PENDING); 1325 1326 /* Reset the bdev. 
*/ 1327 reset_status = SPDK_BDEV_IO_STATUS_PENDING; 1328 rc = spdk_bdev_reset(g_desc, io_ch[0], io_during_io_done, &reset_status); 1329 CU_ASSERT(rc == 0); 1330 1331 /* Complete any I/O that arrived at the disk */ 1332 poll_threads(); 1333 set_thread(1); 1334 stub_complete_io(g_bdev.io_target, 0); 1335 set_thread(0); 1336 stub_complete_io(g_bdev.io_target, 0); 1337 poll_threads(); 1338 1339 CU_ASSERT(reset_status == SPDK_BDEV_IO_STATUS_SUCCESS); 1340 CU_ASSERT(status0 == SPDK_BDEV_IO_STATUS_ABORTED); 1341 CU_ASSERT(status1 == SPDK_BDEV_IO_STATUS_ABORTED); 1342 1343 /* Tear down the channels */ 1344 set_thread(1); 1345 spdk_put_io_channel(io_ch[1]); 1346 set_thread(0); 1347 spdk_put_io_channel(io_ch[0]); 1348 poll_threads(); 1349 1350 teardown_test(); 1351 } 1352 1353 static void 1354 enomem_done(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg) 1355 { 1356 enum spdk_bdev_io_status *status = cb_arg; 1357 1358 *status = success ? SPDK_BDEV_IO_STATUS_SUCCESS : SPDK_BDEV_IO_STATUS_FAILED; 1359 spdk_bdev_free_io(bdev_io); 1360 } 1361 1362 static void 1363 enomem(void) 1364 { 1365 struct spdk_io_channel *io_ch; 1366 struct spdk_bdev_channel *bdev_ch; 1367 struct spdk_bdev_shared_resource *shared_resource; 1368 struct ut_bdev_channel *ut_ch; 1369 const uint32_t IO_ARRAY_SIZE = 64; 1370 const uint32_t AVAIL = 20; 1371 enum spdk_bdev_io_status status[IO_ARRAY_SIZE], status_reset; 1372 uint32_t nomem_cnt, i; 1373 struct spdk_bdev_io *first_io; 1374 int rc; 1375 1376 setup_test(); 1377 1378 set_thread(0); 1379 io_ch = spdk_bdev_get_io_channel(g_desc); 1380 bdev_ch = spdk_io_channel_get_ctx(io_ch); 1381 shared_resource = bdev_ch->shared_resource; 1382 ut_ch = spdk_io_channel_get_ctx(bdev_ch->channel); 1383 ut_ch->avail_cnt = AVAIL; 1384 1385 /* First submit a number of IOs equal to what the channel can support. */ 1386 for (i = 0; i < AVAIL; i++) { 1387 status[i] = SPDK_BDEV_IO_STATUS_PENDING; 1388 rc = spdk_bdev_read_blocks(g_desc, io_ch, NULL, 0, 1, enomem_done, &status[i]); 1389 CU_ASSERT(rc == 0); 1390 } 1391 CU_ASSERT(TAILQ_EMPTY(&shared_resource->nomem_io)); 1392 1393 /* 1394 * Next, submit one additional I/O. This one should fail with ENOMEM and then go onto 1395 * the enomem_io list. 1396 */ 1397 status[AVAIL] = SPDK_BDEV_IO_STATUS_PENDING; 1398 rc = spdk_bdev_read_blocks(g_desc, io_ch, NULL, 0, 1, enomem_done, &status[AVAIL]); 1399 CU_ASSERT(rc == 0); 1400 SPDK_CU_ASSERT_FATAL(!TAILQ_EMPTY(&shared_resource->nomem_io)); 1401 first_io = TAILQ_FIRST(&shared_resource->nomem_io); 1402 1403 /* 1404 * Now submit a bunch more I/O. These should all fail with ENOMEM and get queued behind 1405 * the first_io above. 1406 */ 1407 for (i = AVAIL + 1; i < IO_ARRAY_SIZE; i++) { 1408 status[i] = SPDK_BDEV_IO_STATUS_PENDING; 1409 rc = spdk_bdev_read_blocks(g_desc, io_ch, NULL, 0, 1, enomem_done, &status[i]); 1410 CU_ASSERT(rc == 0); 1411 } 1412 1413 /* Assert that first_io is still at the head of the list. */ 1414 CU_ASSERT(TAILQ_FIRST(&shared_resource->nomem_io) == first_io); 1415 CU_ASSERT(bdev_io_tailq_cnt(&shared_resource->nomem_io) == (IO_ARRAY_SIZE - AVAIL)); 1416 nomem_cnt = bdev_io_tailq_cnt(&shared_resource->nomem_io); 1417 CU_ASSERT(shared_resource->nomem_threshold == (AVAIL - NOMEM_THRESHOLD_COUNT)); 1418 1419 /* 1420 * Complete 1 I/O only. The key check here is bdev_io_tailq_cnt - this should not have 1421 * changed since completing just 1 I/O should not trigger retrying the queued nomem_io 1422 * list. 
1423 */ 1424 stub_complete_io(g_bdev.io_target, 1); 1425 CU_ASSERT(bdev_io_tailq_cnt(&shared_resource->nomem_io) == nomem_cnt); 1426 1427 /* 1428 * Complete enough I/O to hit the nomem_threshold. This should trigger retrying nomem_io, 1429 * and we should see I/O get resubmitted to the test bdev module. 1430 */ 1431 stub_complete_io(g_bdev.io_target, NOMEM_THRESHOLD_COUNT - 1); 1432 CU_ASSERT(bdev_io_tailq_cnt(&shared_resource->nomem_io) < nomem_cnt); 1433 nomem_cnt = bdev_io_tailq_cnt(&shared_resource->nomem_io); 1434 1435 /* Complete 1 I/O only. This should not trigger retrying the queued nomem_io. */ 1436 stub_complete_io(g_bdev.io_target, 1); 1437 CU_ASSERT(bdev_io_tailq_cnt(&shared_resource->nomem_io) == nomem_cnt); 1438 1439 /* 1440 * Send a reset and confirm that all I/O are completed, including the ones that 1441 * were queued on the nomem_io list. 1442 */ 1443 status_reset = SPDK_BDEV_IO_STATUS_PENDING; 1444 rc = spdk_bdev_reset(g_desc, io_ch, enomem_done, &status_reset); 1445 poll_threads(); 1446 CU_ASSERT(rc == 0); 1447 /* This will complete the reset. */ 1448 stub_complete_io(g_bdev.io_target, 0); 1449 1450 CU_ASSERT(bdev_io_tailq_cnt(&shared_resource->nomem_io) == 0); 1451 CU_ASSERT(shared_resource->io_outstanding == 0); 1452 1453 spdk_put_io_channel(io_ch); 1454 poll_threads(); 1455 teardown_test(); 1456 } 1457 1458 static void 1459 enomem_multi_bdev(void) 1460 { 1461 struct spdk_io_channel *io_ch; 1462 struct spdk_bdev_channel *bdev_ch; 1463 struct spdk_bdev_shared_resource *shared_resource; 1464 struct ut_bdev_channel *ut_ch; 1465 const uint32_t IO_ARRAY_SIZE = 64; 1466 const uint32_t AVAIL = 20; 1467 enum spdk_bdev_io_status status[IO_ARRAY_SIZE]; 1468 uint32_t i; 1469 struct ut_bdev *second_bdev; 1470 struct spdk_bdev_desc *second_desc = NULL; 1471 struct spdk_bdev_channel *second_bdev_ch; 1472 struct spdk_io_channel *second_ch; 1473 int rc; 1474 1475 setup_test(); 1476 1477 /* Register second bdev with the same io_target */ 1478 second_bdev = calloc(1, sizeof(*second_bdev)); 1479 SPDK_CU_ASSERT_FATAL(second_bdev != NULL); 1480 register_bdev(second_bdev, "ut_bdev2", g_bdev.io_target); 1481 spdk_bdev_open_ext("ut_bdev2", true, _bdev_event_cb, NULL, &second_desc); 1482 SPDK_CU_ASSERT_FATAL(second_desc != NULL); 1483 1484 set_thread(0); 1485 io_ch = spdk_bdev_get_io_channel(g_desc); 1486 bdev_ch = spdk_io_channel_get_ctx(io_ch); 1487 shared_resource = bdev_ch->shared_resource; 1488 ut_ch = spdk_io_channel_get_ctx(bdev_ch->channel); 1489 ut_ch->avail_cnt = AVAIL; 1490 1491 second_ch = spdk_bdev_get_io_channel(second_desc); 1492 second_bdev_ch = spdk_io_channel_get_ctx(second_ch); 1493 SPDK_CU_ASSERT_FATAL(shared_resource == second_bdev_ch->shared_resource); 1494 1495 /* Saturate io_target through bdev A. */ 1496 for (i = 0; i < AVAIL; i++) { 1497 status[i] = SPDK_BDEV_IO_STATUS_PENDING; 1498 rc = spdk_bdev_read_blocks(g_desc, io_ch, NULL, 0, 1, enomem_done, &status[i]); 1499 CU_ASSERT(rc == 0); 1500 } 1501 CU_ASSERT(TAILQ_EMPTY(&shared_resource->nomem_io)); 1502 1503 /* 1504 * Now submit I/O through the second bdev. This should fail with ENOMEM 1505 * and then go onto the nomem_io list. 1506 */ 1507 status[AVAIL] = SPDK_BDEV_IO_STATUS_PENDING; 1508 rc = spdk_bdev_read_blocks(second_desc, second_ch, NULL, 0, 1, enomem_done, &status[AVAIL]); 1509 CU_ASSERT(rc == 0); 1510 SPDK_CU_ASSERT_FATAL(!TAILQ_EMPTY(&shared_resource->nomem_io)); 1511 1512 /* Complete first bdev's I/O. 
This should retry sending second bdev's nomem_io */ 1513 stub_complete_io(g_bdev.io_target, AVAIL); 1514 1515 SPDK_CU_ASSERT_FATAL(TAILQ_EMPTY(&shared_resource->nomem_io)); 1516 CU_ASSERT(shared_resource->io_outstanding == 1); 1517 1518 /* Now complete our retried I/O */ 1519 stub_complete_io(g_bdev.io_target, 1); 1520 SPDK_CU_ASSERT_FATAL(shared_resource->io_outstanding == 0); 1521 1522 spdk_put_io_channel(io_ch); 1523 spdk_put_io_channel(second_ch); 1524 spdk_bdev_close(second_desc); 1525 unregister_bdev(second_bdev); 1526 poll_threads(); 1527 free(second_bdev); 1528 teardown_test(); 1529 } 1530 1531 static void 1532 enomem_multi_bdev_unregister(void) 1533 { 1534 struct spdk_io_channel *io_ch; 1535 struct spdk_bdev_channel *bdev_ch; 1536 struct spdk_bdev_shared_resource *shared_resource; 1537 struct ut_bdev_channel *ut_ch; 1538 const uint32_t IO_ARRAY_SIZE = 64; 1539 const uint32_t AVAIL = 20; 1540 enum spdk_bdev_io_status status[IO_ARRAY_SIZE]; 1541 uint32_t i; 1542 int rc; 1543 1544 setup_test(); 1545 1546 set_thread(0); 1547 io_ch = spdk_bdev_get_io_channel(g_desc); 1548 bdev_ch = spdk_io_channel_get_ctx(io_ch); 1549 shared_resource = bdev_ch->shared_resource; 1550 ut_ch = spdk_io_channel_get_ctx(bdev_ch->channel); 1551 ut_ch->avail_cnt = AVAIL; 1552 1553 /* Saturate io_target through the bdev. */ 1554 for (i = 0; i < AVAIL; i++) { 1555 status[i] = SPDK_BDEV_IO_STATUS_PENDING; 1556 rc = spdk_bdev_read_blocks(g_desc, io_ch, NULL, 0, 1, enomem_done, &status[i]); 1557 CU_ASSERT(rc == 0); 1558 } 1559 CU_ASSERT(TAILQ_EMPTY(&shared_resource->nomem_io)); 1560 1561 /* 1562 * Now submit I/O through the bdev. This should fail with ENOMEM 1563 * and then go onto the nomem_io list. 1564 */ 1565 status[AVAIL] = SPDK_BDEV_IO_STATUS_PENDING; 1566 rc = spdk_bdev_read_blocks(g_desc, io_ch, NULL, 0, 1, enomem_done, &status[AVAIL]); 1567 CU_ASSERT(rc == 0); 1568 SPDK_CU_ASSERT_FATAL(!TAILQ_EMPTY(&shared_resource->nomem_io)); 1569 1570 /* Unregister the bdev to abort the IOs from nomem_io queue. */ 1571 unregister_bdev(&g_bdev); 1572 CU_ASSERT(status[AVAIL] == SPDK_BDEV_IO_STATUS_FAILED); 1573 SPDK_CU_ASSERT_FATAL(TAILQ_EMPTY(&shared_resource->nomem_io)); 1574 SPDK_CU_ASSERT_FATAL(shared_resource->io_outstanding == AVAIL); 1575 1576 /* Complete the bdev's I/O. 
*/ 1577 stub_complete_io(g_bdev.io_target, AVAIL); 1578 SPDK_CU_ASSERT_FATAL(shared_resource->io_outstanding == 0); 1579 1580 spdk_put_io_channel(io_ch); 1581 poll_threads(); 1582 teardown_test(); 1583 } 1584 1585 static void 1586 enomem_multi_io_target(void) 1587 { 1588 struct spdk_io_channel *io_ch; 1589 struct spdk_bdev_channel *bdev_ch; 1590 struct ut_bdev_channel *ut_ch; 1591 const uint32_t IO_ARRAY_SIZE = 64; 1592 const uint32_t AVAIL = 20; 1593 enum spdk_bdev_io_status status[IO_ARRAY_SIZE]; 1594 uint32_t i; 1595 int new_io_device; 1596 struct ut_bdev *second_bdev; 1597 struct spdk_bdev_desc *second_desc = NULL; 1598 struct spdk_bdev_channel *second_bdev_ch; 1599 struct spdk_io_channel *second_ch; 1600 int rc; 1601 1602 setup_test(); 1603 1604 /* Create new io_target and a second bdev using it */ 1605 spdk_io_device_register(&new_io_device, stub_create_ch, stub_destroy_ch, 1606 sizeof(struct ut_bdev_channel), NULL); 1607 second_bdev = calloc(1, sizeof(*second_bdev)); 1608 SPDK_CU_ASSERT_FATAL(second_bdev != NULL); 1609 register_bdev(second_bdev, "ut_bdev2", &new_io_device); 1610 spdk_bdev_open_ext("ut_bdev2", true, _bdev_event_cb, NULL, &second_desc); 1611 SPDK_CU_ASSERT_FATAL(second_desc != NULL); 1612 1613 set_thread(0); 1614 io_ch = spdk_bdev_get_io_channel(g_desc); 1615 bdev_ch = spdk_io_channel_get_ctx(io_ch); 1616 ut_ch = spdk_io_channel_get_ctx(bdev_ch->channel); 1617 ut_ch->avail_cnt = AVAIL; 1618 1619 /* Different io_target should imply a different shared_resource */ 1620 second_ch = spdk_bdev_get_io_channel(second_desc); 1621 second_bdev_ch = spdk_io_channel_get_ctx(second_ch); 1622 SPDK_CU_ASSERT_FATAL(bdev_ch->shared_resource != second_bdev_ch->shared_resource); 1623 1624 /* Saturate io_target through bdev A. */ 1625 for (i = 0; i < AVAIL; i++) { 1626 status[i] = SPDK_BDEV_IO_STATUS_PENDING; 1627 rc = spdk_bdev_read_blocks(g_desc, io_ch, NULL, 0, 1, enomem_done, &status[i]); 1628 CU_ASSERT(rc == 0); 1629 } 1630 CU_ASSERT(TAILQ_EMPTY(&bdev_ch->shared_resource->nomem_io)); 1631 1632 /* Issue one more I/O to fill ENOMEM list. */ 1633 status[AVAIL] = SPDK_BDEV_IO_STATUS_PENDING; 1634 rc = spdk_bdev_read_blocks(g_desc, io_ch, NULL, 0, 1, enomem_done, &status[AVAIL]); 1635 CU_ASSERT(rc == 0); 1636 SPDK_CU_ASSERT_FATAL(!TAILQ_EMPTY(&bdev_ch->shared_resource->nomem_io)); 1637 1638 /* 1639 * Now submit I/O through the second bdev. This should go through and complete 1640 * successfully because we're using a different io_device underneath. 1641 */ 1642 status[AVAIL] = SPDK_BDEV_IO_STATUS_PENDING; 1643 rc = spdk_bdev_read_blocks(second_desc, second_ch, NULL, 0, 1, enomem_done, &status[AVAIL]); 1644 CU_ASSERT(rc == 0); 1645 SPDK_CU_ASSERT_FATAL(TAILQ_EMPTY(&second_bdev_ch->shared_resource->nomem_io)); 1646 stub_complete_io(second_bdev->io_target, 1); 1647 1648 /* Cleanup; Complete outstanding I/O. 
*/ 1649 stub_complete_io(g_bdev.io_target, AVAIL); 1650 SPDK_CU_ASSERT_FATAL(TAILQ_EMPTY(&bdev_ch->shared_resource->nomem_io)); 1651 /* Complete the ENOMEM I/O */ 1652 stub_complete_io(g_bdev.io_target, 1); 1653 CU_ASSERT(bdev_ch->shared_resource->io_outstanding == 0); 1654 1655 SPDK_CU_ASSERT_FATAL(TAILQ_EMPTY(&bdev_ch->shared_resource->nomem_io)); 1656 CU_ASSERT(bdev_ch->shared_resource->io_outstanding == 0); 1657 spdk_put_io_channel(io_ch); 1658 spdk_put_io_channel(second_ch); 1659 spdk_bdev_close(second_desc); 1660 unregister_bdev(second_bdev); 1661 spdk_io_device_unregister(&new_io_device, NULL); 1662 poll_threads(); 1663 free(second_bdev); 1664 teardown_test(); 1665 } 1666 1667 static void 1668 qos_dynamic_enable_done(void *cb_arg, int status) 1669 { 1670 int *rc = cb_arg; 1671 *rc = status; 1672 } 1673 1674 static void 1675 qos_dynamic_enable(void) 1676 { 1677 struct spdk_io_channel *io_ch[2]; 1678 struct spdk_bdev_channel *bdev_ch[2]; 1679 struct spdk_bdev *bdev; 1680 enum spdk_bdev_io_status bdev_io_status[2]; 1681 uint64_t limits[SPDK_BDEV_QOS_NUM_RATE_LIMIT_TYPES] = {}; 1682 int status, second_status, rc, i; 1683 1684 setup_test(); 1685 MOCK_SET(spdk_get_ticks, 0); 1686 1687 for (i = 0; i < SPDK_BDEV_QOS_NUM_RATE_LIMIT_TYPES; i++) { 1688 limits[i] = UINT64_MAX; 1689 } 1690 1691 bdev = &g_bdev.bdev; 1692 1693 g_get_io_channel = true; 1694 1695 /* Create channels */ 1696 set_thread(0); 1697 io_ch[0] = spdk_bdev_get_io_channel(g_desc); 1698 bdev_ch[0] = spdk_io_channel_get_ctx(io_ch[0]); 1699 CU_ASSERT(bdev_ch[0]->flags == 0); 1700 1701 set_thread(1); 1702 io_ch[1] = spdk_bdev_get_io_channel(g_desc); 1703 bdev_ch[1] = spdk_io_channel_get_ctx(io_ch[1]); 1704 CU_ASSERT(bdev_ch[1]->flags == 0); 1705 1706 set_thread(0); 1707 1708 /* 1709 * Enable QoS: Read/Write IOPS, Read/Write byte, 1710 * Read only byte and Write only byte per second 1711 * rate limits. 1712 * More than 10 I/Os allowed per timeslice. 1713 */ 1714 status = -1; 1715 limits[SPDK_BDEV_QOS_RW_IOPS_RATE_LIMIT] = 10000; 1716 limits[SPDK_BDEV_QOS_RW_BPS_RATE_LIMIT] = 100; 1717 limits[SPDK_BDEV_QOS_R_BPS_RATE_LIMIT] = 100; 1718 limits[SPDK_BDEV_QOS_W_BPS_RATE_LIMIT] = 10; 1719 spdk_bdev_set_qos_rate_limits(bdev, limits, qos_dynamic_enable_done, &status); 1720 poll_threads(); 1721 CU_ASSERT(status == 0); 1722 CU_ASSERT((bdev_ch[0]->flags & BDEV_CH_QOS_ENABLED) != 0); 1723 CU_ASSERT((bdev_ch[1]->flags & BDEV_CH_QOS_ENABLED) != 0); 1724 1725 /* 1726 * Submit and complete 10 I/O to fill the QoS allotment for this timeslice. 1727 * Additional I/O will then be queued. 1728 */ 1729 set_thread(0); 1730 for (i = 0; i < 10; i++) { 1731 bdev_io_status[0] = SPDK_BDEV_IO_STATUS_PENDING; 1732 rc = spdk_bdev_read_blocks(g_desc, io_ch[0], NULL, 0, 1, io_during_io_done, &bdev_io_status[0]); 1733 CU_ASSERT(rc == 0); 1734 CU_ASSERT(bdev_io_status[0] == SPDK_BDEV_IO_STATUS_PENDING); 1735 poll_thread(0); 1736 stub_complete_io(g_bdev.io_target, 0); 1737 CU_ASSERT(bdev_io_status[0] == SPDK_BDEV_IO_STATUS_SUCCESS); 1738 } 1739 1740 /* 1741 * Send two more I/O. These I/O will be queued since the current timeslice allotment has been 1742 * filled already. 
We want to test that when QoS is disabled that these two I/O: 1743 * 1) are not aborted 1744 * 2) are sent back to their original thread for resubmission 1745 */ 1746 bdev_io_status[0] = SPDK_BDEV_IO_STATUS_PENDING; 1747 rc = spdk_bdev_read_blocks(g_desc, io_ch[0], NULL, 0, 1, io_during_io_done, &bdev_io_status[0]); 1748 CU_ASSERT(rc == 0); 1749 CU_ASSERT(bdev_io_status[0] == SPDK_BDEV_IO_STATUS_PENDING); 1750 set_thread(1); 1751 bdev_io_status[1] = SPDK_BDEV_IO_STATUS_PENDING; 1752 rc = spdk_bdev_read_blocks(g_desc, io_ch[1], NULL, 0, 1, io_during_io_done, &bdev_io_status[1]); 1753 CU_ASSERT(rc == 0); 1754 CU_ASSERT(bdev_io_status[1] == SPDK_BDEV_IO_STATUS_PENDING); 1755 poll_threads(); 1756 1757 /* 1758 * Disable QoS: Read/Write IOPS, Read/Write byte, 1759 * Read only byte rate limits 1760 */ 1761 status = -1; 1762 limits[SPDK_BDEV_QOS_RW_IOPS_RATE_LIMIT] = 0; 1763 limits[SPDK_BDEV_QOS_RW_BPS_RATE_LIMIT] = 0; 1764 limits[SPDK_BDEV_QOS_R_BPS_RATE_LIMIT] = 0; 1765 spdk_bdev_set_qos_rate_limits(bdev, limits, qos_dynamic_enable_done, &status); 1766 poll_threads(); 1767 CU_ASSERT(status == 0); 1768 CU_ASSERT((bdev_ch[0]->flags & BDEV_CH_QOS_ENABLED) != 0); 1769 CU_ASSERT((bdev_ch[1]->flags & BDEV_CH_QOS_ENABLED) != 0); 1770 1771 /* Disable QoS: Write only Byte per second rate limit */ 1772 status = -1; 1773 limits[SPDK_BDEV_QOS_W_BPS_RATE_LIMIT] = 0; 1774 spdk_bdev_set_qos_rate_limits(bdev, limits, qos_dynamic_enable_done, &status); 1775 poll_threads(); 1776 CU_ASSERT(status == 0); 1777 CU_ASSERT((bdev_ch[0]->flags & BDEV_CH_QOS_ENABLED) == 0); 1778 CU_ASSERT((bdev_ch[1]->flags & BDEV_CH_QOS_ENABLED) == 0); 1779 1780 /* 1781 * All I/O should have been resubmitted back on their original thread. Complete 1782 * all I/O on thread 0, and ensure that only the thread 0 I/O was completed. 1783 */ 1784 set_thread(0); 1785 stub_complete_io(g_bdev.io_target, 0); 1786 poll_threads(); 1787 CU_ASSERT(bdev_io_status[0] == SPDK_BDEV_IO_STATUS_SUCCESS); 1788 CU_ASSERT(bdev_io_status[1] == SPDK_BDEV_IO_STATUS_PENDING); 1789 1790 /* Now complete all I/O on thread 1 and ensure the thread 1 I/O was completed. */ 1791 set_thread(1); 1792 stub_complete_io(g_bdev.io_target, 0); 1793 poll_threads(); 1794 CU_ASSERT(bdev_io_status[1] == SPDK_BDEV_IO_STATUS_SUCCESS); 1795 1796 /* Disable QoS again */ 1797 status = -1; 1798 limits[SPDK_BDEV_QOS_RW_IOPS_RATE_LIMIT] = 0; 1799 spdk_bdev_set_qos_rate_limits(bdev, limits, qos_dynamic_enable_done, &status); 1800 poll_threads(); 1801 CU_ASSERT(status == 0); /* This should succeed */ 1802 CU_ASSERT((bdev_ch[0]->flags & BDEV_CH_QOS_ENABLED) == 0); 1803 CU_ASSERT((bdev_ch[1]->flags & BDEV_CH_QOS_ENABLED) == 0); 1804 1805 /* Enable QoS on thread 0 */ 1806 status = -1; 1807 limits[SPDK_BDEV_QOS_RW_IOPS_RATE_LIMIT] = 10000; 1808 spdk_bdev_set_qos_rate_limits(bdev, limits, qos_dynamic_enable_done, &status); 1809 poll_threads(); 1810 CU_ASSERT(status == 0); 1811 CU_ASSERT((bdev_ch[0]->flags & BDEV_CH_QOS_ENABLED) != 0); 1812 CU_ASSERT((bdev_ch[1]->flags & BDEV_CH_QOS_ENABLED) != 0); 1813 1814 /* Disable QoS on thread 1 */ 1815 set_thread(1); 1816 status = -1; 1817 limits[SPDK_BDEV_QOS_RW_IOPS_RATE_LIMIT] = 0; 1818 spdk_bdev_set_qos_rate_limits(bdev, limits, qos_dynamic_enable_done, &status); 1819 /* Don't poll yet. This should leave the channels with QoS enabled */ 1820 CU_ASSERT(status == -1); 1821 CU_ASSERT((bdev_ch[0]->flags & BDEV_CH_QOS_ENABLED) != 0); 1822 CU_ASSERT((bdev_ch[1]->flags & BDEV_CH_QOS_ENABLED) != 0); 1823 1824 /* Enable QoS. 
This should immediately fail because the previous disable QoS hasn't completed. */ 1825 second_status = 0; 1826 limits[SPDK_BDEV_QOS_RW_BPS_RATE_LIMIT] = 10; 1827 spdk_bdev_set_qos_rate_limits(bdev, limits, qos_dynamic_enable_done, &second_status); 1828 poll_threads(); 1829 CU_ASSERT(status == 0); /* The disable should succeed */ 1830 CU_ASSERT(second_status < 0); /* The enable should fail */ 1831 CU_ASSERT((bdev_ch[0]->flags & BDEV_CH_QOS_ENABLED) == 0); 1832 CU_ASSERT((bdev_ch[1]->flags & BDEV_CH_QOS_ENABLED) == 0); 1833 1834 /* Enable QoS on thread 1. This should succeed now that the disable has completed. */ 1835 status = -1; 1836 limits[SPDK_BDEV_QOS_RW_IOPS_RATE_LIMIT] = 10000; 1837 spdk_bdev_set_qos_rate_limits(bdev, limits, qos_dynamic_enable_done, &status); 1838 poll_threads(); 1839 CU_ASSERT(status == 0); 1840 CU_ASSERT((bdev_ch[0]->flags & BDEV_CH_QOS_ENABLED) != 0); 1841 CU_ASSERT((bdev_ch[1]->flags & BDEV_CH_QOS_ENABLED) != 0); 1842 1843 /* Tear down the channels */ 1844 set_thread(0); 1845 spdk_put_io_channel(io_ch[0]); 1846 set_thread(1); 1847 spdk_put_io_channel(io_ch[1]); 1848 poll_threads(); 1849 1850 set_thread(0); 1851 teardown_test(); 1852 } 1853 1854 static void 1855 histogram_status_cb(void *cb_arg, int status) 1856 { 1857 g_status = status; 1858 } 1859 1860 static void 1861 histogram_data_cb(void *cb_arg, int status, struct spdk_histogram_data *histogram) 1862 { 1863 g_status = status; 1864 g_histogram = histogram; 1865 } 1866 1867 static void 1868 histogram_io_count(void *ctx, uint64_t start, uint64_t end, uint64_t count, 1869 uint64_t total, uint64_t so_far) 1870 { 1871 g_count += count; 1872 } 1873 1874 static void 1875 bdev_histograms_mt(void) 1876 { 1877 struct spdk_io_channel *ch[2]; 1878 struct spdk_histogram_data *histogram; 1879 uint8_t buf[4096]; 1880 int status = false; 1881 int rc; 1882 1883 1884 setup_test(); 1885 1886 set_thread(0); 1887 ch[0] = spdk_bdev_get_io_channel(g_desc); 1888 CU_ASSERT(ch[0] != NULL); 1889 1890 set_thread(1); 1891 ch[1] = spdk_bdev_get_io_channel(g_desc); 1892 CU_ASSERT(ch[1] != NULL); 1893 1894 1895 /* Enable histogram */ 1896 spdk_bdev_histogram_enable(&g_bdev.bdev, histogram_status_cb, NULL, true); 1897 poll_threads(); 1898 CU_ASSERT(g_status == 0); 1899 CU_ASSERT(g_bdev.bdev.internal.histogram_enabled == true); 1900 1901 /* Allocate histogram */ 1902 histogram = spdk_histogram_data_alloc(); 1903 1904 /* Check if histogram is zeroed */ 1905 spdk_bdev_histogram_get(&g_bdev.bdev, histogram, histogram_data_cb, NULL); 1906 poll_threads(); 1907 CU_ASSERT(g_status == 0); 1908 SPDK_CU_ASSERT_FATAL(g_histogram != NULL); 1909 1910 g_count = 0; 1911 spdk_histogram_data_iterate(g_histogram, histogram_io_count, NULL); 1912 1913 CU_ASSERT(g_count == 0); 1914 1915 set_thread(0); 1916 rc = spdk_bdev_write_blocks(g_desc, ch[0], &buf, 0, 1, io_during_io_done, &status); 1917 CU_ASSERT(rc == 0); 1918 1919 spdk_delay_us(10); 1920 stub_complete_io(g_bdev.io_target, 1); 1921 poll_threads(); 1922 CU_ASSERT(status == true); 1923 1924 1925 set_thread(1); 1926 rc = spdk_bdev_read_blocks(g_desc, ch[1], &buf, 0, 1, io_during_io_done, &status); 1927 CU_ASSERT(rc == 0); 1928 1929 spdk_delay_us(10); 1930 stub_complete_io(g_bdev.io_target, 1); 1931 poll_threads(); 1932 CU_ASSERT(status == true); 1933 1934 set_thread(0); 1935 1936 /* Check if histogram gathered data from all I/O channels */ 1937 spdk_bdev_histogram_get(&g_bdev.bdev, histogram, histogram_data_cb, NULL); 1938 poll_threads(); 1939 CU_ASSERT(g_status == 0); 1940 
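/* At this point one 1-block write (thread 0) and one 1-block read (thread 1) have completed
 * with the histogram enabled, so the aggregated histogram fetched above is expected to hold
 * two samples in total; the bucket iteration below counts them into g_count. */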
CU_ASSERT(g_bdev.bdev.internal.histogram_enabled == true);
1941 SPDK_CU_ASSERT_FATAL(g_histogram != NULL);
1942
1943 g_count = 0;
1944 spdk_histogram_data_iterate(g_histogram, histogram_io_count, NULL);
1945 CU_ASSERT(g_count == 2);
1946
1947 /* Disable histogram */
1948 spdk_bdev_histogram_enable(&g_bdev.bdev, histogram_status_cb, NULL, false);
1949 poll_threads();
1950 CU_ASSERT(g_status == 0);
1951 CU_ASSERT(g_bdev.bdev.internal.histogram_enabled == false);
1952
1953 spdk_histogram_data_free(histogram);
1954
1955 /* Tear down the channels */
1956 set_thread(0);
1957 spdk_put_io_channel(ch[0]);
1958 set_thread(1);
1959 spdk_put_io_channel(ch[1]);
1960 poll_threads();
1961 set_thread(0);
1962 teardown_test();
1963
1964 }
1965
1966 struct timeout_io_cb_arg {
1967 struct iovec iov;
1968 uint8_t type;
1969 };
1970
1971 static int
1972 bdev_channel_count_submitted_io(struct spdk_bdev_channel *ch)
1973 {
1974 struct spdk_bdev_io *bdev_io;
1975 int n = 0;
1976
1977 if (!ch) {
1978 return -1;
1979 }
1980
1981 TAILQ_FOREACH(bdev_io, &ch->io_submitted, internal.ch_link) {
1982 n++;
1983 }
1984
1985 return n;
1986 }
1987
1988 static void
1989 bdev_channel_io_timeout_cb(void *cb_arg, struct spdk_bdev_io *bdev_io)
1990 {
1991 struct timeout_io_cb_arg *ctx = cb_arg;
1992
1993 ctx->type = bdev_io->type;
1994 ctx->iov.iov_base = bdev_io->iov.iov_base;
1995 ctx->iov.iov_len = bdev_io->iov.iov_len;
1996 }
1997
1998 static bool g_io_done;
1999
2000 static void
2001 io_done(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
2002 {
2003 g_io_done = true;
2004 spdk_bdev_free_io(bdev_io);
2005 }
2006
2007 static void
2008 bdev_set_io_timeout_mt(void)
2009 {
2010 struct spdk_io_channel *ch[3];
2011 struct spdk_bdev_channel *bdev_ch[3];
2012 struct timeout_io_cb_arg cb_arg;
2013
2014 setup_test();
2015
2016 g_bdev.bdev.optimal_io_boundary = 16;
2017 g_bdev.bdev.split_on_optimal_io_boundary = true;
2018
2019 set_thread(0);
2020 ch[0] = spdk_bdev_get_io_channel(g_desc);
2021 CU_ASSERT(ch[0] != NULL);
2022
2023 set_thread(1);
2024 ch[1] = spdk_bdev_get_io_channel(g_desc);
2025 CU_ASSERT(ch[1] != NULL);
2026
2027 set_thread(2);
2028 ch[2] = spdk_bdev_get_io_channel(g_desc);
2029 CU_ASSERT(ch[2] != NULL);
2030
2031 /* Multi-thread mode
2032 * 1, Check that the poller was registered successfully.
2033 * 2, Check the timed-out IO and ensure the IO was submitted by the user.
2034 * 3, Check that the link in the bdev_ch works correctly.
2035 * 4, Close the desc and put the io channel while the timeout poller is polling.
2036 */
2037
2038 /* In the desc thread, set the timeout */
2039 set_thread(0);
2040 CU_ASSERT(spdk_bdev_set_timeout(g_desc, 5, bdev_channel_io_timeout_cb, &cb_arg) == 0);
2041 CU_ASSERT(g_desc->io_timeout_poller != NULL);
2042 CU_ASSERT(g_desc->cb_fn == bdev_channel_io_timeout_cb);
2043 CU_ASSERT(g_desc->cb_arg == &cb_arg);
2044
2045 /* Check the submitted IO list and the timeout handler */
2046 CU_ASSERT(spdk_bdev_read_blocks(g_desc, ch[0], (void *)0x2000, 0, 1, io_done, NULL) == 0);
2047 bdev_ch[0] = spdk_io_channel_get_ctx(ch[0]);
2048 CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch[0]) == 1);
2049
2050 set_thread(1);
2051 CU_ASSERT(spdk_bdev_write_blocks(g_desc, ch[1], (void *)0x1000, 0, 1, io_done, NULL) == 0);
2052 bdev_ch[1] = spdk_io_channel_get_ctx(ch[1]);
2053 CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch[1]) == 1);
2054
2055 /* Now test that a single-vector command is split correctly.
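 * (optimal_io_boundary was set to 16 with split_on_optimal_io_boundary enabled at the top of
 * this test, so an 8-block read starting at block 14 crosses the boundary at block 16 and is
 * expected to be split into the two child I/O listed below.)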
2056 * Offset 14, length 8, payload 0xF000
2057 * Child - Offset 14, length 2, payload 0xF000
2058 * Child - Offset 16, length 6, payload 0xF000 + 2 * 512
2059 *
2060 * Set up the expected values before calling spdk_bdev_read_blocks
2061 */
2062 set_thread(2);
2063 CU_ASSERT(spdk_bdev_read_blocks(g_desc, ch[2], (void *)0xF000, 14, 8, io_done, NULL) == 0);
2064 bdev_ch[2] = spdk_io_channel_get_ctx(ch[2]);
2065 CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch[2]) == 3);
2066
2067 set_thread(0);
2068 memset(&cb_arg, 0, sizeof(cb_arg));
2069 spdk_delay_us(3 * spdk_get_ticks_hz());
2070 poll_threads();
2071 CU_ASSERT(cb_arg.type == 0);
2072 CU_ASSERT(cb_arg.iov.iov_base == (void *)0x0);
2073 CU_ASSERT(cb_arg.iov.iov_len == 0);
2074
2075 /* Now the elapsed time reaches the timeout limit */
2076 spdk_delay_us(3 * spdk_get_ticks_hz());
2077 poll_thread(0);
2078 CU_ASSERT(cb_arg.type == SPDK_BDEV_IO_TYPE_READ);
2079 CU_ASSERT(cb_arg.iov.iov_base == (void *)0x2000);
2080 CU_ASSERT(cb_arg.iov.iov_len == 1 * g_bdev.bdev.blocklen);
2081 stub_complete_io(g_bdev.io_target, 1);
2082 CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch[0]) == 0);
2083
2084 memset(&cb_arg, 0, sizeof(cb_arg));
2085 set_thread(1);
2086 poll_thread(1);
2087 CU_ASSERT(cb_arg.type == SPDK_BDEV_IO_TYPE_WRITE);
2088 CU_ASSERT(cb_arg.iov.iov_base == (void *)0x1000);
2089 CU_ASSERT(cb_arg.iov.iov_len == 1 * g_bdev.bdev.blocklen);
2090 stub_complete_io(g_bdev.io_target, 1);
2091 CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch[1]) == 0);
2092
2093 memset(&cb_arg, 0, sizeof(cb_arg));
2094 set_thread(2);
2095 poll_thread(2);
2096 CU_ASSERT(cb_arg.type == SPDK_BDEV_IO_TYPE_READ);
2097 CU_ASSERT(cb_arg.iov.iov_base == (void *)0xF000);
2098 CU_ASSERT(cb_arg.iov.iov_len == 8 * g_bdev.bdev.blocklen);
2099 stub_complete_io(g_bdev.io_target, 1);
2100 CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch[2]) == 2);
2101 stub_complete_io(g_bdev.io_target, 1);
2102 CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch[2]) == 0);
2103
2104 /* Run poll_timeout_done(), i.e. complete the timeout poller */
2105 set_thread(0);
2106 poll_thread(0);
2107 CU_ASSERT(g_desc->refs == 0);
2108 CU_ASSERT(spdk_bdev_read_blocks(g_desc, ch[0], (void *)0x1000, 0, 1, io_done, NULL) == 0);
2109 set_thread(1);
2110 CU_ASSERT(spdk_bdev_write_blocks(g_desc, ch[1], (void *)0x2000, 0, 2, io_done, NULL) == 0);
2111 set_thread(2);
2112 CU_ASSERT(spdk_bdev_read_blocks(g_desc, ch[2], (void *)0x3000, 0, 3, io_done, NULL) == 0);
2113
2114 /* Trigger the timeout poller to run again; desc->refs is incremented.
2115 * In thread 0 we destroy the io channel before the timeout poller runs.
2116 * The timeout callback is not called on thread 0.
2117 */
2118 spdk_delay_us(6 * spdk_get_ticks_hz());
2119 memset(&cb_arg, 0, sizeof(cb_arg));
2120 set_thread(0);
2121 stub_complete_io(g_bdev.io_target, 1);
2122 spdk_put_io_channel(ch[0]);
2123 poll_thread(0);
2124 CU_ASSERT(g_desc->refs == 1);
2125 CU_ASSERT(cb_arg.type == 0);
2126 CU_ASSERT(cb_arg.iov.iov_base == (void *)0x0);
2127 CU_ASSERT(cb_arg.iov.iov_len == 0);
2128
2129 /* In thread 1 the timeout poller runs, then we destroy the io channel.
2130 * The timeout callback is called on thread 1.
2131 */
2132 memset(&cb_arg, 0, sizeof(cb_arg));
2133 set_thread(1);
2134 poll_thread(1);
2135 stub_complete_io(g_bdev.io_target, 1);
2136 spdk_put_io_channel(ch[1]);
2137 poll_thread(1);
2138 CU_ASSERT(cb_arg.type == SPDK_BDEV_IO_TYPE_WRITE);
2139 CU_ASSERT(cb_arg.iov.iov_base == (void *)0x2000);
2140 CU_ASSERT(cb_arg.iov.iov_len == 2 * g_bdev.bdev.blocklen);
2141
2142 /* Close the desc.
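 * (The descriptor was opened on thread 0 by setup_test(), and spdk_bdev_close() is expected
 * to be called on that same thread, hence the set_thread(0) below.)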
2143 * Unregister the timeout poller first. 2144 * Then decrement desc->refs but it's not zero yet so desc is not freed. 2145 */ 2146 set_thread(0); 2147 spdk_bdev_close(g_desc); 2148 CU_ASSERT(g_desc->refs == 1); 2149 CU_ASSERT(g_desc->io_timeout_poller == NULL); 2150 2151 /* Timeout poller runs on thread 2 then we destroy the io channel. 2152 * Desc is closed so we would exit the timeout poller directly. 2153 * timeout callback is not called on thread 2. 2154 */ 2155 memset(&cb_arg, 0, sizeof(cb_arg)); 2156 set_thread(2); 2157 poll_thread(2); 2158 stub_complete_io(g_bdev.io_target, 1); 2159 spdk_put_io_channel(ch[2]); 2160 poll_thread(2); 2161 CU_ASSERT(cb_arg.type == 0); 2162 CU_ASSERT(cb_arg.iov.iov_base == (void *)0x0); 2163 CU_ASSERT(cb_arg.iov.iov_len == 0); 2164 2165 set_thread(0); 2166 poll_thread(0); 2167 g_teardown_done = false; 2168 unregister_bdev(&g_bdev); 2169 spdk_io_device_unregister(&g_io_device, NULL); 2170 spdk_bdev_finish(finish_cb, NULL); 2171 spdk_iobuf_finish(finish_cb, NULL); 2172 poll_threads(); 2173 memset(&g_bdev, 0, sizeof(g_bdev)); 2174 CU_ASSERT(g_teardown_done == true); 2175 g_teardown_done = false; 2176 free_threads(); 2177 free_cores(); 2178 } 2179 2180 static bool g_io_done2; 2181 static bool g_lock_lba_range_done; 2182 static bool g_unlock_lba_range_done; 2183 2184 static void 2185 io_done2(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg) 2186 { 2187 g_io_done2 = true; 2188 spdk_bdev_free_io(bdev_io); 2189 } 2190 2191 static void 2192 lock_lba_range_done(struct lba_range *range, void *ctx, int status) 2193 { 2194 g_lock_lba_range_done = true; 2195 } 2196 2197 static void 2198 unlock_lba_range_done(struct lba_range *range, void *ctx, int status) 2199 { 2200 g_unlock_lba_range_done = true; 2201 } 2202 2203 static uint32_t 2204 stub_channel_outstanding_cnt(void *io_target) 2205 { 2206 struct spdk_io_channel *_ch = spdk_get_io_channel(io_target); 2207 struct ut_bdev_channel *ch = spdk_io_channel_get_ctx(_ch); 2208 uint32_t outstanding_cnt; 2209 2210 outstanding_cnt = ch->outstanding_cnt; 2211 2212 spdk_put_io_channel(_ch); 2213 return outstanding_cnt; 2214 } 2215 2216 static void 2217 lock_lba_range_then_submit_io(void) 2218 { 2219 struct spdk_bdev_desc *desc = NULL; 2220 void *io_target; 2221 struct spdk_io_channel *io_ch[3]; 2222 struct spdk_bdev_channel *bdev_ch[3]; 2223 struct lba_range *range; 2224 char buf[4096]; 2225 int ctx0, ctx1, ctx2; 2226 int rc; 2227 2228 setup_test(); 2229 2230 io_target = g_bdev.io_target; 2231 desc = g_desc; 2232 2233 set_thread(0); 2234 io_ch[0] = spdk_bdev_get_io_channel(desc); 2235 bdev_ch[0] = spdk_io_channel_get_ctx(io_ch[0]); 2236 CU_ASSERT(io_ch[0] != NULL); 2237 2238 set_thread(1); 2239 io_ch[1] = spdk_bdev_get_io_channel(desc); 2240 bdev_ch[1] = spdk_io_channel_get_ctx(io_ch[1]); 2241 CU_ASSERT(io_ch[1] != NULL); 2242 2243 set_thread(0); 2244 g_lock_lba_range_done = false; 2245 rc = bdev_lock_lba_range(desc, io_ch[0], 20, 10, lock_lba_range_done, &ctx0); 2246 CU_ASSERT(rc == 0); 2247 poll_threads(); 2248 2249 /* The lock should immediately become valid, since there are no outstanding 2250 * write I/O. 
2251 */ 2252 CU_ASSERT(g_lock_lba_range_done == true); 2253 range = TAILQ_FIRST(&bdev_ch[0]->locked_ranges); 2254 SPDK_CU_ASSERT_FATAL(range != NULL); 2255 CU_ASSERT(range->offset == 20); 2256 CU_ASSERT(range->length == 10); 2257 CU_ASSERT(range->owner_ch == bdev_ch[0]); 2258 2259 g_io_done = false; 2260 CU_ASSERT(TAILQ_EMPTY(&bdev_ch[0]->io_locked)); 2261 rc = spdk_bdev_read_blocks(desc, io_ch[0], buf, 20, 1, io_done, &ctx0); 2262 CU_ASSERT(rc == 0); 2263 CU_ASSERT(stub_channel_outstanding_cnt(io_target) == 1); 2264 2265 stub_complete_io(io_target, 1); 2266 poll_threads(); 2267 CU_ASSERT(g_io_done == true); 2268 CU_ASSERT(TAILQ_EMPTY(&bdev_ch[0]->io_locked)); 2269 2270 /* Try a write I/O. This should actually be allowed to execute, since the channel 2271 * holding the lock is submitting the write I/O. 2272 */ 2273 g_io_done = false; 2274 CU_ASSERT(TAILQ_EMPTY(&bdev_ch[0]->io_locked)); 2275 rc = spdk_bdev_write_blocks(desc, io_ch[0], buf, 20, 1, io_done, &ctx0); 2276 CU_ASSERT(rc == 0); 2277 CU_ASSERT(stub_channel_outstanding_cnt(io_target) == 1); 2278 2279 stub_complete_io(io_target, 1); 2280 poll_threads(); 2281 CU_ASSERT(g_io_done == true); 2282 CU_ASSERT(TAILQ_EMPTY(&bdev_ch[0]->io_locked)); 2283 2284 /* Try a write I/O. This should get queued in the io_locked tailq. */ 2285 set_thread(1); 2286 g_io_done = false; 2287 CU_ASSERT(TAILQ_EMPTY(&bdev_ch[1]->io_locked)); 2288 rc = spdk_bdev_write_blocks(desc, io_ch[1], buf, 20, 1, io_done, &ctx1); 2289 CU_ASSERT(rc == 0); 2290 poll_threads(); 2291 CU_ASSERT(stub_channel_outstanding_cnt(io_target) == 0); 2292 CU_ASSERT(!TAILQ_EMPTY(&bdev_ch[1]->io_locked)); 2293 CU_ASSERT(g_io_done == false); 2294 2295 /* Try to unlock the lba range using thread 1's io_ch. This should fail. */ 2296 rc = bdev_unlock_lba_range(desc, io_ch[1], 20, 10, unlock_lba_range_done, &ctx1); 2297 CU_ASSERT(rc == -EINVAL); 2298 2299 /* Now create a new channel and submit a write I/O with it. This should also be queued. 2300 * The new channel should inherit the active locks from the bdev's internal list. 2301 */ 2302 set_thread(2); 2303 io_ch[2] = spdk_bdev_get_io_channel(desc); 2304 bdev_ch[2] = spdk_io_channel_get_ctx(io_ch[2]); 2305 CU_ASSERT(io_ch[2] != NULL); 2306 2307 g_io_done2 = false; 2308 CU_ASSERT(TAILQ_EMPTY(&bdev_ch[2]->io_locked)); 2309 rc = spdk_bdev_write_blocks(desc, io_ch[2], buf, 22, 2, io_done2, &ctx2); 2310 CU_ASSERT(rc == 0); 2311 poll_threads(); 2312 CU_ASSERT(stub_channel_outstanding_cnt(io_target) == 0); 2313 CU_ASSERT(!TAILQ_EMPTY(&bdev_ch[2]->io_locked)); 2314 CU_ASSERT(g_io_done2 == false); 2315 2316 set_thread(0); 2317 rc = bdev_unlock_lba_range(desc, io_ch[0], 20, 10, unlock_lba_range_done, &ctx0); 2318 CU_ASSERT(rc == 0); 2319 poll_threads(); 2320 CU_ASSERT(TAILQ_EMPTY(&bdev_ch[0]->locked_ranges)); 2321 2322 /* The LBA range is unlocked, so the write IOs should now have started execution. 
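 * Each channel's io_locked queue should now be empty and each previously queued write should
 * be outstanding at the stub backend, which is verified below before completing them.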
*/ 2323 CU_ASSERT(TAILQ_EMPTY(&bdev_ch[1]->io_locked)); 2324 CU_ASSERT(TAILQ_EMPTY(&bdev_ch[2]->io_locked)); 2325 2326 set_thread(1); 2327 CU_ASSERT(stub_channel_outstanding_cnt(io_target) == 1); 2328 stub_complete_io(io_target, 1); 2329 set_thread(2); 2330 CU_ASSERT(stub_channel_outstanding_cnt(io_target) == 1); 2331 stub_complete_io(io_target, 1); 2332 2333 poll_threads(); 2334 CU_ASSERT(g_io_done == true); 2335 CU_ASSERT(g_io_done2 == true); 2336 2337 /* Tear down the channels */ 2338 set_thread(0); 2339 spdk_put_io_channel(io_ch[0]); 2340 set_thread(1); 2341 spdk_put_io_channel(io_ch[1]); 2342 set_thread(2); 2343 spdk_put_io_channel(io_ch[2]); 2344 poll_threads(); 2345 set_thread(0); 2346 teardown_test(); 2347 } 2348 2349 /* spdk_bdev_reset() freezes and unfreezes I/O channels by using spdk_for_each_channel(). 2350 * spdk_bdev_unregister() calls spdk_io_device_unregister() in the end. However 2351 * spdk_io_device_unregister() fails if it is called while executing spdk_for_each_channel(). 2352 * Hence, in this case, spdk_io_device_unregister() is deferred until spdk_bdev_reset() 2353 * completes. Test this behavior. 2354 */ 2355 static void 2356 unregister_during_reset(void) 2357 { 2358 struct spdk_io_channel *io_ch[2]; 2359 bool done_reset = false, done_unregister = false; 2360 int rc; 2361 2362 setup_test(); 2363 set_thread(0); 2364 2365 io_ch[0] = spdk_bdev_get_io_channel(g_desc); 2366 SPDK_CU_ASSERT_FATAL(io_ch[0] != NULL); 2367 2368 set_thread(1); 2369 2370 io_ch[1] = spdk_bdev_get_io_channel(g_desc); 2371 SPDK_CU_ASSERT_FATAL(io_ch[1] != NULL); 2372 2373 set_thread(0); 2374 2375 CU_ASSERT(g_bdev.bdev.internal.reset_in_progress == NULL); 2376 2377 rc = spdk_bdev_reset(g_desc, io_ch[0], reset_done, &done_reset); 2378 CU_ASSERT(rc == 0); 2379 2380 set_thread(0); 2381 2382 poll_thread_times(0, 1); 2383 2384 spdk_bdev_close(g_desc); 2385 spdk_bdev_unregister(&g_bdev.bdev, _bdev_unregistered, &done_unregister); 2386 2387 CU_ASSERT(done_reset == false); 2388 CU_ASSERT(done_unregister == false); 2389 2390 poll_threads(); 2391 2392 stub_complete_io(g_bdev.io_target, 0); 2393 2394 poll_threads(); 2395 2396 CU_ASSERT(done_reset == true); 2397 CU_ASSERT(done_unregister == false); 2398 2399 spdk_put_io_channel(io_ch[0]); 2400 2401 set_thread(1); 2402 2403 spdk_put_io_channel(io_ch[1]); 2404 2405 poll_threads(); 2406 2407 CU_ASSERT(done_unregister == true); 2408 2409 /* Restore the original g_bdev so that we can use teardown_test(). 
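 * (teardown_test() assumes g_desc is open on a registered ut_bdev, so re-register the bdev
 * and re-open the descriptor before calling it.)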
*/ 2410 set_thread(0); 2411 register_bdev(&g_bdev, "ut_bdev", &g_io_device); 2412 spdk_bdev_open_ext("ut_bdev", true, _bdev_event_cb, NULL, &g_desc); 2413 teardown_test(); 2414 } 2415 2416 static void 2417 bdev_init_wt_cb(void *done, int rc) 2418 { 2419 } 2420 2421 static int 2422 wrong_thread_setup(void) 2423 { 2424 allocate_cores(1); 2425 allocate_threads(2); 2426 set_thread(0); 2427 2428 spdk_io_device_register(&g_accel_io_device, ut_accel_ch_create_cb, 2429 ut_accel_ch_destroy_cb, 0, NULL); 2430 spdk_bdev_initialize(bdev_init_wt_cb, NULL); 2431 spdk_io_device_register(&g_io_device, stub_create_ch, stub_destroy_ch, 2432 sizeof(struct ut_bdev_channel), NULL); 2433 2434 set_thread(1); 2435 2436 return 0; 2437 } 2438 2439 static int 2440 wrong_thread_teardown(void) 2441 { 2442 int rc = 0; 2443 2444 set_thread(0); 2445 2446 g_teardown_done = false; 2447 spdk_io_device_unregister(&g_io_device, NULL); 2448 spdk_bdev_finish(finish_cb, NULL); 2449 poll_threads(); 2450 memset(&g_bdev, 0, sizeof(g_bdev)); 2451 if (!g_teardown_done) { 2452 fprintf(stderr, "%s:%d %s: teardown not done\n", __FILE__, __LINE__, __func__); 2453 rc = -1; 2454 } 2455 g_teardown_done = false; 2456 2457 spdk_io_device_unregister(&g_accel_io_device, NULL); 2458 free_threads(); 2459 free_cores(); 2460 2461 return rc; 2462 } 2463 2464 static void 2465 _bdev_unregistered_wt(void *ctx, int rc) 2466 { 2467 struct spdk_thread **threadp = ctx; 2468 2469 *threadp = spdk_get_thread(); 2470 } 2471 2472 static void 2473 spdk_bdev_register_wt(void) 2474 { 2475 struct spdk_bdev bdev = { 0 }; 2476 int rc; 2477 struct spdk_thread *unreg_thread; 2478 2479 bdev.name = "wt_bdev"; 2480 bdev.fn_table = &fn_table; 2481 bdev.module = &bdev_ut_if; 2482 bdev.blocklen = 4096; 2483 bdev.blockcnt = 1024; 2484 2485 /* Can register only on app thread */ 2486 rc = spdk_bdev_register(&bdev); 2487 CU_ASSERT(rc == -EINVAL); 2488 2489 /* Can unregister on any thread */ 2490 set_thread(0); 2491 rc = spdk_bdev_register(&bdev); 2492 CU_ASSERT(rc == 0); 2493 set_thread(1); 2494 unreg_thread = NULL; 2495 spdk_bdev_unregister(&bdev, _bdev_unregistered_wt, &unreg_thread); 2496 poll_threads(); 2497 CU_ASSERT(unreg_thread == spdk_get_thread()); 2498 2499 /* Can unregister by name on any thread */ 2500 set_thread(0); 2501 rc = spdk_bdev_register(&bdev); 2502 CU_ASSERT(rc == 0); 2503 set_thread(1); 2504 unreg_thread = NULL; 2505 rc = spdk_bdev_unregister_by_name(bdev.name, bdev.module, _bdev_unregistered_wt, 2506 &unreg_thread); 2507 CU_ASSERT(rc == 0); 2508 poll_threads(); 2509 CU_ASSERT(unreg_thread == spdk_get_thread()); 2510 } 2511 2512 static void 2513 wait_for_examine_cb(void *arg) 2514 { 2515 struct spdk_thread **thread = arg; 2516 2517 *thread = spdk_get_thread(); 2518 } 2519 2520 static void 2521 spdk_bdev_examine_wt(void) 2522 { 2523 int rc; 2524 bool save_auto_examine = g_bdev_opts.bdev_auto_examine; 2525 struct spdk_thread *thread; 2526 2527 g_bdev_opts.bdev_auto_examine = false; 2528 2529 set_thread(0); 2530 register_bdev(&g_bdev, "ut_bdev_wt", &g_io_device); 2531 CU_ASSERT(spdk_bdev_get_by_name("ut_bdev_wt") != NULL); 2532 set_thread(1); 2533 2534 /* Can examine only on the app thread */ 2535 rc = spdk_bdev_examine("ut_bdev_wt"); 2536 CU_ASSERT(rc == -EINVAL); 2537 unregister_bdev(&g_bdev); 2538 CU_ASSERT(spdk_bdev_get_by_name("ut_bdev_wt") == NULL); 2539 2540 /* Can wait for examine on app thread, callback called on app thread. 
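 * (spdk_bdev_wait_for_examine() is expected to invoke its callback on the thread that called
 * it once any pending examine operations finish; the thread pointer check below verifies
 * this.)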
*/ 2541 set_thread(0); 2542 register_bdev(&g_bdev, "ut_bdev_wt", &g_io_device); 2543 CU_ASSERT(spdk_bdev_get_by_name("ut_bdev_wt") != NULL); 2544 thread = NULL; 2545 rc = spdk_bdev_wait_for_examine(wait_for_examine_cb, &thread); 2546 CU_ASSERT(rc == 0); 2547 poll_threads(); 2548 CU_ASSERT(thread == spdk_get_thread()); 2549 unregister_bdev(&g_bdev); 2550 CU_ASSERT(spdk_bdev_get_by_name("ut_bdev_wt") == NULL); 2551 2552 /* Can wait for examine on non-app thread, callback called on same thread. */ 2553 set_thread(0); 2554 register_bdev(&g_bdev, "ut_bdev_wt", &g_io_device); 2555 CU_ASSERT(spdk_bdev_get_by_name("ut_bdev_wt") != NULL); 2556 thread = NULL; 2557 rc = spdk_bdev_wait_for_examine(wait_for_examine_cb, &thread); 2558 CU_ASSERT(rc == 0); 2559 poll_threads(); 2560 CU_ASSERT(thread == spdk_get_thread()); 2561 unregister_bdev(&g_bdev); 2562 CU_ASSERT(spdk_bdev_get_by_name("ut_bdev_wt") == NULL); 2563 2564 unregister_bdev(&g_bdev); 2565 g_bdev_opts.bdev_auto_examine = save_auto_examine; 2566 } 2567 2568 static void 2569 event_notify_and_close(void) 2570 { 2571 int resize_notify_count = 0; 2572 struct spdk_bdev_desc *desc = NULL; 2573 struct spdk_bdev *bdev; 2574 int rc; 2575 2576 setup_test(); 2577 set_thread(0); 2578 2579 /* setup_test() automatically opens the bdev, but this test needs to do 2580 * that in a different way. */ 2581 spdk_bdev_close(g_desc); 2582 poll_threads(); 2583 2584 set_thread(1); 2585 2586 rc = spdk_bdev_open_ext("ut_bdev", true, _bdev_event_cb, &resize_notify_count, &desc); 2587 CU_ASSERT(rc == 0); 2588 SPDK_CU_ASSERT_FATAL(desc != NULL); 2589 2590 bdev = spdk_bdev_desc_get_bdev(desc); 2591 SPDK_CU_ASSERT_FATAL(bdev != NULL); 2592 2593 /* Test a normal case that a resize event is notified. */ 2594 set_thread(0); 2595 2596 rc = spdk_bdev_notify_blockcnt_change(bdev, 1024 * 2); 2597 CU_ASSERT(rc == 0); 2598 CU_ASSERT(bdev->blockcnt == 1024 * 2); 2599 CU_ASSERT(desc->refs == 1); 2600 CU_ASSERT(resize_notify_count == 0); 2601 2602 poll_threads(); 2603 2604 CU_ASSERT(desc->refs == 0); 2605 CU_ASSERT(resize_notify_count == 1); 2606 2607 /* Test a complex case if the bdev is closed after two event_notify messages are sent, 2608 * then both event_notify messages are discarded and the desc is freed. 2609 */ 2610 rc = spdk_bdev_notify_blockcnt_change(bdev, 1024 * 3); 2611 CU_ASSERT(rc == 0); 2612 CU_ASSERT(bdev->blockcnt == 1024 * 3); 2613 CU_ASSERT(desc->refs == 1); 2614 CU_ASSERT(resize_notify_count == 1); 2615 2616 rc = spdk_bdev_notify_blockcnt_change(bdev, 1024 * 4); 2617 CU_ASSERT(rc == 0); 2618 CU_ASSERT(bdev->blockcnt == 1024 * 4); 2619 CU_ASSERT(desc->refs == 2); 2620 CU_ASSERT(resize_notify_count == 1); 2621 2622 set_thread(1); 2623 2624 spdk_bdev_close(desc); 2625 CU_ASSERT(desc->closed == true); 2626 CU_ASSERT(desc->refs == 2); 2627 CU_ASSERT(resize_notify_count == 1); 2628 2629 poll_threads(); 2630 2631 CU_ASSERT(resize_notify_count == 1); 2632 2633 set_thread(0); 2634 2635 /* Restore g_desc. Then, we can execute teardown_test(). */ 2636 spdk_bdev_open_ext("ut_bdev", true, _bdev_event_cb, NULL, &g_desc); 2637 teardown_test(); 2638 } 2639 2640 /* There was a bug that bdev_channel_poll_qos() called spdk_for_each_channel() 2641 * after spdk_io_device_unregister() is called for a bdev. 2642 * 2643 * This occurred in the following sequence. 2644 * - There was a bdev and a channel for it. 2645 * - QoS was enabled and started. 2646 * - spdk_bdev_unregister() was called. However, there was a open descriptor. 
2647 * Hence, remove notification was sent and unregistration was pending. 2648 * - Receiving a event notification, spdk_put_io_channel() and spdk_bdev_close() were 2649 * called. In spdk_bdev_close(), the existing QoS was unbound and a message was sent 2650 * to it, and then the pending spdk_io_device_unregister() was finally executed. 2651 * - If bdev_channel_poll_qos() was executed before the message was processed, 2652 * bdev_channel_poll_qos() called spdk_bdev_for_each_channel() and hit assert(). 2653 * 2654 * The fix was in this case bdev_channel_poll_qos() returned immediately because QoS 2655 * was not enabled. bdev_qos_destroy() created a new disabled QoS and swapped it with 2656 * the existing QoS. 2657 * 2658 * This test case was added to avoid degradation in future. 2659 */ 2660 static void 2661 unregister_and_qos_poller(void) 2662 { 2663 struct spdk_io_channel *io_ch; 2664 struct spdk_bdev_channel *bdev_ch; 2665 struct spdk_bdev_desc *desc = NULL; 2666 struct spdk_bdev_qos *old_qos; 2667 uint64_t limits[SPDK_BDEV_QOS_NUM_RATE_LIMIT_TYPES] = {}; 2668 bool remove_notify = false, done_unregister = false; 2669 int status = -1, rc; 2670 2671 setup_test(); 2672 set_thread(0); 2673 2674 MOCK_SET(spdk_get_ticks, 10); 2675 2676 /* setup_test() automatically opens the bdev, but this test needs to do 2677 * that in a different way. 2678 */ 2679 spdk_bdev_close(g_desc); 2680 poll_threads(); 2681 2682 /* We want to get remove event notification to check if unregistration 2683 * is deferred. 2684 */ 2685 spdk_bdev_open_ext("ut_bdev", true, _bdev_event_cb, &remove_notify, &desc); 2686 SPDK_CU_ASSERT_FATAL(desc != NULL); 2687 CU_ASSERT(remove_notify == false); 2688 2689 io_ch = spdk_bdev_get_io_channel(desc); 2690 SPDK_CU_ASSERT_FATAL(io_ch != NULL); 2691 bdev_ch = spdk_io_channel_get_ctx(io_ch); 2692 2693 limits[SPDK_BDEV_QOS_RW_IOPS_RATE_LIMIT] = 10000; 2694 limits[SPDK_BDEV_QOS_RW_BPS_RATE_LIMIT] = 0; 2695 limits[SPDK_BDEV_QOS_R_BPS_RATE_LIMIT] = 0; 2696 limits[SPDK_BDEV_QOS_W_BPS_RATE_LIMIT] = 0; 2697 spdk_bdev_set_qos_rate_limits(&g_bdev.bdev, limits, qos_dynamic_enable_done, &status); 2698 poll_threads(); 2699 CU_ASSERT(status == 0); 2700 CU_ASSERT((bdev_ch->flags & BDEV_CH_QOS_ENABLED) != 0); 2701 2702 old_qos = g_bdev.bdev.internal.qos; 2703 CU_ASSERT(old_qos != NULL); 2704 2705 spdk_bdev_unregister(&g_bdev.bdev, _bdev_unregistered, &done_unregister); 2706 CU_ASSERT(done_unregister == false); 2707 CU_ASSERT(remove_notify == false); 2708 2709 poll_threads(); 2710 CU_ASSERT(done_unregister == false); 2711 CU_ASSERT(remove_notify == true); 2712 2713 spdk_put_io_channel(io_ch); 2714 spdk_bdev_close(desc); 2715 2716 CU_ASSERT(g_bdev.bdev.internal.qos != NULL); 2717 CU_ASSERT(g_bdev.bdev.internal.qos->thread == NULL); 2718 CU_ASSERT(old_qos != g_bdev.bdev.internal.qos); 2719 2720 /* bdev_channel_poll_qos() has a chance to be executed in this small window. */ 2721 spdk_delay_us(SPDK_BDEV_QOS_TIMESLICE_IN_USEC); 2722 2723 rc = bdev_channel_poll_qos(&g_bdev.bdev); 2724 CU_ASSERT(rc == SPDK_POLLER_IDLE); 2725 2726 poll_threads(); 2727 2728 CU_ASSERT(done_unregister == true); 2729 2730 /* Restore the original g_bdev so that we can use teardown_test(). */ 2731 set_thread(0); 2732 register_bdev(&g_bdev, "ut_bdev", &g_io_device); 2733 spdk_bdev_open_ext("ut_bdev", true, _bdev_event_cb, NULL, &g_desc); 2734 teardown_test(); 2735 } 2736 2737 /** 2738 * There was a race between reset start and complete: 2739 * 2740 * 1. reset_1 is completing. 
It clears bdev->internal.reset_in_progress and sends
2741 * unfreeze_channel messages to remove queued resets of all channels.
2742 * 2. reset_2 is starting. As bdev->internal.reset_in_progress has been cleared, it
2743 * is inserted into the queued_resets list and starts to freeze channels.
2744 * 3. reset_1's unfreeze_channel message removes reset_2 from the queued_resets list.
2745 * 4. reset_2 finishes freezing channels, but the corresponding bdev_io has gone,
2746 * hence resulting in a segmentation fault.
2747 *
2748 * To fix this,
2749 * 1. Do not queue the reset that is submitted to the underlying device.
2750 * 2. Queue all other resets in a per-bdev list, so all of them can be completed
2751 * at once.
2752 */
2753 static void
2754 reset_start_complete_race(void)
2755 {
2756 struct spdk_io_channel *io_ch;
2757 bool done_reset_1 = false, done_reset_2 = false;
2758 uint32_t num_completed;
2759 int rc;
2760
2761 setup_test();
2762 set_thread(0);
2763
2764 io_ch = spdk_bdev_get_io_channel(g_desc);
2765 SPDK_CU_ASSERT_FATAL(io_ch != NULL);
2766
2767 CU_ASSERT(g_bdev.bdev.internal.reset_in_progress == NULL);
2768
2769 /**
2770 * Submit reset_1.
2771 */
2772 rc = spdk_bdev_reset(g_desc, io_ch, reset_done, &done_reset_1);
2773 CU_ASSERT(rc == 0);
2774
2775 /**
2776 * Poll threads so that reset_1 completes freezing channels and gets submitted to
2777 * the underlying device.
2778 */
2779 poll_threads();
2780
2781 /**
2782 * Complete reset_1. This will start the unfreezing channel stage of reset_1, but
2783 * not complete it until the next poll_threads().
2784 */
2785 num_completed = stub_complete_io(g_bdev.io_target, 0);
2786 CU_ASSERT(num_completed == 1);
2787
2788 /**
2789 * Submit reset_2. It should be queued because reset_1 has not been completed yet.
2790 */
2791 rc = spdk_bdev_reset(g_desc, io_ch, reset_done, &done_reset_2);
2792 CU_ASSERT(rc == 0);
2793
2794 /**
2795 * Poll threads. reset_1 completes unfreezing channels, then completes queued reset_2,
2796 * and finally itself gets completed.
2797 */ 2798 poll_threads(); 2799 CU_ASSERT(done_reset_1 == true); 2800 CU_ASSERT(done_reset_2 == true); 2801 2802 spdk_put_io_channel(io_ch); 2803 teardown_test(); 2804 } 2805 2806 int 2807 main(int argc, char **argv) 2808 { 2809 CU_pSuite suite = NULL; 2810 CU_pSuite suite_wt = NULL; 2811 unsigned int num_failures; 2812 2813 CU_initialize_registry(); 2814 2815 suite = CU_add_suite("bdev", NULL, NULL); 2816 suite_wt = CU_add_suite("bdev_wrong_thread", wrong_thread_setup, wrong_thread_teardown); 2817 2818 CU_ADD_TEST(suite, basic); 2819 CU_ADD_TEST(suite, unregister_and_close); 2820 CU_ADD_TEST(suite, unregister_and_close_different_threads); 2821 CU_ADD_TEST(suite, basic_qos); 2822 CU_ADD_TEST(suite, put_channel_during_reset); 2823 CU_ADD_TEST(suite, aborted_reset); 2824 CU_ADD_TEST(suite, aborted_reset_no_outstanding_io); 2825 CU_ADD_TEST(suite, io_during_reset); 2826 CU_ADD_TEST(suite, reset_completions); 2827 CU_ADD_TEST(suite, io_during_qos_queue); 2828 CU_ADD_TEST(suite, io_during_qos_reset); 2829 CU_ADD_TEST(suite, enomem); 2830 CU_ADD_TEST(suite, enomem_multi_bdev); 2831 CU_ADD_TEST(suite, enomem_multi_bdev_unregister); 2832 CU_ADD_TEST(suite, enomem_multi_io_target); 2833 CU_ADD_TEST(suite, qos_dynamic_enable); 2834 CU_ADD_TEST(suite, bdev_histograms_mt); 2835 CU_ADD_TEST(suite, bdev_set_io_timeout_mt); 2836 CU_ADD_TEST(suite, lock_lba_range_then_submit_io); 2837 CU_ADD_TEST(suite, unregister_during_reset); 2838 CU_ADD_TEST(suite_wt, spdk_bdev_register_wt); 2839 CU_ADD_TEST(suite_wt, spdk_bdev_examine_wt); 2840 CU_ADD_TEST(suite, event_notify_and_close); 2841 CU_ADD_TEST(suite, unregister_and_qos_poller); 2842 CU_ADD_TEST(suite, reset_start_complete_race); 2843 2844 num_failures = spdk_ut_run_tests(argc, argv, NULL); 2845 CU_cleanup_registry(); 2846 return num_failures; 2847 } 2848