1 /* SPDX-License-Identifier: BSD-3-Clause 2 * Copyright (C) 2017 Intel Corporation. 3 * Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. 4 * All rights reserved. 5 */ 6 7 #include "spdk_internal/cunit.h" 8 9 #include "common/lib/ut_multithread.c" 10 #include "unit/lib/json_mock.c" 11 12 #include "spdk/config.h" 13 /* HACK: disable VTune integration so the unit test doesn't need VTune headers and libs to build */ 14 #undef SPDK_CONFIG_VTUNE 15 16 #include "bdev/bdev.c" 17 18 #define BDEV_UT_NUM_THREADS 3 19 20 DEFINE_STUB(spdk_notify_send, uint64_t, (const char *type, const char *ctx), 0); 21 DEFINE_STUB(spdk_notify_type_register, struct spdk_notify_type *, (const char *type), NULL); 22 DEFINE_STUB_V(spdk_scsi_nvme_translate, (const struct spdk_bdev_io *bdev_io, int *sc, int *sk, 23 int *asc, int *ascq)); 24 DEFINE_STUB(spdk_memory_domain_get_dma_device_id, const char *, (struct spdk_memory_domain *domain), 25 "test_domain"); 26 DEFINE_STUB(spdk_memory_domain_get_dma_device_type, enum spdk_dma_device_type, 27 (struct spdk_memory_domain *domain), 0); 28 DEFINE_STUB_V(spdk_accel_sequence_finish, 29 (struct spdk_accel_sequence *seq, spdk_accel_completion_cb cb_fn, void *cb_arg)); 30 DEFINE_STUB_V(spdk_accel_sequence_abort, (struct spdk_accel_sequence *seq)); 31 DEFINE_STUB_V(spdk_accel_sequence_reverse, (struct spdk_accel_sequence *seq)); 32 DEFINE_STUB(spdk_accel_append_copy, int, 33 (struct spdk_accel_sequence **seq, struct spdk_io_channel *ch, struct iovec *dst_iovs, 34 uint32_t dst_iovcnt, struct spdk_memory_domain *dst_domain, void *dst_domain_ctx, 35 struct iovec *src_iovs, uint32_t src_iovcnt, struct spdk_memory_domain *src_domain, 36 void *src_domain_ctx, int flags, spdk_accel_step_cb cb_fn, void *cb_arg), 0); 37 DEFINE_STUB(spdk_accel_get_memory_domain, struct spdk_memory_domain *, (void), NULL); 38 39 DEFINE_RETURN_MOCK(spdk_memory_domain_pull_data, int); 40 int 41 spdk_memory_domain_pull_data(struct spdk_memory_domain *src_domain, void *src_domain_ctx, 42 struct iovec *src_iov, uint32_t src_iov_cnt, struct iovec *dst_iov, uint32_t dst_iov_cnt, 43 spdk_memory_domain_data_cpl_cb cpl_cb, void *cpl_cb_arg) 44 { 45 HANDLE_RETURN_MOCK(spdk_memory_domain_pull_data); 46 47 cpl_cb(cpl_cb_arg, 0); 48 return 0; 49 } 50 51 DEFINE_RETURN_MOCK(spdk_memory_domain_push_data, int); 52 int 53 spdk_memory_domain_push_data(struct spdk_memory_domain *dst_domain, void *dst_domain_ctx, 54 struct iovec *dst_iov, uint32_t dst_iovcnt, struct iovec *src_iov, uint32_t src_iovcnt, 55 spdk_memory_domain_data_cpl_cb cpl_cb, void *cpl_cb_arg) 56 { 57 HANDLE_RETURN_MOCK(spdk_memory_domain_push_data); 58 59 cpl_cb(cpl_cb_arg, 0); 60 return 0; 61 } 62 63 static int g_accel_io_device; 64 65 struct spdk_io_channel * 66 spdk_accel_get_io_channel(void) 67 { 68 return spdk_get_io_channel(&g_accel_io_device); 69 } 70 71 struct ut_bdev { 72 struct spdk_bdev bdev; 73 void *io_target; 74 }; 75 76 struct ut_bdev_channel { 77 TAILQ_HEAD(, spdk_bdev_io) outstanding_io; 78 uint32_t outstanding_cnt; 79 uint32_t avail_cnt; 80 struct spdk_thread *thread; 81 TAILQ_ENTRY(ut_bdev_channel) link; 82 }; 83 84 int g_io_device; 85 struct ut_bdev g_bdev; 86 struct spdk_bdev_desc *g_desc; 87 bool g_teardown_done = false; 88 bool g_get_io_channel = true; 89 bool g_create_ch = true; 90 bool g_init_complete_called = false; 91 bool g_fini_start_called = true; 92 int g_status = 0; 93 int g_count = 0; 94 struct spdk_histogram_data *g_histogram = NULL; 95 TAILQ_HEAD(, ut_bdev_channel) g_ut_channels; 96 97 static int 98 ut_accel_ch_create_cb(void *io_device, void *ctx) 99 { 100 return 0; 101 } 102 103 static void 104 ut_accel_ch_destroy_cb(void *io_device, void *ctx) 105 { 106 } 107 108 static int 109 stub_create_ch(void *io_device, void *ctx_buf) 110 { 111 struct ut_bdev_channel *ch = ctx_buf; 112 113 if (g_create_ch == false) { 114 return -1; 115 } 116 117 TAILQ_INIT(&ch->outstanding_io); 118 ch->outstanding_cnt = 0; 119 /* 120 * When avail gets to 0, the submit_request function will return ENOMEM. 121 * Most tests to not want ENOMEM to occur, so by default set this to a 122 * big value that won't get hit. The ENOMEM tests can then override this 123 * value to something much smaller to induce ENOMEM conditions. 124 */ 125 ch->avail_cnt = 2048; 126 ch->thread = spdk_get_thread(); 127 128 TAILQ_INSERT_TAIL(&g_ut_channels, ch, link); 129 130 return 0; 131 } 132 133 static void 134 stub_destroy_ch(void *io_device, void *ctx_buf) 135 { 136 struct ut_bdev_channel *ch = ctx_buf; 137 138 TAILQ_REMOVE(&g_ut_channels, ch, link); 139 } 140 141 static struct spdk_io_channel * 142 stub_get_io_channel(void *ctx) 143 { 144 struct ut_bdev *ut_bdev = ctx; 145 146 if (g_get_io_channel == true) { 147 return spdk_get_io_channel(ut_bdev->io_target); 148 } else { 149 return NULL; 150 } 151 } 152 153 static int 154 stub_destruct(void *ctx) 155 { 156 return 0; 157 } 158 159 static void 160 stub_reset_channel(void *ctx) 161 { 162 struct ut_bdev_channel *ch = ctx; 163 struct spdk_bdev_io *io; 164 165 while (!TAILQ_EMPTY(&ch->outstanding_io)) { 166 io = TAILQ_FIRST(&ch->outstanding_io); 167 TAILQ_REMOVE(&ch->outstanding_io, io, module_link); 168 ch->outstanding_cnt--; 169 spdk_bdev_io_complete(io, SPDK_BDEV_IO_STATUS_ABORTED); 170 ch->avail_cnt++; 171 } 172 } 173 174 static void 175 stub_submit_request(struct spdk_io_channel *_ch, struct spdk_bdev_io *bdev_io) 176 { 177 struct ut_bdev_channel *ch = spdk_io_channel_get_ctx(_ch), *tmp_ch; 178 struct spdk_bdev_io *io; 179 180 if (bdev_io->type == SPDK_BDEV_IO_TYPE_RESET) { 181 TAILQ_FOREACH(tmp_ch, &g_ut_channels, link) { 182 if (spdk_get_thread() == tmp_ch->thread) { 183 stub_reset_channel(tmp_ch); 184 } else { 185 spdk_thread_send_msg(tmp_ch->thread, stub_reset_channel, tmp_ch); 186 } 187 } 188 } else if (bdev_io->type == SPDK_BDEV_IO_TYPE_ABORT) { 189 TAILQ_FOREACH(io, &ch->outstanding_io, module_link) { 190 if (io == bdev_io->u.abort.bio_to_abort) { 191 TAILQ_REMOVE(&ch->outstanding_io, io, module_link); 192 ch->outstanding_cnt--; 193 spdk_bdev_io_complete(io, SPDK_BDEV_IO_STATUS_ABORTED); 194 ch->avail_cnt++; 195 196 spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_SUCCESS); 197 return; 198 } 199 } 200 201 spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_FAILED); 202 return; 203 } 204 205 if (ch->avail_cnt > 0) { 206 TAILQ_INSERT_TAIL(&ch->outstanding_io, bdev_io, module_link); 207 ch->outstanding_cnt++; 208 ch->avail_cnt--; 209 } else { 210 spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_NOMEM); 211 } 212 } 213 214 static uint32_t 215 stub_complete_io(void *io_target, uint32_t num_to_complete) 216 { 217 struct spdk_io_channel *_ch = spdk_get_io_channel(io_target); 218 struct ut_bdev_channel *ch = spdk_io_channel_get_ctx(_ch); 219 struct spdk_bdev_io *io; 220 bool complete_all = (num_to_complete == 0); 221 uint32_t num_completed = 0; 222 223 while (complete_all || num_completed < num_to_complete) { 224 if (TAILQ_EMPTY(&ch->outstanding_io)) { 225 break; 226 } 227 io = TAILQ_FIRST(&ch->outstanding_io); 228 TAILQ_REMOVE(&ch->outstanding_io, io, module_link); 229 ch->outstanding_cnt--; 230 spdk_bdev_io_complete(io, SPDK_BDEV_IO_STATUS_SUCCESS); 231 ch->avail_cnt++; 232 num_completed++; 233 } 234 spdk_put_io_channel(_ch); 235 return num_completed; 236 } 237 238 static bool 239 stub_io_type_supported(void *ctx, enum spdk_bdev_io_type type) 240 { 241 return true; 242 } 243 244 static struct spdk_bdev_fn_table fn_table = { 245 .get_io_channel = stub_get_io_channel, 246 .destruct = stub_destruct, 247 .submit_request = stub_submit_request, 248 .io_type_supported = stub_io_type_supported, 249 }; 250 251 struct spdk_bdev_module bdev_ut_if; 252 253 static int 254 module_init(void) 255 { 256 spdk_bdev_module_init_done(&bdev_ut_if); 257 return 0; 258 } 259 260 static void 261 module_fini(void) 262 { 263 } 264 265 static void 266 init_complete(void) 267 { 268 g_init_complete_called = true; 269 } 270 271 static void 272 fini_start(void) 273 { 274 g_fini_start_called = true; 275 } 276 277 struct spdk_bdev_module bdev_ut_if = { 278 .name = "bdev_ut", 279 .module_init = module_init, 280 .module_fini = module_fini, 281 .async_init = true, 282 .init_complete = init_complete, 283 .fini_start = fini_start, 284 }; 285 286 SPDK_BDEV_MODULE_REGISTER(bdev_ut, &bdev_ut_if) 287 288 static void 289 register_bdev(struct ut_bdev *ut_bdev, char *name, void *io_target) 290 { 291 memset(ut_bdev, 0, sizeof(*ut_bdev)); 292 293 ut_bdev->io_target = io_target; 294 ut_bdev->bdev.ctxt = ut_bdev; 295 ut_bdev->bdev.name = name; 296 ut_bdev->bdev.fn_table = &fn_table; 297 ut_bdev->bdev.module = &bdev_ut_if; 298 ut_bdev->bdev.blocklen = 4096; 299 ut_bdev->bdev.blockcnt = 1024; 300 301 spdk_bdev_register(&ut_bdev->bdev); 302 } 303 304 static void 305 unregister_bdev(struct ut_bdev *ut_bdev) 306 { 307 /* Handle any deferred messages. */ 308 poll_threads(); 309 spdk_bdev_unregister(&ut_bdev->bdev, NULL, NULL); 310 /* Handle the async bdev unregister. */ 311 poll_threads(); 312 } 313 314 static void 315 bdev_init_cb(void *done, int rc) 316 { 317 CU_ASSERT(rc == 0); 318 *(bool *)done = true; 319 } 320 321 static void 322 _bdev_event_cb(enum spdk_bdev_event_type type, struct spdk_bdev *bdev, 323 void *event_ctx) 324 { 325 switch (type) { 326 case SPDK_BDEV_EVENT_REMOVE: 327 if (event_ctx != NULL) { 328 *(bool *)event_ctx = true; 329 } 330 break; 331 case SPDK_BDEV_EVENT_RESIZE: 332 if (event_ctx != NULL) { 333 *(int *)event_ctx += 1; 334 } 335 break; 336 default: 337 CU_ASSERT(false); 338 break; 339 } 340 } 341 342 static void 343 setup_test(void) 344 { 345 bool done = false; 346 int rc; 347 348 TAILQ_INIT(&g_ut_channels); 349 350 allocate_cores(BDEV_UT_NUM_THREADS); 351 allocate_threads(BDEV_UT_NUM_THREADS); 352 set_thread(0); 353 354 rc = spdk_iobuf_initialize(); 355 CU_ASSERT(rc == 0); 356 spdk_bdev_initialize(bdev_init_cb, &done); 357 spdk_io_device_register(&g_io_device, stub_create_ch, stub_destroy_ch, 358 sizeof(struct ut_bdev_channel), NULL); 359 spdk_io_device_register(&g_accel_io_device, ut_accel_ch_create_cb, 360 ut_accel_ch_destroy_cb, 0, NULL); 361 register_bdev(&g_bdev, "ut_bdev", &g_io_device); 362 spdk_bdev_open_ext("ut_bdev", true, _bdev_event_cb, NULL, &g_desc); 363 } 364 365 static void 366 finish_cb(void *cb_arg) 367 { 368 g_teardown_done = true; 369 } 370 371 static void 372 teardown_test(void) 373 { 374 set_thread(0); 375 g_teardown_done = false; 376 spdk_bdev_close(g_desc); 377 g_desc = NULL; 378 unregister_bdev(&g_bdev); 379 spdk_io_device_unregister(&g_io_device, NULL); 380 spdk_bdev_finish(finish_cb, NULL); 381 spdk_io_device_unregister(&g_accel_io_device, NULL); 382 spdk_iobuf_finish(finish_cb, NULL); 383 poll_threads(); 384 memset(&g_bdev, 0, sizeof(g_bdev)); 385 CU_ASSERT(g_teardown_done == true); 386 g_teardown_done = false; 387 free_threads(); 388 free_cores(); 389 CU_ASSERT(TAILQ_EMPTY(&g_ut_channels)) 390 } 391 392 static uint32_t 393 bdev_io_tailq_cnt(bdev_io_tailq_t *tailq) 394 { 395 struct spdk_bdev_io *io; 396 uint32_t cnt = 0; 397 398 TAILQ_FOREACH(io, tailq, internal.link) { 399 cnt++; 400 } 401 402 return cnt; 403 } 404 405 static void 406 basic(void) 407 { 408 g_init_complete_called = false; 409 setup_test(); 410 CU_ASSERT(g_init_complete_called == true); 411 412 set_thread(0); 413 414 g_get_io_channel = false; 415 g_ut_threads[0].ch = spdk_bdev_get_io_channel(g_desc); 416 CU_ASSERT(g_ut_threads[0].ch == NULL); 417 418 g_get_io_channel = true; 419 g_create_ch = false; 420 g_ut_threads[0].ch = spdk_bdev_get_io_channel(g_desc); 421 CU_ASSERT(g_ut_threads[0].ch == NULL); 422 423 g_get_io_channel = true; 424 g_create_ch = true; 425 g_ut_threads[0].ch = spdk_bdev_get_io_channel(g_desc); 426 CU_ASSERT(g_ut_threads[0].ch != NULL); 427 spdk_put_io_channel(g_ut_threads[0].ch); 428 429 g_fini_start_called = false; 430 teardown_test(); 431 CU_ASSERT(g_fini_start_called == true); 432 } 433 434 static void 435 _bdev_unregistered(void *done, int rc) 436 { 437 CU_ASSERT(rc == 0); 438 *(bool *)done = true; 439 } 440 441 static void 442 unregister_and_close(void) 443 { 444 bool done, remove_notify; 445 struct spdk_bdev_desc *desc = NULL; 446 447 setup_test(); 448 set_thread(0); 449 450 /* setup_test() automatically opens the bdev, 451 * but this test needs to do that in a different 452 * way. */ 453 spdk_bdev_close(g_desc); 454 poll_threads(); 455 456 /* Try hotremoving a bdev with descriptors which don't provide 457 * any context to the notification callback */ 458 spdk_bdev_open_ext("ut_bdev", true, _bdev_event_cb, NULL, &desc); 459 SPDK_CU_ASSERT_FATAL(desc != NULL); 460 461 /* There is an open descriptor on the device. Unregister it, 462 * which can't proceed until the descriptor is closed. */ 463 done = false; 464 spdk_bdev_unregister(&g_bdev.bdev, _bdev_unregistered, &done); 465 466 /* Poll the threads to allow all events to be processed */ 467 poll_threads(); 468 469 /* Make sure the bdev was not unregistered. We still have a 470 * descriptor open */ 471 CU_ASSERT(done == false); 472 473 spdk_bdev_close(desc); 474 poll_threads(); 475 desc = NULL; 476 477 /* The unregister should have completed */ 478 CU_ASSERT(done == true); 479 480 481 /* Register the bdev again */ 482 register_bdev(&g_bdev, "ut_bdev", &g_io_device); 483 484 remove_notify = false; 485 spdk_bdev_open_ext("ut_bdev", true, _bdev_event_cb, &remove_notify, &desc); 486 SPDK_CU_ASSERT_FATAL(desc != NULL); 487 CU_ASSERT(remove_notify == false); 488 489 /* There is an open descriptor on the device. Unregister it, 490 * which can't proceed until the descriptor is closed. */ 491 done = false; 492 spdk_bdev_unregister(&g_bdev.bdev, _bdev_unregistered, &done); 493 /* No polling has occurred, so neither of these should execute */ 494 CU_ASSERT(remove_notify == false); 495 CU_ASSERT(done == false); 496 497 /* Prior to the unregister completing, close the descriptor */ 498 spdk_bdev_close(desc); 499 500 /* Poll the threads to allow all events to be processed */ 501 poll_threads(); 502 503 /* Remove notify should not have been called because the 504 * descriptor is already closed. */ 505 CU_ASSERT(remove_notify == false); 506 507 /* The unregister should have completed */ 508 CU_ASSERT(done == true); 509 510 /* Restore the original g_bdev so that we can use teardown_test(). */ 511 register_bdev(&g_bdev, "ut_bdev", &g_io_device); 512 spdk_bdev_open_ext("ut_bdev", true, _bdev_event_cb, NULL, &g_desc); 513 teardown_test(); 514 } 515 516 static void 517 unregister_and_close_different_threads(void) 518 { 519 bool done; 520 struct spdk_bdev_desc *desc = NULL; 521 522 setup_test(); 523 set_thread(0); 524 525 /* setup_test() automatically opens the bdev, 526 * but this test needs to do that in a different 527 * way. */ 528 spdk_bdev_close(g_desc); 529 poll_threads(); 530 531 set_thread(1); 532 spdk_bdev_open_ext("ut_bdev", true, _bdev_event_cb, NULL, &desc); 533 SPDK_CU_ASSERT_FATAL(desc != NULL); 534 done = false; 535 536 set_thread(0); 537 spdk_bdev_unregister(&g_bdev.bdev, _bdev_unregistered, &done); 538 539 /* Poll the threads to allow all events to be processed */ 540 poll_threads(); 541 542 /* Make sure the bdev was not unregistered. We still have a 543 * descriptor open */ 544 CU_ASSERT(done == false); 545 546 /* Close the descriptor on thread 1. Poll the thread and confirm the 547 * unregister did not complete, since it was unregistered on thread 0. 548 */ 549 set_thread(1); 550 spdk_bdev_close(desc); 551 poll_thread(1); 552 CU_ASSERT(done == false); 553 554 /* Now poll thread 0 and confirm the unregister completed. */ 555 set_thread(0); 556 poll_thread(0); 557 CU_ASSERT(done == true); 558 559 /* Restore the original g_bdev so that we can use teardown_test(). */ 560 register_bdev(&g_bdev, "ut_bdev", &g_io_device); 561 spdk_bdev_open_ext("ut_bdev", true, _bdev_event_cb, NULL, &g_desc); 562 teardown_test(); 563 } 564 565 static void 566 reset_done(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg) 567 { 568 bool *done = cb_arg; 569 570 CU_ASSERT(success == true); 571 *done = true; 572 spdk_bdev_free_io(bdev_io); 573 } 574 575 static void 576 put_channel_during_reset(void) 577 { 578 struct spdk_io_channel *io_ch; 579 bool done = false; 580 581 setup_test(); 582 583 set_thread(0); 584 io_ch = spdk_bdev_get_io_channel(g_desc); 585 CU_ASSERT(io_ch != NULL); 586 587 /* 588 * Start a reset, but then put the I/O channel before 589 * the deferred messages for the reset get a chance to 590 * execute. 591 */ 592 spdk_bdev_reset(g_desc, io_ch, reset_done, &done); 593 spdk_put_io_channel(io_ch); 594 poll_threads(); 595 stub_complete_io(g_bdev.io_target, 0); 596 597 teardown_test(); 598 } 599 600 static void 601 aborted_reset_done(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg) 602 { 603 enum spdk_bdev_io_status *status = cb_arg; 604 605 *status = success ? SPDK_BDEV_IO_STATUS_SUCCESS : SPDK_BDEV_IO_STATUS_FAILED; 606 spdk_bdev_free_io(bdev_io); 607 } 608 609 static void io_done(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg); 610 611 static void 612 aborted_reset(void) 613 { 614 struct spdk_io_channel *io_ch[2]; 615 enum spdk_bdev_io_status status1 = SPDK_BDEV_IO_STATUS_PENDING, 616 status2 = SPDK_BDEV_IO_STATUS_PENDING; 617 618 setup_test(); 619 620 set_thread(0); 621 io_ch[0] = spdk_bdev_get_io_channel(g_desc); 622 CU_ASSERT(io_ch[0] != NULL); 623 spdk_bdev_reset(g_desc, io_ch[0], aborted_reset_done, &status1); 624 poll_threads(); 625 CU_ASSERT(g_bdev.bdev.internal.reset_in_progress != NULL); 626 627 /* 628 * First reset has been submitted on ch0. Now submit a second 629 * reset on ch1 which will get queued since there is already a 630 * reset in progress. 631 */ 632 set_thread(1); 633 io_ch[1] = spdk_bdev_get_io_channel(g_desc); 634 CU_ASSERT(io_ch[1] != NULL); 635 spdk_bdev_reset(g_desc, io_ch[1], aborted_reset_done, &status2); 636 poll_threads(); 637 CU_ASSERT(g_bdev.bdev.internal.reset_in_progress != NULL); 638 639 /* 640 * Now destroy ch1. This will abort the queued reset. Check that 641 * the second reset was completed with failed status. Also check 642 * that bdev->internal.reset_in_progress != NULL, since the 643 * original reset has not been completed yet. This ensures that 644 * the bdev code is correctly noticing that the failed reset is 645 * *not* the one that had been submitted to the bdev module. 646 */ 647 set_thread(1); 648 spdk_put_io_channel(io_ch[1]); 649 poll_threads(); 650 CU_ASSERT(status2 == SPDK_BDEV_IO_STATUS_FAILED); 651 CU_ASSERT(g_bdev.bdev.internal.reset_in_progress != NULL); 652 653 /* 654 * Now complete the first reset, verify that it completed with SUCCESS 655 * status and that bdev->internal.reset_in_progress is also set back to NULL. 656 */ 657 set_thread(0); 658 spdk_put_io_channel(io_ch[0]); 659 stub_complete_io(g_bdev.io_target, 0); 660 poll_threads(); 661 CU_ASSERT(status1 == SPDK_BDEV_IO_STATUS_SUCCESS); 662 CU_ASSERT(g_bdev.bdev.internal.reset_in_progress == NULL); 663 664 teardown_test(); 665 } 666 667 static void 668 aborted_reset_no_outstanding_io(void) 669 { 670 struct spdk_io_channel *io_ch[2]; 671 struct spdk_bdev_channel *bdev_ch[2]; 672 struct spdk_bdev *bdev[2]; 673 enum spdk_bdev_io_status status1 = SPDK_BDEV_IO_STATUS_PENDING, 674 status2 = SPDK_BDEV_IO_STATUS_PENDING; 675 676 setup_test(); 677 678 /* 679 * This time we test the reset without any outstanding IO 680 * present on the bdev channel, so both resets should finish 681 * immediately. 682 */ 683 684 set_thread(0); 685 /* Set reset_io_drain_timeout to allow bdev 686 * reset to stay pending until we call abort. */ 687 io_ch[0] = spdk_bdev_get_io_channel(g_desc); 688 bdev_ch[0] = spdk_io_channel_get_ctx(io_ch[0]); 689 bdev[0] = bdev_ch[0]->bdev; 690 bdev[0]->reset_io_drain_timeout = SPDK_BDEV_RESET_IO_DRAIN_RECOMMENDED_VALUE; 691 CU_ASSERT(io_ch[0] != NULL); 692 spdk_bdev_reset(g_desc, io_ch[0], aborted_reset_done, &status1); 693 poll_threads(); 694 CU_ASSERT(g_bdev.bdev.internal.reset_in_progress == NULL); 695 CU_ASSERT(status1 == SPDK_BDEV_IO_STATUS_SUCCESS); 696 spdk_put_io_channel(io_ch[0]); 697 698 set_thread(1); 699 /* Set reset_io_drain_timeout to allow bdev 700 * reset to stay pending until we call abort. */ 701 io_ch[1] = spdk_bdev_get_io_channel(g_desc); 702 bdev_ch[1] = spdk_io_channel_get_ctx(io_ch[1]); 703 bdev[1] = bdev_ch[1]->bdev; 704 bdev[1]->reset_io_drain_timeout = SPDK_BDEV_RESET_IO_DRAIN_RECOMMENDED_VALUE; 705 CU_ASSERT(io_ch[1] != NULL); 706 spdk_bdev_reset(g_desc, io_ch[1], aborted_reset_done, &status2); 707 poll_threads(); 708 CU_ASSERT(g_bdev.bdev.internal.reset_in_progress == NULL); 709 CU_ASSERT(status2 == SPDK_BDEV_IO_STATUS_SUCCESS); 710 spdk_put_io_channel(io_ch[1]); 711 712 stub_complete_io(g_bdev.io_target, 0); 713 poll_threads(); 714 715 teardown_test(); 716 } 717 718 719 static void 720 io_during_io_done(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg) 721 { 722 enum spdk_bdev_io_status *status = cb_arg; 723 724 *status = bdev_io->internal.status; 725 spdk_bdev_free_io(bdev_io); 726 } 727 728 static void 729 io_during_reset(void) 730 { 731 struct spdk_io_channel *io_ch[2]; 732 struct spdk_bdev_channel *bdev_ch[2]; 733 enum spdk_bdev_io_status status0, status1, status_reset; 734 int rc; 735 736 setup_test(); 737 738 /* 739 * First test normal case - submit an I/O on each of two channels (with no resets) 740 * and verify they complete successfully. 741 */ 742 set_thread(0); 743 io_ch[0] = spdk_bdev_get_io_channel(g_desc); 744 bdev_ch[0] = spdk_io_channel_get_ctx(io_ch[0]); 745 CU_ASSERT(bdev_ch[0]->flags == 0); 746 status0 = SPDK_BDEV_IO_STATUS_PENDING; 747 rc = spdk_bdev_read_blocks(g_desc, io_ch[0], NULL, 0, 1, io_during_io_done, &status0); 748 CU_ASSERT(rc == 0); 749 750 set_thread(1); 751 io_ch[1] = spdk_bdev_get_io_channel(g_desc); 752 bdev_ch[1] = spdk_io_channel_get_ctx(io_ch[1]); 753 CU_ASSERT(bdev_ch[1]->flags == 0); 754 status1 = SPDK_BDEV_IO_STATUS_PENDING; 755 rc = spdk_bdev_read_blocks(g_desc, io_ch[1], NULL, 0, 1, io_during_io_done, &status1); 756 CU_ASSERT(rc == 0); 757 758 poll_threads(); 759 CU_ASSERT(status0 == SPDK_BDEV_IO_STATUS_PENDING); 760 CU_ASSERT(status1 == SPDK_BDEV_IO_STATUS_PENDING); 761 762 set_thread(0); 763 stub_complete_io(g_bdev.io_target, 0); 764 CU_ASSERT(status0 == SPDK_BDEV_IO_STATUS_SUCCESS); 765 766 set_thread(1); 767 stub_complete_io(g_bdev.io_target, 0); 768 CU_ASSERT(status1 == SPDK_BDEV_IO_STATUS_SUCCESS); 769 770 /* 771 * Now submit a reset, and leave it pending while we submit I/O on two different 772 * channels. These I/O should be failed by the bdev layer since the reset is in 773 * progress. 774 */ 775 set_thread(0); 776 status_reset = SPDK_BDEV_IO_STATUS_PENDING; 777 rc = spdk_bdev_reset(g_desc, io_ch[0], io_during_io_done, &status_reset); 778 CU_ASSERT(rc == 0); 779 780 CU_ASSERT(bdev_ch[0]->flags == 0); 781 CU_ASSERT(bdev_ch[1]->flags == 0); 782 poll_threads(); 783 CU_ASSERT(bdev_ch[0]->flags == BDEV_CH_RESET_IN_PROGRESS); 784 CU_ASSERT(bdev_ch[1]->flags == BDEV_CH_RESET_IN_PROGRESS); 785 786 set_thread(0); 787 status0 = SPDK_BDEV_IO_STATUS_PENDING; 788 rc = spdk_bdev_read_blocks(g_desc, io_ch[0], NULL, 0, 1, io_during_io_done, &status0); 789 CU_ASSERT(rc == 0); 790 791 set_thread(1); 792 status1 = SPDK_BDEV_IO_STATUS_PENDING; 793 rc = spdk_bdev_read_blocks(g_desc, io_ch[1], NULL, 0, 1, io_during_io_done, &status1); 794 CU_ASSERT(rc == 0); 795 796 /* 797 * A reset is in progress so these read I/O should complete with aborted. Note that we 798 * need to poll_threads() since I/O completed inline have their completion deferred. 799 */ 800 poll_threads(); 801 CU_ASSERT(status_reset == SPDK_BDEV_IO_STATUS_PENDING); 802 CU_ASSERT(status0 == SPDK_BDEV_IO_STATUS_ABORTED); 803 CU_ASSERT(status1 == SPDK_BDEV_IO_STATUS_ABORTED); 804 805 /* 806 * Complete the reset 807 */ 808 set_thread(0); 809 stub_complete_io(g_bdev.io_target, 0); 810 811 /* 812 * Only poll thread 0. We should not get a completion. 813 */ 814 poll_thread(0); 815 CU_ASSERT(status_reset == SPDK_BDEV_IO_STATUS_PENDING); 816 817 /* 818 * Poll both thread 0 and 1 so the messages can propagate and we 819 * get a completion. 820 */ 821 poll_threads(); 822 CU_ASSERT(status_reset == SPDK_BDEV_IO_STATUS_SUCCESS); 823 824 spdk_put_io_channel(io_ch[0]); 825 set_thread(1); 826 spdk_put_io_channel(io_ch[1]); 827 poll_threads(); 828 829 teardown_test(); 830 } 831 832 static uint32_t 833 count_queued_resets(void *io_target) 834 { 835 struct spdk_io_channel *_ch = spdk_get_io_channel(io_target); 836 struct ut_bdev_channel *ch = spdk_io_channel_get_ctx(_ch); 837 struct spdk_bdev_io *io; 838 uint32_t submitted_resets = 0; 839 840 TAILQ_FOREACH(io, &ch->outstanding_io, module_link) { 841 if (io->type == SPDK_BDEV_IO_TYPE_RESET) { 842 submitted_resets++; 843 } 844 } 845 846 spdk_put_io_channel(_ch); 847 848 return submitted_resets; 849 } 850 851 static void 852 reset_completions(void) 853 { 854 struct spdk_io_channel *io_ch; 855 struct spdk_bdev_channel *bdev_ch; 856 struct spdk_bdev *bdev; 857 enum spdk_bdev_io_status status0, status_reset; 858 int rc, iter; 859 860 setup_test(); 861 862 /* This test covers four test cases: 863 * 1) reset_io_drain_timeout of a bdev is greater than 0 864 * 2) No outstandind IO are present on any bdev channel 865 * 3) Outstanding IO finish during bdev reset 866 * 4) Outstanding IO do not finish before reset is done waiting 867 * for them. 868 * 869 * Above conditions mainly affect the timing of bdev reset completion 870 * and whether a reset should be skipped via spdk_bdev_io_complete() 871 * or sent down to the underlying bdev module via bdev_io_submit_reset(). */ 872 873 /* Test preparation */ 874 set_thread(0); 875 io_ch = spdk_bdev_get_io_channel(g_desc); 876 bdev_ch = spdk_io_channel_get_ctx(io_ch); 877 CU_ASSERT(bdev_ch->flags == 0); 878 879 880 /* Test case 1) reset_io_drain_timeout set to 0. Reset should be sent down immediately. */ 881 bdev = &g_bdev.bdev; 882 bdev->reset_io_drain_timeout = 0; 883 884 status_reset = SPDK_BDEV_IO_STATUS_PENDING; 885 rc = spdk_bdev_reset(g_desc, io_ch, io_during_io_done, &status_reset); 886 CU_ASSERT(rc == 0); 887 poll_threads(); 888 CU_ASSERT(count_queued_resets(g_bdev.io_target) == 1); 889 890 /* Call reset completion inside bdev module. */ 891 stub_complete_io(g_bdev.io_target, 0); 892 poll_threads(); 893 CU_ASSERT(count_queued_resets(g_bdev.io_target) == 0); 894 CU_ASSERT(status_reset == SPDK_BDEV_IO_STATUS_SUCCESS); 895 CU_ASSERT(g_bdev.bdev.internal.reset_in_progress == NULL); 896 897 898 /* Test case 2) no outstanding IO are present. Reset should perform one iteration over 899 * channels and then be skipped. */ 900 bdev->reset_io_drain_timeout = SPDK_BDEV_RESET_IO_DRAIN_RECOMMENDED_VALUE; 901 status_reset = SPDK_BDEV_IO_STATUS_PENDING; 902 903 rc = spdk_bdev_reset(g_desc, io_ch, io_during_io_done, &status_reset); 904 CU_ASSERT(rc == 0); 905 poll_threads(); 906 /* Reset was never submitted to the bdev module. */ 907 CU_ASSERT(count_queued_resets(g_bdev.io_target) == 0); 908 CU_ASSERT(status_reset == SPDK_BDEV_IO_STATUS_SUCCESS); 909 CU_ASSERT(g_bdev.bdev.internal.reset_in_progress == NULL); 910 911 912 /* Test case 3) outstanding IO finish during bdev reset procedure. Reset should initiate 913 * wait poller to check for IO completions every second, until reset_io_drain_timeout is 914 * reached, but finish earlier than this threshold. */ 915 status0 = SPDK_BDEV_IO_STATUS_PENDING; 916 status_reset = SPDK_BDEV_IO_STATUS_PENDING; 917 rc = spdk_bdev_read_blocks(g_desc, io_ch, NULL, 0, 1, io_during_io_done, &status0); 918 CU_ASSERT(rc == 0); 919 920 rc = spdk_bdev_reset(g_desc, io_ch, io_during_io_done, &status_reset); 921 CU_ASSERT(rc == 0); 922 poll_threads(); 923 /* The reset just started and should not have been submitted yet. */ 924 CU_ASSERT(count_queued_resets(g_bdev.io_target) == 0); 925 926 poll_threads(); 927 CU_ASSERT(status_reset == SPDK_BDEV_IO_STATUS_PENDING); 928 /* Let the poller wait for about half the time then complete outstanding IO. */ 929 for (iter = 0; iter < 2; iter++) { 930 /* Reset is still processing and not submitted at this point. */ 931 CU_ASSERT(count_queued_resets(g_bdev.io_target) == 0); 932 spdk_delay_us(1000 * 1000); 933 poll_threads(); 934 poll_threads(); 935 } 936 CU_ASSERT(status_reset == SPDK_BDEV_IO_STATUS_PENDING); 937 stub_complete_io(g_bdev.io_target, 0); 938 poll_threads(); 939 spdk_delay_us(BDEV_RESET_CHECK_OUTSTANDING_IO_PERIOD); 940 poll_threads(); 941 poll_threads(); 942 CU_ASSERT(status_reset == SPDK_BDEV_IO_STATUS_SUCCESS); 943 /* Sending reset to the bdev module has been skipped. */ 944 CU_ASSERT(count_queued_resets(g_bdev.io_target) == 0); 945 CU_ASSERT(g_bdev.bdev.internal.reset_in_progress == NULL); 946 947 948 /* Test case 4) outstanding IO are still present after reset_io_drain_timeout 949 * seconds have passed. */ 950 status0 = SPDK_BDEV_IO_STATUS_PENDING; 951 status_reset = SPDK_BDEV_IO_STATUS_PENDING; 952 rc = spdk_bdev_read_blocks(g_desc, io_ch, NULL, 0, 1, io_during_io_done, &status0); 953 CU_ASSERT(rc == 0); 954 955 rc = spdk_bdev_reset(g_desc, io_ch, io_during_io_done, &status_reset); 956 CU_ASSERT(rc == 0); 957 poll_threads(); 958 /* The reset just started and should not have been submitted yet. */ 959 CU_ASSERT(count_queued_resets(g_bdev.io_target) == 0); 960 961 poll_threads(); 962 CU_ASSERT(status_reset == SPDK_BDEV_IO_STATUS_PENDING); 963 /* Let the poller wait for reset_io_drain_timeout seconds. */ 964 for (iter = 0; iter < bdev->reset_io_drain_timeout; iter++) { 965 CU_ASSERT(count_queued_resets(g_bdev.io_target) == 0); 966 spdk_delay_us(BDEV_RESET_CHECK_OUTSTANDING_IO_PERIOD); 967 poll_threads(); 968 poll_threads(); 969 } 970 971 /* After timing out, the reset should have been sent to the module. */ 972 CU_ASSERT(count_queued_resets(g_bdev.io_target) == 1); 973 /* Complete reset submitted to the module and the read IO. */ 974 stub_complete_io(g_bdev.io_target, 0); 975 poll_threads(); 976 CU_ASSERT(status_reset == SPDK_BDEV_IO_STATUS_SUCCESS); 977 CU_ASSERT(g_bdev.bdev.internal.reset_in_progress == NULL); 978 979 980 /* Destroy the channel and end the test. */ 981 spdk_put_io_channel(io_ch); 982 poll_threads(); 983 984 teardown_test(); 985 } 986 987 988 static void 989 basic_qos(void) 990 { 991 struct spdk_io_channel *io_ch[2]; 992 struct spdk_bdev_channel *bdev_ch[2]; 993 struct spdk_bdev *bdev; 994 enum spdk_bdev_io_status status, abort_status; 995 int rc; 996 997 setup_test(); 998 999 /* Enable QoS */ 1000 bdev = &g_bdev.bdev; 1001 bdev->internal.qos = calloc(1, sizeof(*bdev->internal.qos)); 1002 SPDK_CU_ASSERT_FATAL(bdev->internal.qos != NULL); 1003 /* 1004 * Enable read/write IOPS, read only byte per second and 1005 * read/write byte per second rate limits. 1006 * In this case, all rate limits will take equal effect. 1007 */ 1008 /* 2000 read/write I/O per second, or 2 per millisecond */ 1009 bdev->internal.qos->rate_limits[SPDK_BDEV_QOS_RW_IOPS_RATE_LIMIT].limit = 2000; 1010 /* 8K read/write byte per millisecond with 4K block size */ 1011 bdev->internal.qos->rate_limits[SPDK_BDEV_QOS_RW_BPS_RATE_LIMIT].limit = 8192000; 1012 /* 8K read only byte per millisecond with 4K block size */ 1013 bdev->internal.qos->rate_limits[SPDK_BDEV_QOS_R_BPS_RATE_LIMIT].limit = 8192000; 1014 1015 g_get_io_channel = true; 1016 1017 set_thread(0); 1018 io_ch[0] = spdk_bdev_get_io_channel(g_desc); 1019 bdev_ch[0] = spdk_io_channel_get_ctx(io_ch[0]); 1020 CU_ASSERT(bdev_ch[0]->flags == BDEV_CH_QOS_ENABLED); 1021 1022 set_thread(1); 1023 io_ch[1] = spdk_bdev_get_io_channel(g_desc); 1024 bdev_ch[1] = spdk_io_channel_get_ctx(io_ch[1]); 1025 CU_ASSERT(bdev_ch[1]->flags == BDEV_CH_QOS_ENABLED); 1026 1027 /* 1028 * Send an I/O on thread 0, which is where the QoS thread is running. 1029 */ 1030 set_thread(0); 1031 status = SPDK_BDEV_IO_STATUS_PENDING; 1032 rc = spdk_bdev_read_blocks(g_desc, io_ch[0], NULL, 0, 1, io_during_io_done, &status); 1033 CU_ASSERT(rc == 0); 1034 CU_ASSERT(status == SPDK_BDEV_IO_STATUS_PENDING); 1035 poll_threads(); 1036 stub_complete_io(g_bdev.io_target, 0); 1037 poll_threads(); 1038 CU_ASSERT(status == SPDK_BDEV_IO_STATUS_SUCCESS); 1039 1040 /* Send an I/O on thread 1. The QoS thread is not running here. */ 1041 status = SPDK_BDEV_IO_STATUS_PENDING; 1042 set_thread(1); 1043 rc = spdk_bdev_read_blocks(g_desc, io_ch[1], NULL, 0, 1, io_during_io_done, &status); 1044 CU_ASSERT(rc == 0); 1045 CU_ASSERT(status == SPDK_BDEV_IO_STATUS_PENDING); 1046 poll_threads(); 1047 /* Complete I/O on thread 0. This should not complete the I/O we submitted. */ 1048 set_thread(0); 1049 stub_complete_io(g_bdev.io_target, 0); 1050 poll_threads(); 1051 CU_ASSERT(status == SPDK_BDEV_IO_STATUS_PENDING); 1052 /* Now complete I/O on original thread 1. */ 1053 set_thread(1); 1054 poll_threads(); 1055 stub_complete_io(g_bdev.io_target, 0); 1056 poll_threads(); 1057 CU_ASSERT(status == SPDK_BDEV_IO_STATUS_SUCCESS); 1058 1059 /* Reset rate limit for the next test cases. */ 1060 spdk_delay_us(SPDK_BDEV_QOS_TIMESLICE_IN_USEC); 1061 poll_threads(); 1062 1063 /* 1064 * Test abort request when QoS is enabled. 1065 */ 1066 1067 /* Send an I/O on thread 0, which is where the QoS thread is running. */ 1068 set_thread(0); 1069 status = SPDK_BDEV_IO_STATUS_PENDING; 1070 rc = spdk_bdev_read_blocks(g_desc, io_ch[0], NULL, 0, 1, io_during_io_done, &status); 1071 CU_ASSERT(rc == 0); 1072 CU_ASSERT(status == SPDK_BDEV_IO_STATUS_PENDING); 1073 /* Send an abort to the I/O on the same thread. */ 1074 abort_status = SPDK_BDEV_IO_STATUS_PENDING; 1075 rc = spdk_bdev_abort(g_desc, io_ch[0], &status, io_during_io_done, &abort_status); 1076 CU_ASSERT(rc == 0); 1077 CU_ASSERT(abort_status == SPDK_BDEV_IO_STATUS_PENDING); 1078 poll_threads(); 1079 CU_ASSERT(abort_status == SPDK_BDEV_IO_STATUS_SUCCESS); 1080 CU_ASSERT(status == SPDK_BDEV_IO_STATUS_ABORTED); 1081 1082 /* Send an I/O on thread 1. The QoS thread is not running here. */ 1083 status = SPDK_BDEV_IO_STATUS_PENDING; 1084 set_thread(1); 1085 rc = spdk_bdev_read_blocks(g_desc, io_ch[1], NULL, 0, 1, io_during_io_done, &status); 1086 CU_ASSERT(rc == 0); 1087 CU_ASSERT(status == SPDK_BDEV_IO_STATUS_PENDING); 1088 poll_threads(); 1089 /* Send an abort to the I/O on the same thread. */ 1090 abort_status = SPDK_BDEV_IO_STATUS_PENDING; 1091 rc = spdk_bdev_abort(g_desc, io_ch[1], &status, io_during_io_done, &abort_status); 1092 CU_ASSERT(rc == 0); 1093 CU_ASSERT(abort_status == SPDK_BDEV_IO_STATUS_PENDING); 1094 poll_threads(); 1095 /* Complete the I/O with failure and the abort with success on thread 1. */ 1096 CU_ASSERT(abort_status == SPDK_BDEV_IO_STATUS_SUCCESS); 1097 CU_ASSERT(status == SPDK_BDEV_IO_STATUS_ABORTED); 1098 1099 set_thread(0); 1100 1101 /* 1102 * Close the descriptor only, which should stop the qos channel as 1103 * the last descriptor removed. 1104 */ 1105 spdk_bdev_close(g_desc); 1106 poll_threads(); 1107 CU_ASSERT(bdev->internal.qos->ch == NULL); 1108 1109 /* 1110 * Open the bdev again which shall setup the qos channel as the 1111 * channels are valid. 1112 */ 1113 spdk_bdev_open_ext("ut_bdev", true, _bdev_event_cb, NULL, &g_desc); 1114 poll_threads(); 1115 CU_ASSERT(bdev->internal.qos->ch != NULL); 1116 1117 /* Tear down the channels */ 1118 set_thread(0); 1119 spdk_put_io_channel(io_ch[0]); 1120 set_thread(1); 1121 spdk_put_io_channel(io_ch[1]); 1122 poll_threads(); 1123 set_thread(0); 1124 1125 /* Close the descriptor, which should stop the qos channel */ 1126 spdk_bdev_close(g_desc); 1127 poll_threads(); 1128 CU_ASSERT(bdev->internal.qos->ch == NULL); 1129 1130 /* Open the bdev again, no qos channel setup without valid channels. */ 1131 spdk_bdev_open_ext("ut_bdev", true, _bdev_event_cb, NULL, &g_desc); 1132 poll_threads(); 1133 CU_ASSERT(bdev->internal.qos->ch == NULL); 1134 1135 /* Create the channels in reverse order. */ 1136 set_thread(1); 1137 io_ch[1] = spdk_bdev_get_io_channel(g_desc); 1138 bdev_ch[1] = spdk_io_channel_get_ctx(io_ch[1]); 1139 CU_ASSERT(bdev_ch[1]->flags == BDEV_CH_QOS_ENABLED); 1140 1141 set_thread(0); 1142 io_ch[0] = spdk_bdev_get_io_channel(g_desc); 1143 bdev_ch[0] = spdk_io_channel_get_ctx(io_ch[0]); 1144 CU_ASSERT(bdev_ch[0]->flags == BDEV_CH_QOS_ENABLED); 1145 1146 /* Confirm that the qos thread is now thread 1 */ 1147 CU_ASSERT(bdev->internal.qos->ch == bdev_ch[1]); 1148 1149 /* Tear down the channels */ 1150 set_thread(0); 1151 spdk_put_io_channel(io_ch[0]); 1152 set_thread(1); 1153 spdk_put_io_channel(io_ch[1]); 1154 poll_threads(); 1155 1156 set_thread(0); 1157 1158 teardown_test(); 1159 } 1160 1161 static void 1162 io_during_qos_queue(void) 1163 { 1164 struct spdk_io_channel *io_ch[2]; 1165 struct spdk_bdev_channel *bdev_ch[2]; 1166 struct spdk_bdev *bdev; 1167 enum spdk_bdev_io_status status0, status1, status2; 1168 int rc; 1169 1170 setup_test(); 1171 MOCK_SET(spdk_get_ticks, 0); 1172 1173 /* Enable QoS */ 1174 bdev = &g_bdev.bdev; 1175 bdev->internal.qos = calloc(1, sizeof(*bdev->internal.qos)); 1176 SPDK_CU_ASSERT_FATAL(bdev->internal.qos != NULL); 1177 1178 /* 1179 * Enable read/write IOPS, read only byte per sec, write only 1180 * byte per sec and read/write byte per sec rate limits. 1181 * In this case, both read only and write only byte per sec 1182 * rate limit will take effect. 1183 */ 1184 /* 4000 read/write I/O per second, or 4 per millisecond */ 1185 bdev->internal.qos->rate_limits[SPDK_BDEV_QOS_RW_IOPS_RATE_LIMIT].limit = 4000; 1186 /* 8K byte per millisecond with 4K block size */ 1187 bdev->internal.qos->rate_limits[SPDK_BDEV_QOS_RW_BPS_RATE_LIMIT].limit = 8192000; 1188 /* 4K byte per millisecond with 4K block size */ 1189 bdev->internal.qos->rate_limits[SPDK_BDEV_QOS_R_BPS_RATE_LIMIT].limit = 4096000; 1190 /* 4K byte per millisecond with 4K block size */ 1191 bdev->internal.qos->rate_limits[SPDK_BDEV_QOS_W_BPS_RATE_LIMIT].limit = 4096000; 1192 1193 g_get_io_channel = true; 1194 1195 /* Create channels */ 1196 set_thread(0); 1197 io_ch[0] = spdk_bdev_get_io_channel(g_desc); 1198 bdev_ch[0] = spdk_io_channel_get_ctx(io_ch[0]); 1199 CU_ASSERT(bdev_ch[0]->flags == BDEV_CH_QOS_ENABLED); 1200 1201 set_thread(1); 1202 io_ch[1] = spdk_bdev_get_io_channel(g_desc); 1203 bdev_ch[1] = spdk_io_channel_get_ctx(io_ch[1]); 1204 CU_ASSERT(bdev_ch[1]->flags == BDEV_CH_QOS_ENABLED); 1205 1206 /* Send two read I/Os */ 1207 status1 = SPDK_BDEV_IO_STATUS_PENDING; 1208 rc = spdk_bdev_read_blocks(g_desc, io_ch[1], NULL, 0, 1, io_during_io_done, &status1); 1209 CU_ASSERT(rc == 0); 1210 CU_ASSERT(status1 == SPDK_BDEV_IO_STATUS_PENDING); 1211 set_thread(0); 1212 status0 = SPDK_BDEV_IO_STATUS_PENDING; 1213 rc = spdk_bdev_read_blocks(g_desc, io_ch[0], NULL, 0, 1, io_during_io_done, &status0); 1214 CU_ASSERT(rc == 0); 1215 CU_ASSERT(status0 == SPDK_BDEV_IO_STATUS_PENDING); 1216 /* Send one write I/O */ 1217 status2 = SPDK_BDEV_IO_STATUS_PENDING; 1218 rc = spdk_bdev_write_blocks(g_desc, io_ch[0], NULL, 0, 1, io_during_io_done, &status2); 1219 CU_ASSERT(rc == 0); 1220 CU_ASSERT(status2 == SPDK_BDEV_IO_STATUS_PENDING); 1221 1222 /* Complete any I/O that arrived at the disk */ 1223 poll_threads(); 1224 set_thread(1); 1225 stub_complete_io(g_bdev.io_target, 0); 1226 set_thread(0); 1227 stub_complete_io(g_bdev.io_target, 0); 1228 poll_threads(); 1229 1230 /* Only one of the two read I/Os should complete. (logical XOR) */ 1231 if (status0 == SPDK_BDEV_IO_STATUS_SUCCESS) { 1232 CU_ASSERT(status1 == SPDK_BDEV_IO_STATUS_PENDING); 1233 } else { 1234 CU_ASSERT(status1 == SPDK_BDEV_IO_STATUS_SUCCESS); 1235 } 1236 /* The write I/O should complete. */ 1237 CU_ASSERT(status2 == SPDK_BDEV_IO_STATUS_SUCCESS); 1238 1239 /* Advance in time by a millisecond */ 1240 spdk_delay_us(1000); 1241 1242 /* Complete more I/O */ 1243 poll_threads(); 1244 set_thread(1); 1245 stub_complete_io(g_bdev.io_target, 0); 1246 set_thread(0); 1247 stub_complete_io(g_bdev.io_target, 0); 1248 poll_threads(); 1249 1250 /* Now the second read I/O should be done */ 1251 CU_ASSERT(status0 == SPDK_BDEV_IO_STATUS_SUCCESS); 1252 CU_ASSERT(status1 == SPDK_BDEV_IO_STATUS_SUCCESS); 1253 1254 /* Tear down the channels */ 1255 set_thread(1); 1256 spdk_put_io_channel(io_ch[1]); 1257 set_thread(0); 1258 spdk_put_io_channel(io_ch[0]); 1259 poll_threads(); 1260 1261 teardown_test(); 1262 } 1263 1264 static void 1265 io_during_qos_reset(void) 1266 { 1267 struct spdk_io_channel *io_ch[2]; 1268 struct spdk_bdev_channel *bdev_ch[2]; 1269 struct spdk_bdev *bdev; 1270 enum spdk_bdev_io_status status0, status1, reset_status; 1271 int rc; 1272 1273 setup_test(); 1274 MOCK_SET(spdk_get_ticks, 0); 1275 1276 /* Enable QoS */ 1277 bdev = &g_bdev.bdev; 1278 bdev->internal.qos = calloc(1, sizeof(*bdev->internal.qos)); 1279 SPDK_CU_ASSERT_FATAL(bdev->internal.qos != NULL); 1280 1281 /* 1282 * Enable read/write IOPS, write only byte per sec and 1283 * read/write byte per second rate limits. 1284 * In this case, read/write byte per second rate limit will 1285 * take effect first. 1286 */ 1287 /* 2000 read/write I/O per second, or 2 per millisecond */ 1288 bdev->internal.qos->rate_limits[SPDK_BDEV_QOS_RW_IOPS_RATE_LIMIT].limit = 2000; 1289 /* 4K byte per millisecond with 4K block size */ 1290 bdev->internal.qos->rate_limits[SPDK_BDEV_QOS_RW_BPS_RATE_LIMIT].limit = 4096000; 1291 /* 8K byte per millisecond with 4K block size */ 1292 bdev->internal.qos->rate_limits[SPDK_BDEV_QOS_W_BPS_RATE_LIMIT].limit = 8192000; 1293 1294 g_get_io_channel = true; 1295 1296 /* Create channels */ 1297 set_thread(0); 1298 io_ch[0] = spdk_bdev_get_io_channel(g_desc); 1299 bdev_ch[0] = spdk_io_channel_get_ctx(io_ch[0]); 1300 CU_ASSERT(bdev_ch[0]->flags == BDEV_CH_QOS_ENABLED); 1301 1302 set_thread(1); 1303 io_ch[1] = spdk_bdev_get_io_channel(g_desc); 1304 bdev_ch[1] = spdk_io_channel_get_ctx(io_ch[1]); 1305 CU_ASSERT(bdev_ch[1]->flags == BDEV_CH_QOS_ENABLED); 1306 1307 /* Send two I/O. One of these gets queued by QoS. The other is sitting at the disk. */ 1308 status1 = SPDK_BDEV_IO_STATUS_PENDING; 1309 rc = spdk_bdev_write_blocks(g_desc, io_ch[1], NULL, 0, 1, io_during_io_done, &status1); 1310 CU_ASSERT(rc == 0); 1311 set_thread(0); 1312 status0 = SPDK_BDEV_IO_STATUS_PENDING; 1313 rc = spdk_bdev_write_blocks(g_desc, io_ch[0], NULL, 0, 1, io_during_io_done, &status0); 1314 CU_ASSERT(rc == 0); 1315 1316 poll_threads(); 1317 CU_ASSERT(status1 == SPDK_BDEV_IO_STATUS_PENDING); 1318 CU_ASSERT(status0 == SPDK_BDEV_IO_STATUS_PENDING); 1319 1320 /* Reset the bdev. */ 1321 reset_status = SPDK_BDEV_IO_STATUS_PENDING; 1322 rc = spdk_bdev_reset(g_desc, io_ch[0], io_during_io_done, &reset_status); 1323 CU_ASSERT(rc == 0); 1324 1325 /* Complete any I/O that arrived at the disk */ 1326 poll_threads(); 1327 set_thread(1); 1328 stub_complete_io(g_bdev.io_target, 0); 1329 set_thread(0); 1330 stub_complete_io(g_bdev.io_target, 0); 1331 poll_threads(); 1332 1333 CU_ASSERT(reset_status == SPDK_BDEV_IO_STATUS_SUCCESS); 1334 CU_ASSERT(status0 == SPDK_BDEV_IO_STATUS_ABORTED); 1335 CU_ASSERT(status1 == SPDK_BDEV_IO_STATUS_ABORTED); 1336 1337 /* Tear down the channels */ 1338 set_thread(1); 1339 spdk_put_io_channel(io_ch[1]); 1340 set_thread(0); 1341 spdk_put_io_channel(io_ch[0]); 1342 poll_threads(); 1343 1344 teardown_test(); 1345 } 1346 1347 static void 1348 enomem_done(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg) 1349 { 1350 enum spdk_bdev_io_status *status = cb_arg; 1351 1352 *status = success ? SPDK_BDEV_IO_STATUS_SUCCESS : SPDK_BDEV_IO_STATUS_FAILED; 1353 spdk_bdev_free_io(bdev_io); 1354 } 1355 1356 static void 1357 enomem(void) 1358 { 1359 struct spdk_io_channel *io_ch; 1360 struct spdk_bdev_channel *bdev_ch; 1361 struct spdk_bdev_shared_resource *shared_resource; 1362 struct ut_bdev_channel *ut_ch; 1363 const uint32_t IO_ARRAY_SIZE = 64; 1364 const uint32_t AVAIL = 20; 1365 enum spdk_bdev_io_status status[IO_ARRAY_SIZE], status_reset; 1366 uint32_t nomem_cnt, i; 1367 struct spdk_bdev_io *first_io; 1368 int rc; 1369 1370 setup_test(); 1371 1372 set_thread(0); 1373 io_ch = spdk_bdev_get_io_channel(g_desc); 1374 bdev_ch = spdk_io_channel_get_ctx(io_ch); 1375 shared_resource = bdev_ch->shared_resource; 1376 ut_ch = spdk_io_channel_get_ctx(bdev_ch->channel); 1377 ut_ch->avail_cnt = AVAIL; 1378 1379 /* First submit a number of IOs equal to what the channel can support. */ 1380 for (i = 0; i < AVAIL; i++) { 1381 status[i] = SPDK_BDEV_IO_STATUS_PENDING; 1382 rc = spdk_bdev_read_blocks(g_desc, io_ch, NULL, 0, 1, enomem_done, &status[i]); 1383 CU_ASSERT(rc == 0); 1384 } 1385 CU_ASSERT(TAILQ_EMPTY(&shared_resource->nomem_io)); 1386 1387 /* 1388 * Next, submit one additional I/O. This one should fail with ENOMEM and then go onto 1389 * the enomem_io list. 1390 */ 1391 status[AVAIL] = SPDK_BDEV_IO_STATUS_PENDING; 1392 rc = spdk_bdev_read_blocks(g_desc, io_ch, NULL, 0, 1, enomem_done, &status[AVAIL]); 1393 CU_ASSERT(rc == 0); 1394 SPDK_CU_ASSERT_FATAL(!TAILQ_EMPTY(&shared_resource->nomem_io)); 1395 first_io = TAILQ_FIRST(&shared_resource->nomem_io); 1396 1397 /* 1398 * Now submit a bunch more I/O. These should all fail with ENOMEM and get queued behind 1399 * the first_io above. 1400 */ 1401 for (i = AVAIL + 1; i < IO_ARRAY_SIZE; i++) { 1402 status[i] = SPDK_BDEV_IO_STATUS_PENDING; 1403 rc = spdk_bdev_read_blocks(g_desc, io_ch, NULL, 0, 1, enomem_done, &status[i]); 1404 CU_ASSERT(rc == 0); 1405 } 1406 1407 /* Assert that first_io is still at the head of the list. */ 1408 CU_ASSERT(TAILQ_FIRST(&shared_resource->nomem_io) == first_io); 1409 CU_ASSERT(bdev_io_tailq_cnt(&shared_resource->nomem_io) == (IO_ARRAY_SIZE - AVAIL)); 1410 nomem_cnt = bdev_io_tailq_cnt(&shared_resource->nomem_io); 1411 CU_ASSERT(shared_resource->nomem_threshold == (AVAIL - NOMEM_THRESHOLD_COUNT)); 1412 1413 /* 1414 * Complete 1 I/O only. The key check here is bdev_io_tailq_cnt - this should not have 1415 * changed since completing just 1 I/O should not trigger retrying the queued nomem_io 1416 * list. 1417 */ 1418 stub_complete_io(g_bdev.io_target, 1); 1419 CU_ASSERT(bdev_io_tailq_cnt(&shared_resource->nomem_io) == nomem_cnt); 1420 1421 /* 1422 * Complete enough I/O to hit the nomem_threshold. This should trigger retrying nomem_io, 1423 * and we should see I/O get resubmitted to the test bdev module. 1424 */ 1425 stub_complete_io(g_bdev.io_target, NOMEM_THRESHOLD_COUNT - 1); 1426 CU_ASSERT(bdev_io_tailq_cnt(&shared_resource->nomem_io) < nomem_cnt); 1427 nomem_cnt = bdev_io_tailq_cnt(&shared_resource->nomem_io); 1428 1429 /* Complete 1 I/O only. This should not trigger retrying the queued nomem_io. */ 1430 stub_complete_io(g_bdev.io_target, 1); 1431 CU_ASSERT(bdev_io_tailq_cnt(&shared_resource->nomem_io) == nomem_cnt); 1432 1433 /* 1434 * Send a reset and confirm that all I/O are completed, including the ones that 1435 * were queued on the nomem_io list. 1436 */ 1437 status_reset = SPDK_BDEV_IO_STATUS_PENDING; 1438 rc = spdk_bdev_reset(g_desc, io_ch, enomem_done, &status_reset); 1439 poll_threads(); 1440 CU_ASSERT(rc == 0); 1441 /* This will complete the reset. */ 1442 stub_complete_io(g_bdev.io_target, 0); 1443 1444 CU_ASSERT(bdev_io_tailq_cnt(&shared_resource->nomem_io) == 0); 1445 CU_ASSERT(shared_resource->io_outstanding == 0); 1446 1447 spdk_put_io_channel(io_ch); 1448 poll_threads(); 1449 teardown_test(); 1450 } 1451 1452 static void 1453 enomem_multi_bdev(void) 1454 { 1455 struct spdk_io_channel *io_ch; 1456 struct spdk_bdev_channel *bdev_ch; 1457 struct spdk_bdev_shared_resource *shared_resource; 1458 struct ut_bdev_channel *ut_ch; 1459 const uint32_t IO_ARRAY_SIZE = 64; 1460 const uint32_t AVAIL = 20; 1461 enum spdk_bdev_io_status status[IO_ARRAY_SIZE]; 1462 uint32_t i; 1463 struct ut_bdev *second_bdev; 1464 struct spdk_bdev_desc *second_desc = NULL; 1465 struct spdk_bdev_channel *second_bdev_ch; 1466 struct spdk_io_channel *second_ch; 1467 int rc; 1468 1469 setup_test(); 1470 1471 /* Register second bdev with the same io_target */ 1472 second_bdev = calloc(1, sizeof(*second_bdev)); 1473 SPDK_CU_ASSERT_FATAL(second_bdev != NULL); 1474 register_bdev(second_bdev, "ut_bdev2", g_bdev.io_target); 1475 spdk_bdev_open_ext("ut_bdev2", true, _bdev_event_cb, NULL, &second_desc); 1476 SPDK_CU_ASSERT_FATAL(second_desc != NULL); 1477 1478 set_thread(0); 1479 io_ch = spdk_bdev_get_io_channel(g_desc); 1480 bdev_ch = spdk_io_channel_get_ctx(io_ch); 1481 shared_resource = bdev_ch->shared_resource; 1482 ut_ch = spdk_io_channel_get_ctx(bdev_ch->channel); 1483 ut_ch->avail_cnt = AVAIL; 1484 1485 second_ch = spdk_bdev_get_io_channel(second_desc); 1486 second_bdev_ch = spdk_io_channel_get_ctx(second_ch); 1487 SPDK_CU_ASSERT_FATAL(shared_resource == second_bdev_ch->shared_resource); 1488 1489 /* Saturate io_target through bdev A. */ 1490 for (i = 0; i < AVAIL; i++) { 1491 status[i] = SPDK_BDEV_IO_STATUS_PENDING; 1492 rc = spdk_bdev_read_blocks(g_desc, io_ch, NULL, 0, 1, enomem_done, &status[i]); 1493 CU_ASSERT(rc == 0); 1494 } 1495 CU_ASSERT(TAILQ_EMPTY(&shared_resource->nomem_io)); 1496 1497 /* 1498 * Now submit I/O through the second bdev. This should fail with ENOMEM 1499 * and then go onto the nomem_io list. 1500 */ 1501 status[AVAIL] = SPDK_BDEV_IO_STATUS_PENDING; 1502 rc = spdk_bdev_read_blocks(second_desc, second_ch, NULL, 0, 1, enomem_done, &status[AVAIL]); 1503 CU_ASSERT(rc == 0); 1504 SPDK_CU_ASSERT_FATAL(!TAILQ_EMPTY(&shared_resource->nomem_io)); 1505 1506 /* Complete first bdev's I/O. This should retry sending second bdev's nomem_io */ 1507 stub_complete_io(g_bdev.io_target, AVAIL); 1508 1509 SPDK_CU_ASSERT_FATAL(TAILQ_EMPTY(&shared_resource->nomem_io)); 1510 CU_ASSERT(shared_resource->io_outstanding == 1); 1511 1512 /* Now complete our retried I/O */ 1513 stub_complete_io(g_bdev.io_target, 1); 1514 SPDK_CU_ASSERT_FATAL(shared_resource->io_outstanding == 0); 1515 1516 spdk_put_io_channel(io_ch); 1517 spdk_put_io_channel(second_ch); 1518 spdk_bdev_close(second_desc); 1519 unregister_bdev(second_bdev); 1520 poll_threads(); 1521 free(second_bdev); 1522 teardown_test(); 1523 } 1524 1525 static void 1526 enomem_multi_bdev_unregister(void) 1527 { 1528 struct spdk_io_channel *io_ch; 1529 struct spdk_bdev_channel *bdev_ch; 1530 struct spdk_bdev_shared_resource *shared_resource; 1531 struct ut_bdev_channel *ut_ch; 1532 const uint32_t IO_ARRAY_SIZE = 64; 1533 const uint32_t AVAIL = 20; 1534 enum spdk_bdev_io_status status[IO_ARRAY_SIZE]; 1535 uint32_t i; 1536 int rc; 1537 1538 setup_test(); 1539 1540 set_thread(0); 1541 io_ch = spdk_bdev_get_io_channel(g_desc); 1542 bdev_ch = spdk_io_channel_get_ctx(io_ch); 1543 shared_resource = bdev_ch->shared_resource; 1544 ut_ch = spdk_io_channel_get_ctx(bdev_ch->channel); 1545 ut_ch->avail_cnt = AVAIL; 1546 1547 /* Saturate io_target through the bdev. */ 1548 for (i = 0; i < AVAIL; i++) { 1549 status[i] = SPDK_BDEV_IO_STATUS_PENDING; 1550 rc = spdk_bdev_read_blocks(g_desc, io_ch, NULL, 0, 1, enomem_done, &status[i]); 1551 CU_ASSERT(rc == 0); 1552 } 1553 CU_ASSERT(TAILQ_EMPTY(&shared_resource->nomem_io)); 1554 1555 /* 1556 * Now submit I/O through the bdev. This should fail with ENOMEM 1557 * and then go onto the nomem_io list. 1558 */ 1559 status[AVAIL] = SPDK_BDEV_IO_STATUS_PENDING; 1560 rc = spdk_bdev_read_blocks(g_desc, io_ch, NULL, 0, 1, enomem_done, &status[AVAIL]); 1561 CU_ASSERT(rc == 0); 1562 SPDK_CU_ASSERT_FATAL(!TAILQ_EMPTY(&shared_resource->nomem_io)); 1563 1564 /* Unregister the bdev to abort the IOs from nomem_io queue. */ 1565 unregister_bdev(&g_bdev); 1566 CU_ASSERT(status[AVAIL] == SPDK_BDEV_IO_STATUS_FAILED); 1567 SPDK_CU_ASSERT_FATAL(TAILQ_EMPTY(&shared_resource->nomem_io)); 1568 SPDK_CU_ASSERT_FATAL(shared_resource->io_outstanding == AVAIL); 1569 1570 /* Complete the bdev's I/O. */ 1571 stub_complete_io(g_bdev.io_target, AVAIL); 1572 SPDK_CU_ASSERT_FATAL(shared_resource->io_outstanding == 0); 1573 1574 spdk_put_io_channel(io_ch); 1575 poll_threads(); 1576 teardown_test(); 1577 } 1578 1579 static void 1580 enomem_multi_io_target(void) 1581 { 1582 struct spdk_io_channel *io_ch; 1583 struct spdk_bdev_channel *bdev_ch; 1584 struct ut_bdev_channel *ut_ch; 1585 const uint32_t IO_ARRAY_SIZE = 64; 1586 const uint32_t AVAIL = 20; 1587 enum spdk_bdev_io_status status[IO_ARRAY_SIZE]; 1588 uint32_t i; 1589 int new_io_device; 1590 struct ut_bdev *second_bdev; 1591 struct spdk_bdev_desc *second_desc = NULL; 1592 struct spdk_bdev_channel *second_bdev_ch; 1593 struct spdk_io_channel *second_ch; 1594 int rc; 1595 1596 setup_test(); 1597 1598 /* Create new io_target and a second bdev using it */ 1599 spdk_io_device_register(&new_io_device, stub_create_ch, stub_destroy_ch, 1600 sizeof(struct ut_bdev_channel), NULL); 1601 second_bdev = calloc(1, sizeof(*second_bdev)); 1602 SPDK_CU_ASSERT_FATAL(second_bdev != NULL); 1603 register_bdev(second_bdev, "ut_bdev2", &new_io_device); 1604 spdk_bdev_open_ext("ut_bdev2", true, _bdev_event_cb, NULL, &second_desc); 1605 SPDK_CU_ASSERT_FATAL(second_desc != NULL); 1606 1607 set_thread(0); 1608 io_ch = spdk_bdev_get_io_channel(g_desc); 1609 bdev_ch = spdk_io_channel_get_ctx(io_ch); 1610 ut_ch = spdk_io_channel_get_ctx(bdev_ch->channel); 1611 ut_ch->avail_cnt = AVAIL; 1612 1613 /* Different io_target should imply a different shared_resource */ 1614 second_ch = spdk_bdev_get_io_channel(second_desc); 1615 second_bdev_ch = spdk_io_channel_get_ctx(second_ch); 1616 SPDK_CU_ASSERT_FATAL(bdev_ch->shared_resource != second_bdev_ch->shared_resource); 1617 1618 /* Saturate io_target through bdev A. */ 1619 for (i = 0; i < AVAIL; i++) { 1620 status[i] = SPDK_BDEV_IO_STATUS_PENDING; 1621 rc = spdk_bdev_read_blocks(g_desc, io_ch, NULL, 0, 1, enomem_done, &status[i]); 1622 CU_ASSERT(rc == 0); 1623 } 1624 CU_ASSERT(TAILQ_EMPTY(&bdev_ch->shared_resource->nomem_io)); 1625 1626 /* Issue one more I/O to fill ENOMEM list. */ 1627 status[AVAIL] = SPDK_BDEV_IO_STATUS_PENDING; 1628 rc = spdk_bdev_read_blocks(g_desc, io_ch, NULL, 0, 1, enomem_done, &status[AVAIL]); 1629 CU_ASSERT(rc == 0); 1630 SPDK_CU_ASSERT_FATAL(!TAILQ_EMPTY(&bdev_ch->shared_resource->nomem_io)); 1631 1632 /* 1633 * Now submit I/O through the second bdev. This should go through and complete 1634 * successfully because we're using a different io_device underneath. 1635 */ 1636 status[AVAIL] = SPDK_BDEV_IO_STATUS_PENDING; 1637 rc = spdk_bdev_read_blocks(second_desc, second_ch, NULL, 0, 1, enomem_done, &status[AVAIL]); 1638 CU_ASSERT(rc == 0); 1639 SPDK_CU_ASSERT_FATAL(TAILQ_EMPTY(&second_bdev_ch->shared_resource->nomem_io)); 1640 stub_complete_io(second_bdev->io_target, 1); 1641 1642 /* Cleanup; Complete outstanding I/O. */ 1643 stub_complete_io(g_bdev.io_target, AVAIL); 1644 SPDK_CU_ASSERT_FATAL(TAILQ_EMPTY(&bdev_ch->shared_resource->nomem_io)); 1645 /* Complete the ENOMEM I/O */ 1646 stub_complete_io(g_bdev.io_target, 1); 1647 CU_ASSERT(bdev_ch->shared_resource->io_outstanding == 0); 1648 1649 SPDK_CU_ASSERT_FATAL(TAILQ_EMPTY(&bdev_ch->shared_resource->nomem_io)); 1650 CU_ASSERT(bdev_ch->shared_resource->io_outstanding == 0); 1651 spdk_put_io_channel(io_ch); 1652 spdk_put_io_channel(second_ch); 1653 spdk_bdev_close(second_desc); 1654 unregister_bdev(second_bdev); 1655 spdk_io_device_unregister(&new_io_device, NULL); 1656 poll_threads(); 1657 free(second_bdev); 1658 teardown_test(); 1659 } 1660 1661 static void 1662 qos_dynamic_enable_done(void *cb_arg, int status) 1663 { 1664 int *rc = cb_arg; 1665 *rc = status; 1666 } 1667 1668 static void 1669 qos_dynamic_enable(void) 1670 { 1671 struct spdk_io_channel *io_ch[2]; 1672 struct spdk_bdev_channel *bdev_ch[2]; 1673 struct spdk_bdev *bdev; 1674 enum spdk_bdev_io_status bdev_io_status[2]; 1675 uint64_t limits[SPDK_BDEV_QOS_NUM_RATE_LIMIT_TYPES] = {}; 1676 int status, second_status, rc, i; 1677 1678 setup_test(); 1679 MOCK_SET(spdk_get_ticks, 0); 1680 1681 for (i = 0; i < SPDK_BDEV_QOS_NUM_RATE_LIMIT_TYPES; i++) { 1682 limits[i] = UINT64_MAX; 1683 } 1684 1685 bdev = &g_bdev.bdev; 1686 1687 g_get_io_channel = true; 1688 1689 /* Create channels */ 1690 set_thread(0); 1691 io_ch[0] = spdk_bdev_get_io_channel(g_desc); 1692 bdev_ch[0] = spdk_io_channel_get_ctx(io_ch[0]); 1693 CU_ASSERT(bdev_ch[0]->flags == 0); 1694 1695 set_thread(1); 1696 io_ch[1] = spdk_bdev_get_io_channel(g_desc); 1697 bdev_ch[1] = spdk_io_channel_get_ctx(io_ch[1]); 1698 CU_ASSERT(bdev_ch[1]->flags == 0); 1699 1700 set_thread(0); 1701 1702 /* 1703 * Enable QoS: Read/Write IOPS, Read/Write byte, 1704 * Read only byte and Write only byte per second 1705 * rate limits. 1706 * More than 10 I/Os allowed per timeslice. 1707 */ 1708 status = -1; 1709 limits[SPDK_BDEV_QOS_RW_IOPS_RATE_LIMIT] = 10000; 1710 limits[SPDK_BDEV_QOS_RW_BPS_RATE_LIMIT] = 100; 1711 limits[SPDK_BDEV_QOS_R_BPS_RATE_LIMIT] = 100; 1712 limits[SPDK_BDEV_QOS_W_BPS_RATE_LIMIT] = 10; 1713 spdk_bdev_set_qos_rate_limits(bdev, limits, qos_dynamic_enable_done, &status); 1714 poll_threads(); 1715 CU_ASSERT(status == 0); 1716 CU_ASSERT((bdev_ch[0]->flags & BDEV_CH_QOS_ENABLED) != 0); 1717 CU_ASSERT((bdev_ch[1]->flags & BDEV_CH_QOS_ENABLED) != 0); 1718 1719 /* 1720 * Submit and complete 10 I/O to fill the QoS allotment for this timeslice. 1721 * Additional I/O will then be queued. 1722 */ 1723 set_thread(0); 1724 for (i = 0; i < 10; i++) { 1725 bdev_io_status[0] = SPDK_BDEV_IO_STATUS_PENDING; 1726 rc = spdk_bdev_read_blocks(g_desc, io_ch[0], NULL, 0, 1, io_during_io_done, &bdev_io_status[0]); 1727 CU_ASSERT(rc == 0); 1728 CU_ASSERT(bdev_io_status[0] == SPDK_BDEV_IO_STATUS_PENDING); 1729 poll_thread(0); 1730 stub_complete_io(g_bdev.io_target, 0); 1731 CU_ASSERT(bdev_io_status[0] == SPDK_BDEV_IO_STATUS_SUCCESS); 1732 } 1733 1734 /* 1735 * Send two more I/O. These I/O will be queued since the current timeslice allotment has been 1736 * filled already. We want to test that when QoS is disabled that these two I/O: 1737 * 1) are not aborted 1738 * 2) are sent back to their original thread for resubmission 1739 */ 1740 bdev_io_status[0] = SPDK_BDEV_IO_STATUS_PENDING; 1741 rc = spdk_bdev_read_blocks(g_desc, io_ch[0], NULL, 0, 1, io_during_io_done, &bdev_io_status[0]); 1742 CU_ASSERT(rc == 0); 1743 CU_ASSERT(bdev_io_status[0] == SPDK_BDEV_IO_STATUS_PENDING); 1744 set_thread(1); 1745 bdev_io_status[1] = SPDK_BDEV_IO_STATUS_PENDING; 1746 rc = spdk_bdev_read_blocks(g_desc, io_ch[1], NULL, 0, 1, io_during_io_done, &bdev_io_status[1]); 1747 CU_ASSERT(rc == 0); 1748 CU_ASSERT(bdev_io_status[1] == SPDK_BDEV_IO_STATUS_PENDING); 1749 poll_threads(); 1750 1751 /* 1752 * Disable QoS: Read/Write IOPS, Read/Write byte, 1753 * Read only byte rate limits 1754 */ 1755 status = -1; 1756 limits[SPDK_BDEV_QOS_RW_IOPS_RATE_LIMIT] = 0; 1757 limits[SPDK_BDEV_QOS_RW_BPS_RATE_LIMIT] = 0; 1758 limits[SPDK_BDEV_QOS_R_BPS_RATE_LIMIT] = 0; 1759 spdk_bdev_set_qos_rate_limits(bdev, limits, qos_dynamic_enable_done, &status); 1760 poll_threads(); 1761 CU_ASSERT(status == 0); 1762 CU_ASSERT((bdev_ch[0]->flags & BDEV_CH_QOS_ENABLED) != 0); 1763 CU_ASSERT((bdev_ch[1]->flags & BDEV_CH_QOS_ENABLED) != 0); 1764 1765 /* Disable QoS: Write only Byte per second rate limit */ 1766 status = -1; 1767 limits[SPDK_BDEV_QOS_W_BPS_RATE_LIMIT] = 0; 1768 spdk_bdev_set_qos_rate_limits(bdev, limits, qos_dynamic_enable_done, &status); 1769 poll_threads(); 1770 CU_ASSERT(status == 0); 1771 CU_ASSERT((bdev_ch[0]->flags & BDEV_CH_QOS_ENABLED) == 0); 1772 CU_ASSERT((bdev_ch[1]->flags & BDEV_CH_QOS_ENABLED) == 0); 1773 1774 /* 1775 * All I/O should have been resubmitted back on their original thread. Complete 1776 * all I/O on thread 0, and ensure that only the thread 0 I/O was completed. 1777 */ 1778 set_thread(0); 1779 stub_complete_io(g_bdev.io_target, 0); 1780 poll_threads(); 1781 CU_ASSERT(bdev_io_status[0] == SPDK_BDEV_IO_STATUS_SUCCESS); 1782 CU_ASSERT(bdev_io_status[1] == SPDK_BDEV_IO_STATUS_PENDING); 1783 1784 /* Now complete all I/O on thread 1 and ensure the thread 1 I/O was completed. */ 1785 set_thread(1); 1786 stub_complete_io(g_bdev.io_target, 0); 1787 poll_threads(); 1788 CU_ASSERT(bdev_io_status[1] == SPDK_BDEV_IO_STATUS_SUCCESS); 1789 1790 /* Disable QoS again */ 1791 status = -1; 1792 limits[SPDK_BDEV_QOS_RW_IOPS_RATE_LIMIT] = 0; 1793 spdk_bdev_set_qos_rate_limits(bdev, limits, qos_dynamic_enable_done, &status); 1794 poll_threads(); 1795 CU_ASSERT(status == 0); /* This should succeed */ 1796 CU_ASSERT((bdev_ch[0]->flags & BDEV_CH_QOS_ENABLED) == 0); 1797 CU_ASSERT((bdev_ch[1]->flags & BDEV_CH_QOS_ENABLED) == 0); 1798 1799 /* Enable QoS on thread 0 */ 1800 status = -1; 1801 limits[SPDK_BDEV_QOS_RW_IOPS_RATE_LIMIT] = 10000; 1802 spdk_bdev_set_qos_rate_limits(bdev, limits, qos_dynamic_enable_done, &status); 1803 poll_threads(); 1804 CU_ASSERT(status == 0); 1805 CU_ASSERT((bdev_ch[0]->flags & BDEV_CH_QOS_ENABLED) != 0); 1806 CU_ASSERT((bdev_ch[1]->flags & BDEV_CH_QOS_ENABLED) != 0); 1807 1808 /* Disable QoS on thread 1 */ 1809 set_thread(1); 1810 status = -1; 1811 limits[SPDK_BDEV_QOS_RW_IOPS_RATE_LIMIT] = 0; 1812 spdk_bdev_set_qos_rate_limits(bdev, limits, qos_dynamic_enable_done, &status); 1813 /* Don't poll yet. This should leave the channels with QoS enabled */ 1814 CU_ASSERT(status == -1); 1815 CU_ASSERT((bdev_ch[0]->flags & BDEV_CH_QOS_ENABLED) != 0); 1816 CU_ASSERT((bdev_ch[1]->flags & BDEV_CH_QOS_ENABLED) != 0); 1817 1818 /* Enable QoS. This should immediately fail because the previous disable QoS hasn't completed. */ 1819 second_status = 0; 1820 limits[SPDK_BDEV_QOS_RW_BPS_RATE_LIMIT] = 10; 1821 spdk_bdev_set_qos_rate_limits(bdev, limits, qos_dynamic_enable_done, &second_status); 1822 poll_threads(); 1823 CU_ASSERT(status == 0); /* The disable should succeed */ 1824 CU_ASSERT(second_status < 0); /* The enable should fail */ 1825 CU_ASSERT((bdev_ch[0]->flags & BDEV_CH_QOS_ENABLED) == 0); 1826 CU_ASSERT((bdev_ch[1]->flags & BDEV_CH_QOS_ENABLED) == 0); 1827 1828 /* Enable QoS on thread 1. This should succeed now that the disable has completed. */ 1829 status = -1; 1830 limits[SPDK_BDEV_QOS_RW_IOPS_RATE_LIMIT] = 10000; 1831 spdk_bdev_set_qos_rate_limits(bdev, limits, qos_dynamic_enable_done, &status); 1832 poll_threads(); 1833 CU_ASSERT(status == 0); 1834 CU_ASSERT((bdev_ch[0]->flags & BDEV_CH_QOS_ENABLED) != 0); 1835 CU_ASSERT((bdev_ch[1]->flags & BDEV_CH_QOS_ENABLED) != 0); 1836 1837 /* Tear down the channels */ 1838 set_thread(0); 1839 spdk_put_io_channel(io_ch[0]); 1840 set_thread(1); 1841 spdk_put_io_channel(io_ch[1]); 1842 poll_threads(); 1843 1844 set_thread(0); 1845 teardown_test(); 1846 } 1847 1848 static void 1849 histogram_status_cb(void *cb_arg, int status) 1850 { 1851 g_status = status; 1852 } 1853 1854 static void 1855 histogram_data_cb(void *cb_arg, int status, struct spdk_histogram_data *histogram) 1856 { 1857 g_status = status; 1858 g_histogram = histogram; 1859 } 1860 1861 static void 1862 histogram_io_count(void *ctx, uint64_t start, uint64_t end, uint64_t count, 1863 uint64_t total, uint64_t so_far) 1864 { 1865 g_count += count; 1866 } 1867 1868 static void 1869 bdev_histograms_mt(void) 1870 { 1871 struct spdk_io_channel *ch[2]; 1872 struct spdk_histogram_data *histogram; 1873 uint8_t buf[4096]; 1874 int status = false; 1875 int rc; 1876 1877 1878 setup_test(); 1879 1880 set_thread(0); 1881 ch[0] = spdk_bdev_get_io_channel(g_desc); 1882 CU_ASSERT(ch[0] != NULL); 1883 1884 set_thread(1); 1885 ch[1] = spdk_bdev_get_io_channel(g_desc); 1886 CU_ASSERT(ch[1] != NULL); 1887 1888 1889 /* Enable histogram */ 1890 spdk_bdev_histogram_enable(&g_bdev.bdev, histogram_status_cb, NULL, true); 1891 poll_threads(); 1892 CU_ASSERT(g_status == 0); 1893 CU_ASSERT(g_bdev.bdev.internal.histogram_enabled == true); 1894 1895 /* Allocate histogram */ 1896 histogram = spdk_histogram_data_alloc(); 1897 1898 /* Check if histogram is zeroed */ 1899 spdk_bdev_histogram_get(&g_bdev.bdev, histogram, histogram_data_cb, NULL); 1900 poll_threads(); 1901 CU_ASSERT(g_status == 0); 1902 SPDK_CU_ASSERT_FATAL(g_histogram != NULL); 1903 1904 g_count = 0; 1905 spdk_histogram_data_iterate(g_histogram, histogram_io_count, NULL); 1906 1907 CU_ASSERT(g_count == 0); 1908 1909 set_thread(0); 1910 rc = spdk_bdev_write_blocks(g_desc, ch[0], &buf, 0, 1, io_during_io_done, &status); 1911 CU_ASSERT(rc == 0); 1912 1913 spdk_delay_us(10); 1914 stub_complete_io(g_bdev.io_target, 1); 1915 poll_threads(); 1916 CU_ASSERT(status == true); 1917 1918 1919 set_thread(1); 1920 rc = spdk_bdev_read_blocks(g_desc, ch[1], &buf, 0, 1, io_during_io_done, &status); 1921 CU_ASSERT(rc == 0); 1922 1923 spdk_delay_us(10); 1924 stub_complete_io(g_bdev.io_target, 1); 1925 poll_threads(); 1926 CU_ASSERT(status == true); 1927 1928 set_thread(0); 1929 1930 /* Check if histogram gathered data from all I/O channels */ 1931 spdk_bdev_histogram_get(&g_bdev.bdev, histogram, histogram_data_cb, NULL); 1932 poll_threads(); 1933 CU_ASSERT(g_status == 0); 1934 CU_ASSERT(g_bdev.bdev.internal.histogram_enabled == true); 1935 SPDK_CU_ASSERT_FATAL(g_histogram != NULL); 1936 1937 g_count = 0; 1938 spdk_histogram_data_iterate(g_histogram, histogram_io_count, NULL); 1939 CU_ASSERT(g_count == 2); 1940 1941 /* Disable histogram */ 1942 spdk_bdev_histogram_enable(&g_bdev.bdev, histogram_status_cb, NULL, false); 1943 poll_threads(); 1944 CU_ASSERT(g_status == 0); 1945 CU_ASSERT(g_bdev.bdev.internal.histogram_enabled == false); 1946 1947 spdk_histogram_data_free(histogram); 1948 1949 /* Tear down the channels */ 1950 set_thread(0); 1951 spdk_put_io_channel(ch[0]); 1952 set_thread(1); 1953 spdk_put_io_channel(ch[1]); 1954 poll_threads(); 1955 set_thread(0); 1956 teardown_test(); 1957 1958 } 1959 1960 struct timeout_io_cb_arg { 1961 struct iovec iov; 1962 uint8_t type; 1963 }; 1964 1965 static int 1966 bdev_channel_count_submitted_io(struct spdk_bdev_channel *ch) 1967 { 1968 struct spdk_bdev_io *bdev_io; 1969 int n = 0; 1970 1971 if (!ch) { 1972 return -1; 1973 } 1974 1975 TAILQ_FOREACH(bdev_io, &ch->io_submitted, internal.ch_link) { 1976 n++; 1977 } 1978 1979 return n; 1980 } 1981 1982 static void 1983 bdev_channel_io_timeout_cb(void *cb_arg, struct spdk_bdev_io *bdev_io) 1984 { 1985 struct timeout_io_cb_arg *ctx = cb_arg; 1986 1987 ctx->type = bdev_io->type; 1988 ctx->iov.iov_base = bdev_io->iov.iov_base; 1989 ctx->iov.iov_len = bdev_io->iov.iov_len; 1990 } 1991 1992 static bool g_io_done; 1993 1994 static void 1995 io_done(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg) 1996 { 1997 g_io_done = true; 1998 spdk_bdev_free_io(bdev_io); 1999 } 2000 2001 static void 2002 bdev_set_io_timeout_mt(void) 2003 { 2004 struct spdk_io_channel *ch[3]; 2005 struct spdk_bdev_channel *bdev_ch[3]; 2006 struct timeout_io_cb_arg cb_arg; 2007 2008 setup_test(); 2009 2010 g_bdev.bdev.optimal_io_boundary = 16; 2011 g_bdev.bdev.split_on_optimal_io_boundary = true; 2012 2013 set_thread(0); 2014 ch[0] = spdk_bdev_get_io_channel(g_desc); 2015 CU_ASSERT(ch[0] != NULL); 2016 2017 set_thread(1); 2018 ch[1] = spdk_bdev_get_io_channel(g_desc); 2019 CU_ASSERT(ch[1] != NULL); 2020 2021 set_thread(2); 2022 ch[2] = spdk_bdev_get_io_channel(g_desc); 2023 CU_ASSERT(ch[2] != NULL); 2024 2025 /* Multi-thread mode 2026 * 1, Check the poller was registered successfully 2027 * 2, Check the timeout IO and ensure the IO was the submitted by user 2028 * 3, Check the link int the bdev_ch works right. 2029 * 4, Close desc and put io channel during the timeout poller is polling 2030 */ 2031 2032 /* In desc thread set the timeout */ 2033 set_thread(0); 2034 CU_ASSERT(spdk_bdev_set_timeout(g_desc, 5, bdev_channel_io_timeout_cb, &cb_arg) == 0); 2035 CU_ASSERT(g_desc->io_timeout_poller != NULL); 2036 CU_ASSERT(g_desc->cb_fn == bdev_channel_io_timeout_cb); 2037 CU_ASSERT(g_desc->cb_arg == &cb_arg); 2038 2039 /* check the IO submitted list and timeout handler */ 2040 CU_ASSERT(spdk_bdev_read_blocks(g_desc, ch[0], (void *)0x2000, 0, 1, io_done, NULL) == 0); 2041 bdev_ch[0] = spdk_io_channel_get_ctx(ch[0]); 2042 CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch[0]) == 1); 2043 2044 set_thread(1); 2045 CU_ASSERT(spdk_bdev_write_blocks(g_desc, ch[1], (void *)0x1000, 0, 1, io_done, NULL) == 0); 2046 bdev_ch[1] = spdk_io_channel_get_ctx(ch[1]); 2047 CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch[1]) == 1); 2048 2049 /* Now test that a single-vector command is split correctly. 2050 * Offset 14, length 8, payload 0xF000 2051 * Child - Offset 14, length 2, payload 0xF000 2052 * Child - Offset 16, length 6, payload 0xF000 + 2 * 512 2053 * 2054 * Set up the expected values before calling spdk_bdev_read_blocks 2055 */ 2056 set_thread(2); 2057 CU_ASSERT(spdk_bdev_read_blocks(g_desc, ch[2], (void *)0xF000, 14, 8, io_done, NULL) == 0); 2058 bdev_ch[2] = spdk_io_channel_get_ctx(ch[2]); 2059 CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch[2]) == 3); 2060 2061 set_thread(0); 2062 memset(&cb_arg, 0, sizeof(cb_arg)); 2063 spdk_delay_us(3 * spdk_get_ticks_hz()); 2064 poll_threads(); 2065 CU_ASSERT(cb_arg.type == 0); 2066 CU_ASSERT(cb_arg.iov.iov_base == (void *)0x0); 2067 CU_ASSERT(cb_arg.iov.iov_len == 0); 2068 2069 /* Now the time reach the limit */ 2070 spdk_delay_us(3 * spdk_get_ticks_hz()); 2071 poll_thread(0); 2072 CU_ASSERT(cb_arg.type == SPDK_BDEV_IO_TYPE_READ); 2073 CU_ASSERT(cb_arg.iov.iov_base == (void *)0x2000); 2074 CU_ASSERT(cb_arg.iov.iov_len == 1 * g_bdev.bdev.blocklen); 2075 stub_complete_io(g_bdev.io_target, 1); 2076 CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch[0]) == 0); 2077 2078 memset(&cb_arg, 0, sizeof(cb_arg)); 2079 set_thread(1); 2080 poll_thread(1); 2081 CU_ASSERT(cb_arg.type == SPDK_BDEV_IO_TYPE_WRITE); 2082 CU_ASSERT(cb_arg.iov.iov_base == (void *)0x1000); 2083 CU_ASSERT(cb_arg.iov.iov_len == 1 * g_bdev.bdev.blocklen); 2084 stub_complete_io(g_bdev.io_target, 1); 2085 CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch[1]) == 0); 2086 2087 memset(&cb_arg, 0, sizeof(cb_arg)); 2088 set_thread(2); 2089 poll_thread(2); 2090 CU_ASSERT(cb_arg.type == SPDK_BDEV_IO_TYPE_READ); 2091 CU_ASSERT(cb_arg.iov.iov_base == (void *)0xF000); 2092 CU_ASSERT(cb_arg.iov.iov_len == 8 * g_bdev.bdev.blocklen); 2093 stub_complete_io(g_bdev.io_target, 1); 2094 CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch[2]) == 2); 2095 stub_complete_io(g_bdev.io_target, 1); 2096 CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch[2]) == 0); 2097 2098 /* Run poll_timeout_done() it means complete the timeout poller */ 2099 set_thread(0); 2100 poll_thread(0); 2101 CU_ASSERT(g_desc->refs == 0); 2102 CU_ASSERT(spdk_bdev_read_blocks(g_desc, ch[0], (void *)0x1000, 0, 1, io_done, NULL) == 0); 2103 set_thread(1); 2104 CU_ASSERT(spdk_bdev_write_blocks(g_desc, ch[1], (void *)0x2000, 0, 2, io_done, NULL) == 0); 2105 set_thread(2); 2106 CU_ASSERT(spdk_bdev_read_blocks(g_desc, ch[2], (void *)0x3000, 0, 3, io_done, NULL) == 0); 2107 2108 /* Trigger timeout poller to run again, desc->refs is incremented. 2109 * In thread 0 we destroy the io channel before timeout poller runs. 2110 * Timeout callback is not called on thread 0. 2111 */ 2112 spdk_delay_us(6 * spdk_get_ticks_hz()); 2113 memset(&cb_arg, 0, sizeof(cb_arg)); 2114 set_thread(0); 2115 stub_complete_io(g_bdev.io_target, 1); 2116 spdk_put_io_channel(ch[0]); 2117 poll_thread(0); 2118 CU_ASSERT(g_desc->refs == 1) 2119 CU_ASSERT(cb_arg.type == 0); 2120 CU_ASSERT(cb_arg.iov.iov_base == (void *)0x0); 2121 CU_ASSERT(cb_arg.iov.iov_len == 0); 2122 2123 /* In thread 1 timeout poller runs then we destroy the io channel 2124 * Timeout callback is called on thread 1. 2125 */ 2126 memset(&cb_arg, 0, sizeof(cb_arg)); 2127 set_thread(1); 2128 poll_thread(1); 2129 stub_complete_io(g_bdev.io_target, 1); 2130 spdk_put_io_channel(ch[1]); 2131 poll_thread(1); 2132 CU_ASSERT(cb_arg.type == SPDK_BDEV_IO_TYPE_WRITE); 2133 CU_ASSERT(cb_arg.iov.iov_base == (void *)0x2000); 2134 CU_ASSERT(cb_arg.iov.iov_len == 2 * g_bdev.bdev.blocklen); 2135 2136 /* Close the desc. 2137 * Unregister the timeout poller first. 2138 * Then decrement desc->refs but it's not zero yet so desc is not freed. 2139 */ 2140 set_thread(0); 2141 spdk_bdev_close(g_desc); 2142 CU_ASSERT(g_desc->refs == 1); 2143 CU_ASSERT(g_desc->io_timeout_poller == NULL); 2144 2145 /* Timeout poller runs on thread 2 then we destroy the io channel. 2146 * Desc is closed so we would exit the timeout poller directly. 2147 * timeout callback is not called on thread 2. 2148 */ 2149 memset(&cb_arg, 0, sizeof(cb_arg)); 2150 set_thread(2); 2151 poll_thread(2); 2152 stub_complete_io(g_bdev.io_target, 1); 2153 spdk_put_io_channel(ch[2]); 2154 poll_thread(2); 2155 CU_ASSERT(cb_arg.type == 0); 2156 CU_ASSERT(cb_arg.iov.iov_base == (void *)0x0); 2157 CU_ASSERT(cb_arg.iov.iov_len == 0); 2158 2159 set_thread(0); 2160 poll_thread(0); 2161 g_teardown_done = false; 2162 unregister_bdev(&g_bdev); 2163 spdk_io_device_unregister(&g_io_device, NULL); 2164 spdk_bdev_finish(finish_cb, NULL); 2165 spdk_iobuf_finish(finish_cb, NULL); 2166 poll_threads(); 2167 memset(&g_bdev, 0, sizeof(g_bdev)); 2168 CU_ASSERT(g_teardown_done == true); 2169 g_teardown_done = false; 2170 free_threads(); 2171 free_cores(); 2172 } 2173 2174 static bool g_io_done2; 2175 static bool g_lock_lba_range_done; 2176 static bool g_unlock_lba_range_done; 2177 2178 static void 2179 io_done2(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg) 2180 { 2181 g_io_done2 = true; 2182 spdk_bdev_free_io(bdev_io); 2183 } 2184 2185 static void 2186 lock_lba_range_done(struct lba_range *range, void *ctx, int status) 2187 { 2188 g_lock_lba_range_done = true; 2189 } 2190 2191 static void 2192 unlock_lba_range_done(struct lba_range *range, void *ctx, int status) 2193 { 2194 g_unlock_lba_range_done = true; 2195 } 2196 2197 static uint32_t 2198 stub_channel_outstanding_cnt(void *io_target) 2199 { 2200 struct spdk_io_channel *_ch = spdk_get_io_channel(io_target); 2201 struct ut_bdev_channel *ch = spdk_io_channel_get_ctx(_ch); 2202 uint32_t outstanding_cnt; 2203 2204 outstanding_cnt = ch->outstanding_cnt; 2205 2206 spdk_put_io_channel(_ch); 2207 return outstanding_cnt; 2208 } 2209 2210 static void 2211 lock_lba_range_then_submit_io(void) 2212 { 2213 struct spdk_bdev_desc *desc = NULL; 2214 void *io_target; 2215 struct spdk_io_channel *io_ch[3]; 2216 struct spdk_bdev_channel *bdev_ch[3]; 2217 struct lba_range *range; 2218 char buf[4096]; 2219 int ctx0, ctx1, ctx2; 2220 int rc; 2221 2222 setup_test(); 2223 2224 io_target = g_bdev.io_target; 2225 desc = g_desc; 2226 2227 set_thread(0); 2228 io_ch[0] = spdk_bdev_get_io_channel(desc); 2229 bdev_ch[0] = spdk_io_channel_get_ctx(io_ch[0]); 2230 CU_ASSERT(io_ch[0] != NULL); 2231 2232 set_thread(1); 2233 io_ch[1] = spdk_bdev_get_io_channel(desc); 2234 bdev_ch[1] = spdk_io_channel_get_ctx(io_ch[1]); 2235 CU_ASSERT(io_ch[1] != NULL); 2236 2237 set_thread(0); 2238 g_lock_lba_range_done = false; 2239 rc = bdev_lock_lba_range(desc, io_ch[0], 20, 10, lock_lba_range_done, &ctx0); 2240 CU_ASSERT(rc == 0); 2241 poll_threads(); 2242 2243 /* The lock should immediately become valid, since there are no outstanding 2244 * write I/O. 2245 */ 2246 CU_ASSERT(g_lock_lba_range_done == true); 2247 range = TAILQ_FIRST(&bdev_ch[0]->locked_ranges); 2248 SPDK_CU_ASSERT_FATAL(range != NULL); 2249 CU_ASSERT(range->offset == 20); 2250 CU_ASSERT(range->length == 10); 2251 CU_ASSERT(range->owner_ch == bdev_ch[0]); 2252 2253 g_io_done = false; 2254 CU_ASSERT(TAILQ_EMPTY(&bdev_ch[0]->io_locked)); 2255 rc = spdk_bdev_read_blocks(desc, io_ch[0], buf, 20, 1, io_done, &ctx0); 2256 CU_ASSERT(rc == 0); 2257 CU_ASSERT(stub_channel_outstanding_cnt(io_target) == 1); 2258 2259 stub_complete_io(io_target, 1); 2260 poll_threads(); 2261 CU_ASSERT(g_io_done == true); 2262 CU_ASSERT(TAILQ_EMPTY(&bdev_ch[0]->io_locked)); 2263 2264 /* Try a write I/O. This should actually be allowed to execute, since the channel 2265 * holding the lock is submitting the write I/O. 2266 */ 2267 g_io_done = false; 2268 CU_ASSERT(TAILQ_EMPTY(&bdev_ch[0]->io_locked)); 2269 rc = spdk_bdev_write_blocks(desc, io_ch[0], buf, 20, 1, io_done, &ctx0); 2270 CU_ASSERT(rc == 0); 2271 CU_ASSERT(stub_channel_outstanding_cnt(io_target) == 1); 2272 2273 stub_complete_io(io_target, 1); 2274 poll_threads(); 2275 CU_ASSERT(g_io_done == true); 2276 CU_ASSERT(TAILQ_EMPTY(&bdev_ch[0]->io_locked)); 2277 2278 /* Try a write I/O. This should get queued in the io_locked tailq. */ 2279 set_thread(1); 2280 g_io_done = false; 2281 CU_ASSERT(TAILQ_EMPTY(&bdev_ch[1]->io_locked)); 2282 rc = spdk_bdev_write_blocks(desc, io_ch[1], buf, 20, 1, io_done, &ctx1); 2283 CU_ASSERT(rc == 0); 2284 poll_threads(); 2285 CU_ASSERT(stub_channel_outstanding_cnt(io_target) == 0); 2286 CU_ASSERT(!TAILQ_EMPTY(&bdev_ch[1]->io_locked)); 2287 CU_ASSERT(g_io_done == false); 2288 2289 /* Try to unlock the lba range using thread 1's io_ch. This should fail. */ 2290 rc = bdev_unlock_lba_range(desc, io_ch[1], 20, 10, unlock_lba_range_done, &ctx1); 2291 CU_ASSERT(rc == -EINVAL); 2292 2293 /* Now create a new channel and submit a write I/O with it. This should also be queued. 2294 * The new channel should inherit the active locks from the bdev's internal list. 2295 */ 2296 set_thread(2); 2297 io_ch[2] = spdk_bdev_get_io_channel(desc); 2298 bdev_ch[2] = spdk_io_channel_get_ctx(io_ch[2]); 2299 CU_ASSERT(io_ch[2] != NULL); 2300 2301 g_io_done2 = false; 2302 CU_ASSERT(TAILQ_EMPTY(&bdev_ch[2]->io_locked)); 2303 rc = spdk_bdev_write_blocks(desc, io_ch[2], buf, 22, 2, io_done2, &ctx2); 2304 CU_ASSERT(rc == 0); 2305 poll_threads(); 2306 CU_ASSERT(stub_channel_outstanding_cnt(io_target) == 0); 2307 CU_ASSERT(!TAILQ_EMPTY(&bdev_ch[2]->io_locked)); 2308 CU_ASSERT(g_io_done2 == false); 2309 2310 set_thread(0); 2311 rc = bdev_unlock_lba_range(desc, io_ch[0], 20, 10, unlock_lba_range_done, &ctx0); 2312 CU_ASSERT(rc == 0); 2313 poll_threads(); 2314 CU_ASSERT(TAILQ_EMPTY(&bdev_ch[0]->locked_ranges)); 2315 2316 /* The LBA range is unlocked, so the write IOs should now have started execution. */ 2317 CU_ASSERT(TAILQ_EMPTY(&bdev_ch[1]->io_locked)); 2318 CU_ASSERT(TAILQ_EMPTY(&bdev_ch[2]->io_locked)); 2319 2320 set_thread(1); 2321 CU_ASSERT(stub_channel_outstanding_cnt(io_target) == 1); 2322 stub_complete_io(io_target, 1); 2323 set_thread(2); 2324 CU_ASSERT(stub_channel_outstanding_cnt(io_target) == 1); 2325 stub_complete_io(io_target, 1); 2326 2327 poll_threads(); 2328 CU_ASSERT(g_io_done == true); 2329 CU_ASSERT(g_io_done2 == true); 2330 2331 /* Tear down the channels */ 2332 set_thread(0); 2333 spdk_put_io_channel(io_ch[0]); 2334 set_thread(1); 2335 spdk_put_io_channel(io_ch[1]); 2336 set_thread(2); 2337 spdk_put_io_channel(io_ch[2]); 2338 poll_threads(); 2339 set_thread(0); 2340 teardown_test(); 2341 } 2342 2343 /* spdk_bdev_reset() freezes and unfreezes I/O channels by using spdk_for_each_channel(). 2344 * spdk_bdev_unregister() calls spdk_io_device_unregister() in the end. However 2345 * spdk_io_device_unregister() fails if it is called while executing spdk_for_each_channel(). 2346 * Hence, in this case, spdk_io_device_unregister() is deferred until spdk_bdev_reset() 2347 * completes. Test this behavior. 2348 */ 2349 static void 2350 unregister_during_reset(void) 2351 { 2352 struct spdk_io_channel *io_ch[2]; 2353 bool done_reset = false, done_unregister = false; 2354 int rc; 2355 2356 setup_test(); 2357 set_thread(0); 2358 2359 io_ch[0] = spdk_bdev_get_io_channel(g_desc); 2360 SPDK_CU_ASSERT_FATAL(io_ch[0] != NULL); 2361 2362 set_thread(1); 2363 2364 io_ch[1] = spdk_bdev_get_io_channel(g_desc); 2365 SPDK_CU_ASSERT_FATAL(io_ch[1] != NULL); 2366 2367 set_thread(0); 2368 2369 CU_ASSERT(g_bdev.bdev.internal.reset_in_progress == NULL); 2370 2371 rc = spdk_bdev_reset(g_desc, io_ch[0], reset_done, &done_reset); 2372 CU_ASSERT(rc == 0); 2373 2374 set_thread(0); 2375 2376 poll_thread_times(0, 1); 2377 2378 spdk_bdev_close(g_desc); 2379 spdk_bdev_unregister(&g_bdev.bdev, _bdev_unregistered, &done_unregister); 2380 2381 CU_ASSERT(done_reset == false); 2382 CU_ASSERT(done_unregister == false); 2383 2384 poll_threads(); 2385 2386 stub_complete_io(g_bdev.io_target, 0); 2387 2388 poll_threads(); 2389 2390 CU_ASSERT(done_reset == true); 2391 CU_ASSERT(done_unregister == false); 2392 2393 spdk_put_io_channel(io_ch[0]); 2394 2395 set_thread(1); 2396 2397 spdk_put_io_channel(io_ch[1]); 2398 2399 poll_threads(); 2400 2401 CU_ASSERT(done_unregister == true); 2402 2403 /* Restore the original g_bdev so that we can use teardown_test(). */ 2404 set_thread(0); 2405 register_bdev(&g_bdev, "ut_bdev", &g_io_device); 2406 spdk_bdev_open_ext("ut_bdev", true, _bdev_event_cb, NULL, &g_desc); 2407 teardown_test(); 2408 } 2409 2410 static void 2411 bdev_init_wt_cb(void *done, int rc) 2412 { 2413 } 2414 2415 static int 2416 wrong_thread_setup(void) 2417 { 2418 allocate_cores(1); 2419 allocate_threads(2); 2420 set_thread(0); 2421 2422 spdk_io_device_register(&g_accel_io_device, ut_accel_ch_create_cb, 2423 ut_accel_ch_destroy_cb, 0, NULL); 2424 spdk_bdev_initialize(bdev_init_wt_cb, NULL); 2425 spdk_io_device_register(&g_io_device, stub_create_ch, stub_destroy_ch, 2426 sizeof(struct ut_bdev_channel), NULL); 2427 2428 set_thread(1); 2429 2430 return 0; 2431 } 2432 2433 static int 2434 wrong_thread_teardown(void) 2435 { 2436 int rc = 0; 2437 2438 set_thread(0); 2439 2440 g_teardown_done = false; 2441 spdk_io_device_unregister(&g_io_device, NULL); 2442 spdk_bdev_finish(finish_cb, NULL); 2443 poll_threads(); 2444 memset(&g_bdev, 0, sizeof(g_bdev)); 2445 if (!g_teardown_done) { 2446 fprintf(stderr, "%s:%d %s: teardown not done\n", __FILE__, __LINE__, __func__); 2447 rc = -1; 2448 } 2449 g_teardown_done = false; 2450 2451 spdk_io_device_unregister(&g_accel_io_device, NULL); 2452 free_threads(); 2453 free_cores(); 2454 2455 return rc; 2456 } 2457 2458 static void 2459 _bdev_unregistered_wt(void *ctx, int rc) 2460 { 2461 struct spdk_thread **threadp = ctx; 2462 2463 *threadp = spdk_get_thread(); 2464 } 2465 2466 static void 2467 spdk_bdev_register_wt(void) 2468 { 2469 struct spdk_bdev bdev = { 0 }; 2470 int rc; 2471 struct spdk_thread *unreg_thread; 2472 2473 bdev.name = "wt_bdev"; 2474 bdev.fn_table = &fn_table; 2475 bdev.module = &bdev_ut_if; 2476 bdev.blocklen = 4096; 2477 bdev.blockcnt = 1024; 2478 2479 /* Can register only on app thread */ 2480 rc = spdk_bdev_register(&bdev); 2481 CU_ASSERT(rc == -EINVAL); 2482 2483 /* Can unregister on any thread */ 2484 set_thread(0); 2485 rc = spdk_bdev_register(&bdev); 2486 CU_ASSERT(rc == 0); 2487 set_thread(1); 2488 unreg_thread = NULL; 2489 spdk_bdev_unregister(&bdev, _bdev_unregistered_wt, &unreg_thread); 2490 poll_threads(); 2491 CU_ASSERT(unreg_thread == spdk_get_thread()); 2492 2493 /* Can unregister by name on any thread */ 2494 set_thread(0); 2495 rc = spdk_bdev_register(&bdev); 2496 CU_ASSERT(rc == 0); 2497 set_thread(1); 2498 unreg_thread = NULL; 2499 rc = spdk_bdev_unregister_by_name(bdev.name, bdev.module, _bdev_unregistered_wt, 2500 &unreg_thread); 2501 CU_ASSERT(rc == 0); 2502 poll_threads(); 2503 CU_ASSERT(unreg_thread == spdk_get_thread()); 2504 } 2505 2506 static void 2507 wait_for_examine_cb(void *arg) 2508 { 2509 struct spdk_thread **thread = arg; 2510 2511 *thread = spdk_get_thread(); 2512 } 2513 2514 static void 2515 spdk_bdev_examine_wt(void) 2516 { 2517 int rc; 2518 bool save_auto_examine = g_bdev_opts.bdev_auto_examine; 2519 struct spdk_thread *thread; 2520 2521 g_bdev_opts.bdev_auto_examine = false; 2522 2523 set_thread(0); 2524 register_bdev(&g_bdev, "ut_bdev_wt", &g_io_device); 2525 CU_ASSERT(spdk_bdev_get_by_name("ut_bdev_wt") != NULL); 2526 set_thread(1); 2527 2528 /* Can examine only on the app thread */ 2529 rc = spdk_bdev_examine("ut_bdev_wt"); 2530 CU_ASSERT(rc == -EINVAL); 2531 unregister_bdev(&g_bdev); 2532 CU_ASSERT(spdk_bdev_get_by_name("ut_bdev_wt") == NULL); 2533 2534 /* Can wait for examine on app thread, callback called on app thread. */ 2535 set_thread(0); 2536 register_bdev(&g_bdev, "ut_bdev_wt", &g_io_device); 2537 CU_ASSERT(spdk_bdev_get_by_name("ut_bdev_wt") != NULL); 2538 thread = NULL; 2539 rc = spdk_bdev_wait_for_examine(wait_for_examine_cb, &thread); 2540 CU_ASSERT(rc == 0); 2541 poll_threads(); 2542 CU_ASSERT(thread == spdk_get_thread()); 2543 unregister_bdev(&g_bdev); 2544 CU_ASSERT(spdk_bdev_get_by_name("ut_bdev_wt") == NULL); 2545 2546 /* Can wait for examine on non-app thread, callback called on same thread. */ 2547 set_thread(0); 2548 register_bdev(&g_bdev, "ut_bdev_wt", &g_io_device); 2549 CU_ASSERT(spdk_bdev_get_by_name("ut_bdev_wt") != NULL); 2550 thread = NULL; 2551 rc = spdk_bdev_wait_for_examine(wait_for_examine_cb, &thread); 2552 CU_ASSERT(rc == 0); 2553 poll_threads(); 2554 CU_ASSERT(thread == spdk_get_thread()); 2555 unregister_bdev(&g_bdev); 2556 CU_ASSERT(spdk_bdev_get_by_name("ut_bdev_wt") == NULL); 2557 2558 unregister_bdev(&g_bdev); 2559 g_bdev_opts.bdev_auto_examine = save_auto_examine; 2560 } 2561 2562 static void 2563 event_notify_and_close(void) 2564 { 2565 int resize_notify_count = 0; 2566 struct spdk_bdev_desc *desc = NULL; 2567 struct spdk_bdev *bdev; 2568 int rc; 2569 2570 setup_test(); 2571 set_thread(0); 2572 2573 /* setup_test() automatically opens the bdev, but this test needs to do 2574 * that in a different way. */ 2575 spdk_bdev_close(g_desc); 2576 poll_threads(); 2577 2578 set_thread(1); 2579 2580 rc = spdk_bdev_open_ext("ut_bdev", true, _bdev_event_cb, &resize_notify_count, &desc); 2581 CU_ASSERT(rc == 0); 2582 SPDK_CU_ASSERT_FATAL(desc != NULL); 2583 2584 bdev = spdk_bdev_desc_get_bdev(desc); 2585 SPDK_CU_ASSERT_FATAL(bdev != NULL); 2586 2587 /* Test a normal case that a resize event is notified. */ 2588 set_thread(0); 2589 2590 rc = spdk_bdev_notify_blockcnt_change(bdev, 1024 * 2); 2591 CU_ASSERT(rc == 0); 2592 CU_ASSERT(bdev->blockcnt == 1024 * 2); 2593 CU_ASSERT(desc->refs == 1); 2594 CU_ASSERT(resize_notify_count == 0); 2595 2596 poll_threads(); 2597 2598 CU_ASSERT(desc->refs == 0); 2599 CU_ASSERT(resize_notify_count == 1); 2600 2601 /* Test a complex case if the bdev is closed after two event_notify messages are sent, 2602 * then both event_notify messages are discarded and the desc is freed. 2603 */ 2604 rc = spdk_bdev_notify_blockcnt_change(bdev, 1024 * 3); 2605 CU_ASSERT(rc == 0); 2606 CU_ASSERT(bdev->blockcnt == 1024 * 3); 2607 CU_ASSERT(desc->refs == 1); 2608 CU_ASSERT(resize_notify_count == 1); 2609 2610 rc = spdk_bdev_notify_blockcnt_change(bdev, 1024 * 4); 2611 CU_ASSERT(rc == 0); 2612 CU_ASSERT(bdev->blockcnt == 1024 * 4); 2613 CU_ASSERT(desc->refs == 2); 2614 CU_ASSERT(resize_notify_count == 1); 2615 2616 set_thread(1); 2617 2618 spdk_bdev_close(desc); 2619 CU_ASSERT(desc->closed == true); 2620 CU_ASSERT(desc->refs == 2); 2621 CU_ASSERT(resize_notify_count == 1); 2622 2623 poll_threads(); 2624 2625 CU_ASSERT(resize_notify_count == 1); 2626 2627 set_thread(0); 2628 2629 /* Restore g_desc. Then, we can execute teardown_test(). */ 2630 spdk_bdev_open_ext("ut_bdev", true, _bdev_event_cb, NULL, &g_desc); 2631 teardown_test(); 2632 } 2633 2634 int 2635 main(int argc, char **argv) 2636 { 2637 CU_pSuite suite = NULL; 2638 CU_pSuite suite_wt = NULL; 2639 unsigned int num_failures; 2640 2641 CU_initialize_registry(); 2642 2643 suite = CU_add_suite("bdev", NULL, NULL); 2644 suite_wt = CU_add_suite("bdev_wrong_thread", wrong_thread_setup, wrong_thread_teardown); 2645 2646 CU_ADD_TEST(suite, basic); 2647 CU_ADD_TEST(suite, unregister_and_close); 2648 CU_ADD_TEST(suite, unregister_and_close_different_threads); 2649 CU_ADD_TEST(suite, basic_qos); 2650 CU_ADD_TEST(suite, put_channel_during_reset); 2651 CU_ADD_TEST(suite, aborted_reset); 2652 CU_ADD_TEST(suite, aborted_reset_no_outstanding_io); 2653 CU_ADD_TEST(suite, io_during_reset); 2654 CU_ADD_TEST(suite, reset_completions); 2655 CU_ADD_TEST(suite, io_during_qos_queue); 2656 CU_ADD_TEST(suite, io_during_qos_reset); 2657 CU_ADD_TEST(suite, enomem); 2658 CU_ADD_TEST(suite, enomem_multi_bdev); 2659 CU_ADD_TEST(suite, enomem_multi_bdev_unregister); 2660 CU_ADD_TEST(suite, enomem_multi_io_target); 2661 CU_ADD_TEST(suite, qos_dynamic_enable); 2662 CU_ADD_TEST(suite, bdev_histograms_mt); 2663 CU_ADD_TEST(suite, bdev_set_io_timeout_mt); 2664 CU_ADD_TEST(suite, lock_lba_range_then_submit_io); 2665 CU_ADD_TEST(suite, unregister_during_reset); 2666 CU_ADD_TEST(suite_wt, spdk_bdev_register_wt); 2667 CU_ADD_TEST(suite_wt, spdk_bdev_examine_wt); 2668 CU_ADD_TEST(suite, event_notify_and_close); 2669 2670 num_failures = spdk_ut_run_tests(argc, argv, NULL); 2671 CU_cleanup_registry(); 2672 return num_failures; 2673 } 2674