/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (C) 2017 Intel Corporation.
 * All rights reserved.
 */

#include "spdk_cunit.h"

#include "common/lib/ut_multithread.c"
#include "unit/lib/json_mock.c"

#include "spdk/config.h"
/* HACK: disable VTune integration so the unit test doesn't need VTune headers and libs to build */
#undef SPDK_CONFIG_VTUNE

#include "bdev/bdev.c"

#define BDEV_UT_NUM_THREADS 3

DEFINE_STUB(spdk_notify_send, uint64_t, (const char *type, const char *ctx), 0);
DEFINE_STUB(spdk_notify_type_register, struct spdk_notify_type *, (const char *type), NULL);
DEFINE_STUB_V(spdk_scsi_nvme_translate, (const struct spdk_bdev_io *bdev_io, int *sc, int *sk,
		int *asc, int *ascq));
DEFINE_STUB(spdk_memory_domain_get_dma_device_id, const char *, (struct spdk_memory_domain *domain),
	    "test_domain");
DEFINE_STUB(spdk_memory_domain_get_dma_device_type, enum spdk_dma_device_type,
	    (struct spdk_memory_domain *domain), 0);

DEFINE_RETURN_MOCK(spdk_memory_domain_pull_data, int);
int
spdk_memory_domain_pull_data(struct spdk_memory_domain *src_domain, void *src_domain_ctx,
			     struct iovec *src_iov, uint32_t src_iov_cnt, struct iovec *dst_iov, uint32_t dst_iov_cnt,
			     spdk_memory_domain_data_cpl_cb cpl_cb, void *cpl_cb_arg)
{
	HANDLE_RETURN_MOCK(spdk_memory_domain_pull_data);

	cpl_cb(cpl_cb_arg, 0);
	return 0;
}

DEFINE_RETURN_MOCK(spdk_memory_domain_push_data, int);
int
spdk_memory_domain_push_data(struct spdk_memory_domain *dst_domain, void *dst_domain_ctx,
			     struct iovec *dst_iov, uint32_t dst_iovcnt, struct iovec *src_iov, uint32_t src_iovcnt,
			     spdk_memory_domain_data_cpl_cb cpl_cb, void *cpl_cb_arg)
{
	HANDLE_RETURN_MOCK(spdk_memory_domain_push_data);

	cpl_cb(cpl_cb_arg, 0);
	return 0;
}

struct ut_bdev {
	struct spdk_bdev bdev;
	void *io_target;
};

struct ut_bdev_channel {
	TAILQ_HEAD(, spdk_bdev_io) outstanding_io;
	uint32_t outstanding_cnt;
	uint32_t avail_cnt;
};

int g_io_device;
struct ut_bdev g_bdev;
struct spdk_bdev_desc *g_desc;
bool g_teardown_done = false;
bool g_get_io_channel = true;
bool g_create_ch = true;
bool g_init_complete_called = false;
bool g_fini_start_called = true;
int g_status = 0;
int g_count = 0;
struct spdk_histogram_data *g_histogram = NULL;

static int
stub_create_ch(void *io_device, void *ctx_buf)
{
	struct ut_bdev_channel *ch = ctx_buf;

	if (g_create_ch == false) {
		return -1;
	}

	TAILQ_INIT(&ch->outstanding_io);
	ch->outstanding_cnt = 0;
	/*
	 * When avail gets to 0, the submit_request function will return ENOMEM.
	 * Most tests do not want ENOMEM to occur, so by default set this to a
	 * big value that won't get hit. The ENOMEM tests can then override this
	 * value to something much smaller to induce ENOMEM conditions.
	 */
	ch->avail_cnt = 2048;
	return 0;
}

static void
stub_destroy_ch(void *io_device, void *ctx_buf)
{
}

static struct spdk_io_channel *
stub_get_io_channel(void *ctx)
{
	struct ut_bdev *ut_bdev = ctx;

	if (g_get_io_channel == true) {
		return spdk_get_io_channel(ut_bdev->io_target);
	} else {
		return NULL;
	}
}

static int
stub_destruct(void *ctx)
{
	return 0;
}

static void
stub_submit_request(struct spdk_io_channel *_ch, struct spdk_bdev_io *bdev_io)
{
	struct ut_bdev_channel *ch = spdk_io_channel_get_ctx(_ch);
	struct spdk_bdev_io *io;

	if (bdev_io->type == SPDK_BDEV_IO_TYPE_RESET) {
		while (!TAILQ_EMPTY(&ch->outstanding_io)) {
			io = TAILQ_FIRST(&ch->outstanding_io);
			TAILQ_REMOVE(&ch->outstanding_io, io, module_link);
			ch->outstanding_cnt--;
			spdk_bdev_io_complete(io, SPDK_BDEV_IO_STATUS_ABORTED);
			ch->avail_cnt++;
		}
	} else if (bdev_io->type == SPDK_BDEV_IO_TYPE_ABORT) {
		TAILQ_FOREACH(io, &ch->outstanding_io, module_link) {
			if (io == bdev_io->u.abort.bio_to_abort) {
				TAILQ_REMOVE(&ch->outstanding_io, io, module_link);
				ch->outstanding_cnt--;
				spdk_bdev_io_complete(io, SPDK_BDEV_IO_STATUS_ABORTED);
				ch->avail_cnt++;

				spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_SUCCESS);
				return;
			}
		}

		spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_FAILED);
		return;
	}

	if (ch->avail_cnt > 0) {
		TAILQ_INSERT_TAIL(&ch->outstanding_io, bdev_io, module_link);
		ch->outstanding_cnt++;
		ch->avail_cnt--;
	} else {
		spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_NOMEM);
	}
}

static uint32_t
stub_complete_io(void *io_target, uint32_t num_to_complete)
{
	struct spdk_io_channel *_ch = spdk_get_io_channel(io_target);
	struct ut_bdev_channel *ch = spdk_io_channel_get_ctx(_ch);
	struct spdk_bdev_io *io;
	bool complete_all = (num_to_complete == 0);
	uint32_t num_completed = 0;

	while (complete_all || num_completed < num_to_complete) {
		if (TAILQ_EMPTY(&ch->outstanding_io)) {
			break;
		}
		io = TAILQ_FIRST(&ch->outstanding_io);
		TAILQ_REMOVE(&ch->outstanding_io, io, module_link);
		ch->outstanding_cnt--;
		spdk_bdev_io_complete(io, SPDK_BDEV_IO_STATUS_SUCCESS);
		ch->avail_cnt++;
		num_completed++;
	}
	spdk_put_io_channel(_ch);
	return num_completed;
}

static bool
stub_io_type_supported(void *ctx, enum spdk_bdev_io_type type)
{
	return true;
}

static struct spdk_bdev_fn_table fn_table = {
	.get_io_channel = stub_get_io_channel,
	.destruct = stub_destruct,
	.submit_request = stub_submit_request,
	.io_type_supported = stub_io_type_supported,
};

struct spdk_bdev_module bdev_ut_if;

static int
module_init(void)
{
	spdk_bdev_module_init_done(&bdev_ut_if);
	return 0;
}

static void
module_fini(void)
{
}

static void
init_complete(void)
{
	g_init_complete_called = true;
}

static void
fini_start(void)
{
	g_fini_start_called = true;
}

struct spdk_bdev_module bdev_ut_if = {
	.name = "bdev_ut",
	.module_init = module_init,
	.module_fini = module_fini,
	.async_init = true,
	.init_complete = init_complete,
	.fini_start = fini_start,
};

SPDK_BDEV_MODULE_REGISTER(bdev_ut, &bdev_ut_if)
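
/*
 * Helper that builds a minimal ut_bdev on top of the given io_target and
 * registers it with the bdev layer. The sizing below (1024 blocks of 4096
 * bytes, i.e. a 4 MiB bdev) is what the tests in this file assume; bdev.ctxt
 * points back at the ut_bdev so the stub callbacks can recover it.
 */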
static void
register_bdev(struct ut_bdev *ut_bdev, char *name, void *io_target)
{
	memset(ut_bdev, 0, sizeof(*ut_bdev));

	ut_bdev->io_target = io_target;
	ut_bdev->bdev.ctxt = ut_bdev;
	ut_bdev->bdev.name = name;
	ut_bdev->bdev.fn_table = &fn_table;
	ut_bdev->bdev.module = &bdev_ut_if;
	ut_bdev->bdev.blocklen = 4096;
	ut_bdev->bdev.blockcnt = 1024;

	spdk_bdev_register(&ut_bdev->bdev);
}

static void
unregister_bdev(struct ut_bdev *ut_bdev)
{
	/* Handle any deferred messages. */
	poll_threads();
	spdk_bdev_unregister(&ut_bdev->bdev, NULL, NULL);
	/* Handle the async bdev unregister. */
	poll_threads();
}

static void
bdev_init_cb(void *done, int rc)
{
	CU_ASSERT(rc == 0);
	*(bool *)done = true;
}

static void
_bdev_event_cb(enum spdk_bdev_event_type type, struct spdk_bdev *bdev,
	       void *event_ctx)
{
	switch (type) {
	case SPDK_BDEV_EVENT_REMOVE:
		if (event_ctx != NULL) {
			*(bool *)event_ctx = true;
		}
		break;
	default:
		CU_ASSERT(false);
		break;
	}
}

static void
setup_test(void)
{
	bool done = false;
	int rc;

	allocate_cores(BDEV_UT_NUM_THREADS);
	allocate_threads(BDEV_UT_NUM_THREADS);
	set_thread(0);

	rc = spdk_iobuf_initialize();
	CU_ASSERT(rc == 0);
	spdk_bdev_initialize(bdev_init_cb, &done);
	spdk_io_device_register(&g_io_device, stub_create_ch, stub_destroy_ch,
				sizeof(struct ut_bdev_channel), NULL);
	register_bdev(&g_bdev, "ut_bdev", &g_io_device);
	spdk_bdev_open_ext("ut_bdev", true, _bdev_event_cb, NULL, &g_desc);
}

static void
finish_cb(void *cb_arg)
{
	g_teardown_done = true;
}

static void
teardown_test(void)
{
	set_thread(0);
	g_teardown_done = false;
	spdk_bdev_close(g_desc);
	g_desc = NULL;
	unregister_bdev(&g_bdev);
	spdk_io_device_unregister(&g_io_device, NULL);
	spdk_bdev_finish(finish_cb, NULL);
	spdk_iobuf_finish(finish_cb, NULL);
	poll_threads();
	memset(&g_bdev, 0, sizeof(g_bdev));
	CU_ASSERT(g_teardown_done == true);
	g_teardown_done = false;
	free_threads();
	free_cores();
}

static uint32_t
bdev_io_tailq_cnt(bdev_io_tailq_t *tailq)
{
	struct spdk_bdev_io *io;
	uint32_t cnt = 0;

	TAILQ_FOREACH(io, tailq, internal.link) {
		cnt++;
	}

	return cnt;
}

static void
basic(void)
{
	g_init_complete_called = false;
	setup_test();
	CU_ASSERT(g_init_complete_called == true);

	set_thread(0);

	g_get_io_channel = false;
	g_ut_threads[0].ch = spdk_bdev_get_io_channel(g_desc);
	CU_ASSERT(g_ut_threads[0].ch == NULL);

	g_get_io_channel = true;
	g_create_ch = false;
	g_ut_threads[0].ch = spdk_bdev_get_io_channel(g_desc);
	CU_ASSERT(g_ut_threads[0].ch == NULL);

	g_get_io_channel = true;
	g_create_ch = true;
	g_ut_threads[0].ch = spdk_bdev_get_io_channel(g_desc);
	CU_ASSERT(g_ut_threads[0].ch != NULL);
	spdk_put_io_channel(g_ut_threads[0].ch);

	g_fini_start_called = false;
	teardown_test();
	CU_ASSERT(g_fini_start_called == true);
}

static void
_bdev_unregistered(void *done, int rc)
{
	CU_ASSERT(rc == 0);
	*(bool *)done = true;
}

static void
unregister_and_close(void)
{
	bool done, remove_notify;
	struct spdk_bdev_desc *desc = NULL;

	setup_test();
	set_thread(0);

	/* setup_test() automatically opens the bdev,
	 * but this test needs to do that
	 * in a different way. */
	spdk_bdev_close(g_desc);
	poll_threads();

	/* Try hotremoving a bdev with descriptors which don't provide
	 * any context to the notification callback */
	spdk_bdev_open_ext("ut_bdev", true, _bdev_event_cb, NULL, &desc);
	SPDK_CU_ASSERT_FATAL(desc != NULL);

	/* There is an open descriptor on the device. Unregister it,
	 * which can't proceed until the descriptor is closed. */
	done = false;
	spdk_bdev_unregister(&g_bdev.bdev, _bdev_unregistered, &done);

	/* Poll the threads to allow all events to be processed */
	poll_threads();

	/* Make sure the bdev was not unregistered. We still have a
	 * descriptor open */
	CU_ASSERT(done == false);

	spdk_bdev_close(desc);
	poll_threads();
	desc = NULL;

	/* The unregister should have completed */
	CU_ASSERT(done == true);

	/* Register the bdev again */
	register_bdev(&g_bdev, "ut_bdev", &g_io_device);

	remove_notify = false;
	spdk_bdev_open_ext("ut_bdev", true, _bdev_event_cb, &remove_notify, &desc);
	SPDK_CU_ASSERT_FATAL(desc != NULL);
	CU_ASSERT(remove_notify == false);

	/* There is an open descriptor on the device. Unregister it,
	 * which can't proceed until the descriptor is closed. */
	done = false;
	spdk_bdev_unregister(&g_bdev.bdev, _bdev_unregistered, &done);
	/* No polling has occurred, so neither of these should execute */
	CU_ASSERT(remove_notify == false);
	CU_ASSERT(done == false);

	/* Prior to the unregister completing, close the descriptor */
	spdk_bdev_close(desc);

	/* Poll the threads to allow all events to be processed */
	poll_threads();

	/* Remove notify should not have been called because the
	 * descriptor is already closed. */
	CU_ASSERT(remove_notify == false);

	/* The unregister should have completed */
	CU_ASSERT(done == true);

	/* Restore the original g_bdev so that we can use teardown_test(). */
	register_bdev(&g_bdev, "ut_bdev", &g_io_device);
	spdk_bdev_open_ext("ut_bdev", true, _bdev_event_cb, NULL, &g_desc);
	teardown_test();
}

static void
reset_done(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
{
	bool *done = cb_arg;

	CU_ASSERT(success == true);
	*done = true;
	spdk_bdev_free_io(bdev_io);
}

static void
put_channel_during_reset(void)
{
	struct spdk_io_channel *io_ch;
	bool done = false;

	setup_test();

	set_thread(0);
	io_ch = spdk_bdev_get_io_channel(g_desc);
	CU_ASSERT(io_ch != NULL);

	/*
	 * Start a reset, but then put the I/O channel before
	 * the deferred messages for the reset get a chance to
	 * execute.
	 */
	spdk_bdev_reset(g_desc, io_ch, reset_done, &done);
	spdk_put_io_channel(io_ch);
	poll_threads();
	stub_complete_io(g_bdev.io_target, 0);

	teardown_test();
}

static void
aborted_reset_done(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
{
	enum spdk_bdev_io_status *status = cb_arg;

	*status = success ?
		  SPDK_BDEV_IO_STATUS_SUCCESS : SPDK_BDEV_IO_STATUS_FAILED;
	spdk_bdev_free_io(bdev_io);
}

static void io_done(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg);

static void
aborted_reset(void)
{
	struct spdk_io_channel *io_ch[2];
	enum spdk_bdev_io_status status1 = SPDK_BDEV_IO_STATUS_PENDING,
				 status2 = SPDK_BDEV_IO_STATUS_PENDING;

	setup_test();

	set_thread(0);
	io_ch[0] = spdk_bdev_get_io_channel(g_desc);
	CU_ASSERT(io_ch[0] != NULL);
	spdk_bdev_reset(g_desc, io_ch[0], aborted_reset_done, &status1);
	poll_threads();
	CU_ASSERT(g_bdev.bdev.internal.reset_in_progress != NULL);

	/*
	 * First reset has been submitted on ch0. Now submit a second
	 * reset on ch1 which will get queued since there is already a
	 * reset in progress.
	 */
	set_thread(1);
	io_ch[1] = spdk_bdev_get_io_channel(g_desc);
	CU_ASSERT(io_ch[1] != NULL);
	spdk_bdev_reset(g_desc, io_ch[1], aborted_reset_done, &status2);
	poll_threads();
	CU_ASSERT(g_bdev.bdev.internal.reset_in_progress != NULL);

	/*
	 * Now destroy ch1. This will abort the queued reset. Check that
	 * the second reset was completed with failed status. Also check
	 * that bdev->internal.reset_in_progress != NULL, since the
	 * original reset has not been completed yet. This ensures that
	 * the bdev code is correctly noticing that the failed reset is
	 * *not* the one that had been submitted to the bdev module.
	 */
	set_thread(1);
	spdk_put_io_channel(io_ch[1]);
	poll_threads();
	CU_ASSERT(status2 == SPDK_BDEV_IO_STATUS_FAILED);
	CU_ASSERT(g_bdev.bdev.internal.reset_in_progress != NULL);

	/*
	 * Now complete the first reset, verify that it completed with SUCCESS
	 * status and that bdev->internal.reset_in_progress is also set back to NULL.
	 */
	set_thread(0);
	spdk_put_io_channel(io_ch[0]);
	stub_complete_io(g_bdev.io_target, 0);
	poll_threads();
	CU_ASSERT(status1 == SPDK_BDEV_IO_STATUS_SUCCESS);
	CU_ASSERT(g_bdev.bdev.internal.reset_in_progress == NULL);

	teardown_test();
}

static void
aborted_reset_no_outstanding_io(void)
{
	struct spdk_io_channel *io_ch[2];
	struct spdk_bdev_channel *bdev_ch[2];
	struct spdk_bdev *bdev[2];
	enum spdk_bdev_io_status status1 = SPDK_BDEV_IO_STATUS_PENDING,
				 status2 = SPDK_BDEV_IO_STATUS_PENDING;

	setup_test();

	/*
	 * This time we test the reset without any outstanding IO
	 * present on the bdev channel, so both resets should finish
	 * immediately.
	 */

	set_thread(0);
	/* Set reset_io_drain_timeout so the reset takes the drain path;
	 * with no outstanding IO it should complete without ever being
	 * submitted to the bdev module. */
	io_ch[0] = spdk_bdev_get_io_channel(g_desc);
	bdev_ch[0] = spdk_io_channel_get_ctx(io_ch[0]);
	bdev[0] = bdev_ch[0]->bdev;
	bdev[0]->reset_io_drain_timeout = SPDK_BDEV_RESET_IO_DRAIN_RECOMMENDED_VALUE;
	CU_ASSERT(io_ch[0] != NULL);
	spdk_bdev_reset(g_desc, io_ch[0], aborted_reset_done, &status1);
	poll_threads();
	CU_ASSERT(g_bdev.bdev.internal.reset_in_progress == NULL);
	CU_ASSERT(status1 == SPDK_BDEV_IO_STATUS_SUCCESS);
	spdk_put_io_channel(io_ch[0]);

	set_thread(1);
	/* Same reset_io_drain_timeout setting for the second channel. */
	io_ch[1] = spdk_bdev_get_io_channel(g_desc);
	bdev_ch[1] = spdk_io_channel_get_ctx(io_ch[1]);
	bdev[1] = bdev_ch[1]->bdev;
	bdev[1]->reset_io_drain_timeout = SPDK_BDEV_RESET_IO_DRAIN_RECOMMENDED_VALUE;
	CU_ASSERT(io_ch[1] != NULL);
	spdk_bdev_reset(g_desc, io_ch[1], aborted_reset_done, &status2);
	poll_threads();
	CU_ASSERT(g_bdev.bdev.internal.reset_in_progress == NULL);
	CU_ASSERT(status2 == SPDK_BDEV_IO_STATUS_SUCCESS);
	spdk_put_io_channel(io_ch[1]);

	stub_complete_io(g_bdev.io_target, 0);
	poll_threads();

	teardown_test();
}

static void
io_during_io_done(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
{
	enum spdk_bdev_io_status *status = cb_arg;

	*status = bdev_io->internal.status;
	spdk_bdev_free_io(bdev_io);
}

static void
io_during_reset(void)
{
	struct spdk_io_channel *io_ch[2];
	struct spdk_bdev_channel *bdev_ch[2];
	enum spdk_bdev_io_status status0, status1, status_reset;
	int rc;

	setup_test();

	/*
	 * First test normal case - submit an I/O on each of two channels (with no resets)
	 * and verify they complete successfully.
	 */
	set_thread(0);
	io_ch[0] = spdk_bdev_get_io_channel(g_desc);
	bdev_ch[0] = spdk_io_channel_get_ctx(io_ch[0]);
	CU_ASSERT(bdev_ch[0]->flags == 0);
	status0 = SPDK_BDEV_IO_STATUS_PENDING;
	rc = spdk_bdev_read_blocks(g_desc, io_ch[0], NULL, 0, 1, io_during_io_done, &status0);
	CU_ASSERT(rc == 0);

	set_thread(1);
	io_ch[1] = spdk_bdev_get_io_channel(g_desc);
	bdev_ch[1] = spdk_io_channel_get_ctx(io_ch[1]);
	CU_ASSERT(bdev_ch[1]->flags == 0);
	status1 = SPDK_BDEV_IO_STATUS_PENDING;
	rc = spdk_bdev_read_blocks(g_desc, io_ch[1], NULL, 0, 1, io_during_io_done, &status1);
	CU_ASSERT(rc == 0);

	poll_threads();
	CU_ASSERT(status0 == SPDK_BDEV_IO_STATUS_PENDING);
	CU_ASSERT(status1 == SPDK_BDEV_IO_STATUS_PENDING);

	set_thread(0);
	stub_complete_io(g_bdev.io_target, 0);
	CU_ASSERT(status0 == SPDK_BDEV_IO_STATUS_SUCCESS);

	set_thread(1);
	stub_complete_io(g_bdev.io_target, 0);
	CU_ASSERT(status1 == SPDK_BDEV_IO_STATUS_SUCCESS);

	/*
	 * Now submit a reset, and leave it pending while we submit I/O on two different
	 * channels. These I/O should be failed by the bdev layer since the reset is in
	 * progress.
	 */
	set_thread(0);
	status_reset = SPDK_BDEV_IO_STATUS_PENDING;
	rc = spdk_bdev_reset(g_desc, io_ch[0], io_during_io_done, &status_reset);
	CU_ASSERT(rc == 0);

	CU_ASSERT(bdev_ch[0]->flags == 0);
	CU_ASSERT(bdev_ch[1]->flags == 0);
	poll_threads();
	CU_ASSERT(bdev_ch[0]->flags == BDEV_CH_RESET_IN_PROGRESS);
	CU_ASSERT(bdev_ch[1]->flags == BDEV_CH_RESET_IN_PROGRESS);

	set_thread(0);
	status0 = SPDK_BDEV_IO_STATUS_PENDING;
	rc = spdk_bdev_read_blocks(g_desc, io_ch[0], NULL, 0, 1, io_during_io_done, &status0);
	CU_ASSERT(rc == 0);

	set_thread(1);
	status1 = SPDK_BDEV_IO_STATUS_PENDING;
	rc = spdk_bdev_read_blocks(g_desc, io_ch[1], NULL, 0, 1, io_during_io_done, &status1);
	CU_ASSERT(rc == 0);

	/*
	 * A reset is in progress so these read I/O should complete with ABORTED status.
	 * Note that we need to poll_threads() since I/O completed inline have their
	 * completion deferred.
	 */
	poll_threads();
	CU_ASSERT(status_reset == SPDK_BDEV_IO_STATUS_PENDING);
	CU_ASSERT(status0 == SPDK_BDEV_IO_STATUS_ABORTED);
	CU_ASSERT(status1 == SPDK_BDEV_IO_STATUS_ABORTED);

	/*
	 * Complete the reset
	 */
	set_thread(0);
	stub_complete_io(g_bdev.io_target, 0);

	/*
	 * Only poll thread 0. We should not get a completion.
	 */
	poll_thread(0);
	CU_ASSERT(status_reset == SPDK_BDEV_IO_STATUS_PENDING);

	/*
	 * Poll both thread 0 and 1 so the messages can propagate and we
	 * get a completion.
	 */
	poll_threads();
	CU_ASSERT(status_reset == SPDK_BDEV_IO_STATUS_SUCCESS);

	spdk_put_io_channel(io_ch[0]);
	set_thread(1);
	spdk_put_io_channel(io_ch[1]);
	poll_threads();

	teardown_test();
}

static uint32_t
count_queued_resets(void *io_target)
{
	struct spdk_io_channel *_ch = spdk_get_io_channel(io_target);
	struct ut_bdev_channel *ch = spdk_io_channel_get_ctx(_ch);
	struct spdk_bdev_io *io;
	uint32_t submitted_resets = 0;

	TAILQ_FOREACH(io, &ch->outstanding_io, module_link) {
		if (io->type == SPDK_BDEV_IO_TYPE_RESET) {
			submitted_resets++;
		}
	}

	spdk_put_io_channel(_ch);

	return submitted_resets;
}

static void
reset_completions(void)
{
	struct spdk_io_channel *io_ch;
	struct spdk_bdev_channel *bdev_ch;
	struct spdk_bdev *bdev;
	enum spdk_bdev_io_status status0, status_reset;
	int rc, iter;

	setup_test();

	/* This test covers four test cases:
	 * 1) reset_io_drain_timeout of a bdev is greater than 0
	 * 2) No outstanding IO are present on any bdev channel
	 * 3) Outstanding IO finish during bdev reset
	 * 4) Outstanding IO do not finish before reset is done waiting
	 *    for them.
	 *
	 * Above conditions mainly affect the timing of bdev reset completion
	 * and whether a reset should be skipped via spdk_bdev_io_complete()
	 * or sent down to the underlying bdev module via bdev_io_submit_reset(). */

	/* Test preparation */
	set_thread(0);
	io_ch = spdk_bdev_get_io_channel(g_desc);
	bdev_ch = spdk_io_channel_get_ctx(io_ch);
	CU_ASSERT(bdev_ch->flags == 0);

	/* Test case 1) reset_io_drain_timeout set to 0. Reset should be sent down immediately. */
	bdev = &g_bdev.bdev;
	bdev->reset_io_drain_timeout = 0;

	status_reset = SPDK_BDEV_IO_STATUS_PENDING;
	rc = spdk_bdev_reset(g_desc, io_ch, io_during_io_done, &status_reset);
	CU_ASSERT(rc == 0);
	poll_threads();
	CU_ASSERT(count_queued_resets(g_bdev.io_target) == 1);

	/* Call reset completion inside bdev module. */
	stub_complete_io(g_bdev.io_target, 0);
	poll_threads();
	CU_ASSERT(count_queued_resets(g_bdev.io_target) == 0);
	CU_ASSERT(status_reset == SPDK_BDEV_IO_STATUS_SUCCESS);
	CU_ASSERT(g_bdev.bdev.internal.reset_in_progress == NULL);

	/* Test case 2) no outstanding IO are present. Reset should perform one iteration over
	 * channels and then be skipped. */
	bdev->reset_io_drain_timeout = SPDK_BDEV_RESET_IO_DRAIN_RECOMMENDED_VALUE;
	status_reset = SPDK_BDEV_IO_STATUS_PENDING;

	rc = spdk_bdev_reset(g_desc, io_ch, io_during_io_done, &status_reset);
	CU_ASSERT(rc == 0);
	poll_threads();
	/* Reset was never submitted to the bdev module.
	 */
	CU_ASSERT(count_queued_resets(g_bdev.io_target) == 0);
	CU_ASSERT(status_reset == SPDK_BDEV_IO_STATUS_SUCCESS);
	CU_ASSERT(g_bdev.bdev.internal.reset_in_progress == NULL);

	/* Test case 3) outstanding IO finish during bdev reset procedure. Reset should initiate
	 * wait poller to check for IO completions every second, until reset_io_drain_timeout is
	 * reached, but finish earlier than this threshold. */
	status0 = SPDK_BDEV_IO_STATUS_PENDING;
	status_reset = SPDK_BDEV_IO_STATUS_PENDING;
	rc = spdk_bdev_read_blocks(g_desc, io_ch, NULL, 0, 1, io_during_io_done, &status0);
	CU_ASSERT(rc == 0);

	rc = spdk_bdev_reset(g_desc, io_ch, io_during_io_done, &status_reset);
	CU_ASSERT(rc == 0);
	poll_threads();
	/* The reset just started and should not have been submitted yet. */
	CU_ASSERT(count_queued_resets(g_bdev.io_target) == 0);

	poll_threads();
	CU_ASSERT(status_reset == SPDK_BDEV_IO_STATUS_PENDING);
	/* Let the poller wait for about half the time then complete outstanding IO. */
	for (iter = 0; iter < 2; iter++) {
		/* Reset is still processing and not submitted at this point. */
		CU_ASSERT(count_queued_resets(g_bdev.io_target) == 0);
		spdk_delay_us(1000 * 1000);
		poll_threads();
		poll_threads();
	}
	CU_ASSERT(status_reset == SPDK_BDEV_IO_STATUS_PENDING);
	stub_complete_io(g_bdev.io_target, 0);
	poll_threads();
	spdk_delay_us(BDEV_RESET_CHECK_OUTSTANDING_IO_PERIOD);
	poll_threads();
	poll_threads();
	CU_ASSERT(status_reset == SPDK_BDEV_IO_STATUS_SUCCESS);
	/* Sending reset to the bdev module has been skipped. */
	CU_ASSERT(count_queued_resets(g_bdev.io_target) == 0);
	CU_ASSERT(g_bdev.bdev.internal.reset_in_progress == NULL);

	/* Test case 4) outstanding IO are still present after reset_io_drain_timeout
	 * seconds have passed. */
	status0 = SPDK_BDEV_IO_STATUS_PENDING;
	status_reset = SPDK_BDEV_IO_STATUS_PENDING;
	rc = spdk_bdev_read_blocks(g_desc, io_ch, NULL, 0, 1, io_during_io_done, &status0);
	CU_ASSERT(rc == 0);

	rc = spdk_bdev_reset(g_desc, io_ch, io_during_io_done, &status_reset);
	CU_ASSERT(rc == 0);
	poll_threads();
	/* The reset just started and should not have been submitted yet. */
	CU_ASSERT(count_queued_resets(g_bdev.io_target) == 0);

	poll_threads();
	CU_ASSERT(status_reset == SPDK_BDEV_IO_STATUS_PENDING);
	/* Let the poller wait for reset_io_drain_timeout seconds. */
	for (iter = 0; iter < bdev->reset_io_drain_timeout; iter++) {
		CU_ASSERT(count_queued_resets(g_bdev.io_target) == 0);
		spdk_delay_us(BDEV_RESET_CHECK_OUTSTANDING_IO_PERIOD);
		poll_threads();
		poll_threads();
	}

	/* After timing out, the reset should have been sent to the module. */
	CU_ASSERT(count_queued_resets(g_bdev.io_target) == 1);
	/* Complete reset submitted to the module and the read IO. */
	stub_complete_io(g_bdev.io_target, 0);
	poll_threads();
	CU_ASSERT(status_reset == SPDK_BDEV_IO_STATUS_SUCCESS);
	CU_ASSERT(g_bdev.bdev.internal.reset_in_progress == NULL);

	/* Destroy the channel and end the test.
	 */
	spdk_put_io_channel(io_ch);
	poll_threads();

	teardown_test();
}

static void
basic_qos(void)
{
	struct spdk_io_channel *io_ch[2];
	struct spdk_bdev_channel *bdev_ch[2];
	struct spdk_bdev *bdev;
	enum spdk_bdev_io_status status, abort_status;
	int rc;

	setup_test();

	/* Enable QoS */
	bdev = &g_bdev.bdev;
	bdev->internal.qos = calloc(1, sizeof(*bdev->internal.qos));
	SPDK_CU_ASSERT_FATAL(bdev->internal.qos != NULL);
	TAILQ_INIT(&bdev->internal.qos->queued);
	/*
	 * Enable read/write IOPS, read only byte per second and
	 * read/write byte per second rate limits.
	 * In this case, all rate limits will take equal effect: with the
	 * 4K block size, each of them allows 2 I/O per millisecond.
	 */
	/* 2000 read/write I/O per second, or 2 per millisecond */
	bdev->internal.qos->rate_limits[SPDK_BDEV_QOS_RW_IOPS_RATE_LIMIT].limit = 2000;
	/* 8K read/write byte per millisecond with 4K block size */
	bdev->internal.qos->rate_limits[SPDK_BDEV_QOS_RW_BPS_RATE_LIMIT].limit = 8192000;
	/* 8K read only byte per millisecond with 4K block size */
	bdev->internal.qos->rate_limits[SPDK_BDEV_QOS_R_BPS_RATE_LIMIT].limit = 8192000;

	g_get_io_channel = true;

	set_thread(0);
	io_ch[0] = spdk_bdev_get_io_channel(g_desc);
	bdev_ch[0] = spdk_io_channel_get_ctx(io_ch[0]);
	CU_ASSERT(bdev_ch[0]->flags == BDEV_CH_QOS_ENABLED);

	set_thread(1);
	io_ch[1] = spdk_bdev_get_io_channel(g_desc);
	bdev_ch[1] = spdk_io_channel_get_ctx(io_ch[1]);
	CU_ASSERT(bdev_ch[1]->flags == BDEV_CH_QOS_ENABLED);

	/*
	 * Send an I/O on thread 0, which is where the QoS thread is running.
	 */
	set_thread(0);
	status = SPDK_BDEV_IO_STATUS_PENDING;
	rc = spdk_bdev_read_blocks(g_desc, io_ch[0], NULL, 0, 1, io_during_io_done, &status);
	CU_ASSERT(rc == 0);
	CU_ASSERT(status == SPDK_BDEV_IO_STATUS_PENDING);
	poll_threads();
	stub_complete_io(g_bdev.io_target, 0);
	poll_threads();
	CU_ASSERT(status == SPDK_BDEV_IO_STATUS_SUCCESS);

	/* Send an I/O on thread 1. The QoS thread is not running here. */
	status = SPDK_BDEV_IO_STATUS_PENDING;
	set_thread(1);
	rc = spdk_bdev_read_blocks(g_desc, io_ch[1], NULL, 0, 1, io_during_io_done, &status);
	CU_ASSERT(rc == 0);
	CU_ASSERT(status == SPDK_BDEV_IO_STATUS_PENDING);
	poll_threads();
	/* Complete I/O on thread 1. This should not complete the I/O we submitted */
	stub_complete_io(g_bdev.io_target, 0);
	poll_threads();
	CU_ASSERT(status == SPDK_BDEV_IO_STATUS_PENDING);
	/* Now complete I/O on thread 0 */
	set_thread(0);
	poll_threads();
	stub_complete_io(g_bdev.io_target, 0);
	poll_threads();
	CU_ASSERT(status == SPDK_BDEV_IO_STATUS_SUCCESS);

	/* Reset rate limit for the next test cases. */
	spdk_delay_us(SPDK_BDEV_QOS_TIMESLICE_IN_USEC);
	poll_threads();

	/*
	 * Test abort request when QoS is enabled.
	 */

	/* Send an I/O on thread 0, which is where the QoS thread is running. */
	set_thread(0);
	status = SPDK_BDEV_IO_STATUS_PENDING;
	rc = spdk_bdev_read_blocks(g_desc, io_ch[0], NULL, 0, 1, io_during_io_done, &status);
	CU_ASSERT(rc == 0);
	CU_ASSERT(status == SPDK_BDEV_IO_STATUS_PENDING);
	/* Send an abort to the I/O on the same thread. */
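	/* spdk_bdev_abort() targets I/O by the caller's completion argument (its
	 * bio_cb_arg parameter), so passing &status below aims the abort at the
	 * read that was just submitted with &status as its cb_arg. */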
	abort_status = SPDK_BDEV_IO_STATUS_PENDING;
	rc = spdk_bdev_abort(g_desc, io_ch[0], &status, io_during_io_done, &abort_status);
	CU_ASSERT(rc == 0);
	CU_ASSERT(abort_status == SPDK_BDEV_IO_STATUS_PENDING);
	poll_threads();
	CU_ASSERT(abort_status == SPDK_BDEV_IO_STATUS_SUCCESS);
	CU_ASSERT(status == SPDK_BDEV_IO_STATUS_ABORTED);

	/* Send an I/O on thread 1. The QoS thread is not running here. */
	status = SPDK_BDEV_IO_STATUS_PENDING;
	set_thread(1);
	rc = spdk_bdev_read_blocks(g_desc, io_ch[1], NULL, 0, 1, io_during_io_done, &status);
	CU_ASSERT(rc == 0);
	CU_ASSERT(status == SPDK_BDEV_IO_STATUS_PENDING);
	poll_threads();
	/* Send an abort to the I/O on the same thread. */
	abort_status = SPDK_BDEV_IO_STATUS_PENDING;
	rc = spdk_bdev_abort(g_desc, io_ch[1], &status, io_during_io_done, &abort_status);
	CU_ASSERT(rc == 0);
	CU_ASSERT(abort_status == SPDK_BDEV_IO_STATUS_PENDING);
	poll_threads();
	/* Complete the I/O with failure and the abort with success on thread 1. */
	CU_ASSERT(abort_status == SPDK_BDEV_IO_STATUS_SUCCESS);
	CU_ASSERT(status == SPDK_BDEV_IO_STATUS_ABORTED);

	set_thread(0);

	/*
	 * Close the descriptor only, which should stop the qos channel since
	 * this is the last descriptor to be removed.
	 */
	spdk_bdev_close(g_desc);
	poll_threads();
	CU_ASSERT(bdev->internal.qos->ch == NULL);

	/*
	 * Open the bdev again, which should set up the qos channel again
	 * since the I/O channels are still valid.
	 */
	spdk_bdev_open_ext("ut_bdev", true, _bdev_event_cb, NULL, &g_desc);
	poll_threads();
	CU_ASSERT(bdev->internal.qos->ch != NULL);

	/* Tear down the channels */
	set_thread(0);
	spdk_put_io_channel(io_ch[0]);
	set_thread(1);
	spdk_put_io_channel(io_ch[1]);
	poll_threads();
	set_thread(0);

	/* Close the descriptor, which should stop the qos channel */
	spdk_bdev_close(g_desc);
	poll_threads();
	CU_ASSERT(bdev->internal.qos->ch == NULL);

	/* Open the bdev again; no qos channel is set up since there are no valid channels. */
	spdk_bdev_open_ext("ut_bdev", true, _bdev_event_cb, NULL, &g_desc);
	poll_threads();
	CU_ASSERT(bdev->internal.qos->ch == NULL);

	/* Create the channels in reverse order. */
	set_thread(1);
	io_ch[1] = spdk_bdev_get_io_channel(g_desc);
	bdev_ch[1] = spdk_io_channel_get_ctx(io_ch[1]);
	CU_ASSERT(bdev_ch[1]->flags == BDEV_CH_QOS_ENABLED);

	set_thread(0);
	io_ch[0] = spdk_bdev_get_io_channel(g_desc);
	bdev_ch[0] = spdk_io_channel_get_ctx(io_ch[0]);
	CU_ASSERT(bdev_ch[0]->flags == BDEV_CH_QOS_ENABLED);

	/* Confirm that the qos thread is now thread 1 */
	CU_ASSERT(bdev->internal.qos->ch == bdev_ch[1]);

	/* Tear down the channels */
	set_thread(0);
	spdk_put_io_channel(io_ch[0]);
	set_thread(1);
	spdk_put_io_channel(io_ch[1]);
	poll_threads();

	set_thread(0);

	teardown_test();
}

static void
io_during_qos_queue(void)
{
	struct spdk_io_channel *io_ch[2];
	struct spdk_bdev_channel *bdev_ch[2];
	struct spdk_bdev *bdev;
	enum spdk_bdev_io_status status0, status1, status2;
	int rc;

	setup_test();
	MOCK_SET(spdk_get_ticks, 0);

	/* Enable QoS */
	bdev = &g_bdev.bdev;
	bdev->internal.qos = calloc(1, sizeof(*bdev->internal.qos));
	SPDK_CU_ASSERT_FATAL(bdev->internal.qos != NULL);
	TAILQ_INIT(&bdev->internal.qos->queued);
	/*
	 * Enable read/write IOPS, read only byte per sec, write only
	 * byte per sec and read/write byte per sec rate limits.
	 * In this case, both read only and write only byte per sec
	 * rate limit will take effect.
	 */
	/* 4000 read/write I/O per second, or 4 per millisecond */
	bdev->internal.qos->rate_limits[SPDK_BDEV_QOS_RW_IOPS_RATE_LIMIT].limit = 4000;
	/* 8K byte per millisecond with 4K block size */
	bdev->internal.qos->rate_limits[SPDK_BDEV_QOS_RW_BPS_RATE_LIMIT].limit = 8192000;
	/* 4K byte per millisecond with 4K block size */
	bdev->internal.qos->rate_limits[SPDK_BDEV_QOS_R_BPS_RATE_LIMIT].limit = 4096000;
	/* 4K byte per millisecond with 4K block size */
	bdev->internal.qos->rate_limits[SPDK_BDEV_QOS_W_BPS_RATE_LIMIT].limit = 4096000;

	g_get_io_channel = true;

	/* Create channels */
	set_thread(0);
	io_ch[0] = spdk_bdev_get_io_channel(g_desc);
	bdev_ch[0] = spdk_io_channel_get_ctx(io_ch[0]);
	CU_ASSERT(bdev_ch[0]->flags == BDEV_CH_QOS_ENABLED);

	set_thread(1);
	io_ch[1] = spdk_bdev_get_io_channel(g_desc);
	bdev_ch[1] = spdk_io_channel_get_ctx(io_ch[1]);
	CU_ASSERT(bdev_ch[1]->flags == BDEV_CH_QOS_ENABLED);

	/* Send two read I/Os */
	status1 = SPDK_BDEV_IO_STATUS_PENDING;
	rc = spdk_bdev_read_blocks(g_desc, io_ch[1], NULL, 0, 1, io_during_io_done, &status1);
	CU_ASSERT(rc == 0);
	CU_ASSERT(status1 == SPDK_BDEV_IO_STATUS_PENDING);
	set_thread(0);
	status0 = SPDK_BDEV_IO_STATUS_PENDING;
	rc = spdk_bdev_read_blocks(g_desc, io_ch[0], NULL, 0, 1, io_during_io_done, &status0);
	CU_ASSERT(rc == 0);
	CU_ASSERT(status0 == SPDK_BDEV_IO_STATUS_PENDING);
	/* Send one write I/O */
	status2 = SPDK_BDEV_IO_STATUS_PENDING;
	rc = spdk_bdev_write_blocks(g_desc, io_ch[0], NULL, 0, 1, io_during_io_done, &status2);
	CU_ASSERT(rc == 0);
	CU_ASSERT(status2 == SPDK_BDEV_IO_STATUS_PENDING);

	/* Complete any I/O that arrived at the disk */
	poll_threads();
	set_thread(1);
	stub_complete_io(g_bdev.io_target, 0);
	set_thread(0);
	stub_complete_io(g_bdev.io_target, 0);
	poll_threads();

	/* Only one of the two read I/Os should complete. (logical XOR) */
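	/* Rough arithmetic behind that expectation, assuming the default 1 ms QoS
	 * timeslice: the read-only limit of 4096000 bytes/sec allows one 4K read
	 * per millisecond and the write-only limit allows one 4K write, while the
	 * read/write limits (4 I/O, 8192 bytes per millisecond) are not the
	 * bottleneck. So one read and the write go through and the second read
	 * stays queued until time advances below. */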
	if (status0 == SPDK_BDEV_IO_STATUS_SUCCESS) {
		CU_ASSERT(status1 == SPDK_BDEV_IO_STATUS_PENDING);
	} else {
		CU_ASSERT(status1 == SPDK_BDEV_IO_STATUS_SUCCESS);
	}
	/* The write I/O should complete. */
	CU_ASSERT(status2 == SPDK_BDEV_IO_STATUS_SUCCESS);

	/* Advance in time by a millisecond */
	spdk_delay_us(1000);

	/* Complete more I/O */
	poll_threads();
	set_thread(1);
	stub_complete_io(g_bdev.io_target, 0);
	set_thread(0);
	stub_complete_io(g_bdev.io_target, 0);
	poll_threads();

	/* Now the second read I/O should be done */
	CU_ASSERT(status0 == SPDK_BDEV_IO_STATUS_SUCCESS);
	CU_ASSERT(status1 == SPDK_BDEV_IO_STATUS_SUCCESS);

	/* Tear down the channels */
	set_thread(1);
	spdk_put_io_channel(io_ch[1]);
	set_thread(0);
	spdk_put_io_channel(io_ch[0]);
	poll_threads();

	teardown_test();
}

static void
io_during_qos_reset(void)
{
	struct spdk_io_channel *io_ch[2];
	struct spdk_bdev_channel *bdev_ch[2];
	struct spdk_bdev *bdev;
	enum spdk_bdev_io_status status0, status1, reset_status;
	int rc;

	setup_test();
	MOCK_SET(spdk_get_ticks, 0);

	/* Enable QoS */
	bdev = &g_bdev.bdev;
	bdev->internal.qos = calloc(1, sizeof(*bdev->internal.qos));
	SPDK_CU_ASSERT_FATAL(bdev->internal.qos != NULL);
	TAILQ_INIT(&bdev->internal.qos->queued);
	/*
	 * Enable read/write IOPS, write only byte per sec and
	 * read/write byte per second rate limits.
	 * In this case, read/write byte per second rate limit will
	 * take effect first, since it only allows one 4K I/O per millisecond.
	 */
	/* 2000 read/write I/O per second, or 2 per millisecond */
	bdev->internal.qos->rate_limits[SPDK_BDEV_QOS_RW_IOPS_RATE_LIMIT].limit = 2000;
	/* 4K byte per millisecond with 4K block size */
	bdev->internal.qos->rate_limits[SPDK_BDEV_QOS_RW_BPS_RATE_LIMIT].limit = 4096000;
	/* 8K byte per millisecond with 4K block size */
	bdev->internal.qos->rate_limits[SPDK_BDEV_QOS_W_BPS_RATE_LIMIT].limit = 8192000;

	g_get_io_channel = true;

	/* Create channels */
	set_thread(0);
	io_ch[0] = spdk_bdev_get_io_channel(g_desc);
	bdev_ch[0] = spdk_io_channel_get_ctx(io_ch[0]);
	CU_ASSERT(bdev_ch[0]->flags == BDEV_CH_QOS_ENABLED);

	set_thread(1);
	io_ch[1] = spdk_bdev_get_io_channel(g_desc);
	bdev_ch[1] = spdk_io_channel_get_ctx(io_ch[1]);
	CU_ASSERT(bdev_ch[1]->flags == BDEV_CH_QOS_ENABLED);

	/* Send two I/O. One of these gets queued by QoS. The other is sitting at the disk. */
	status1 = SPDK_BDEV_IO_STATUS_PENDING;
	rc = spdk_bdev_write_blocks(g_desc, io_ch[1], NULL, 0, 1, io_during_io_done, &status1);
	CU_ASSERT(rc == 0);
	set_thread(0);
	status0 = SPDK_BDEV_IO_STATUS_PENDING;
	rc = spdk_bdev_write_blocks(g_desc, io_ch[0], NULL, 0, 1, io_during_io_done, &status0);
	CU_ASSERT(rc == 0);

	poll_threads();
	CU_ASSERT(status1 == SPDK_BDEV_IO_STATUS_PENDING);
	CU_ASSERT(status0 == SPDK_BDEV_IO_STATUS_PENDING);

	/* Reset the bdev. */
	reset_status = SPDK_BDEV_IO_STATUS_PENDING;
	rc = spdk_bdev_reset(g_desc, io_ch[0], io_during_io_done, &reset_status);
	CU_ASSERT(rc == 0);

	/* Complete any I/O that arrived at the disk */
	poll_threads();
	set_thread(1);
	stub_complete_io(g_bdev.io_target, 0);
	set_thread(0);
	stub_complete_io(g_bdev.io_target, 0);
	poll_threads();

	CU_ASSERT(reset_status == SPDK_BDEV_IO_STATUS_SUCCESS);
	CU_ASSERT(status0 == SPDK_BDEV_IO_STATUS_ABORTED);
	CU_ASSERT(status1 == SPDK_BDEV_IO_STATUS_ABORTED);

	/* Tear down the channels */
	set_thread(1);
	spdk_put_io_channel(io_ch[1]);
	set_thread(0);
	spdk_put_io_channel(io_ch[0]);
	poll_threads();

	teardown_test();
}

static void
enomem_done(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
{
	enum spdk_bdev_io_status *status = cb_arg;

	*status = success ? SPDK_BDEV_IO_STATUS_SUCCESS : SPDK_BDEV_IO_STATUS_FAILED;
	spdk_bdev_free_io(bdev_io);
}

static void
enomem(void)
{
	struct spdk_io_channel *io_ch;
	struct spdk_bdev_channel *bdev_ch;
	struct spdk_bdev_shared_resource *shared_resource;
	struct ut_bdev_channel *ut_ch;
	const uint32_t IO_ARRAY_SIZE = 64;
	const uint32_t AVAIL = 20;
	enum spdk_bdev_io_status status[IO_ARRAY_SIZE], status_reset;
	uint32_t nomem_cnt, i;
	struct spdk_bdev_io *first_io;
	int rc;

	setup_test();

	set_thread(0);
	io_ch = spdk_bdev_get_io_channel(g_desc);
	bdev_ch = spdk_io_channel_get_ctx(io_ch);
	shared_resource = bdev_ch->shared_resource;
	ut_ch = spdk_io_channel_get_ctx(bdev_ch->channel);
	ut_ch->avail_cnt = AVAIL;

	/* First submit a number of IOs equal to what the channel can support. */
	for (i = 0; i < AVAIL; i++) {
		status[i] = SPDK_BDEV_IO_STATUS_PENDING;
		rc = spdk_bdev_read_blocks(g_desc, io_ch, NULL, 0, 1, enomem_done, &status[i]);
		CU_ASSERT(rc == 0);
	}
	CU_ASSERT(TAILQ_EMPTY(&shared_resource->nomem_io));

	/*
	 * Next, submit one additional I/O. This one should fail with ENOMEM and then go onto
	 * the nomem_io list.
	 */
	status[AVAIL] = SPDK_BDEV_IO_STATUS_PENDING;
	rc = spdk_bdev_read_blocks(g_desc, io_ch, NULL, 0, 1, enomem_done, &status[AVAIL]);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(!TAILQ_EMPTY(&shared_resource->nomem_io));
	first_io = TAILQ_FIRST(&shared_resource->nomem_io);

	/*
	 * Now submit a bunch more I/O. These should all fail with ENOMEM and get queued behind
	 * the first_io above.
	 */
	for (i = AVAIL + 1; i < IO_ARRAY_SIZE; i++) {
		status[i] = SPDK_BDEV_IO_STATUS_PENDING;
		rc = spdk_bdev_read_blocks(g_desc, io_ch, NULL, 0, 1, enomem_done, &status[i]);
		CU_ASSERT(rc == 0);
	}

	/* Assert that first_io is still at the head of the list. */
	CU_ASSERT(TAILQ_FIRST(&shared_resource->nomem_io) == first_io);
	CU_ASSERT(bdev_io_tailq_cnt(&shared_resource->nomem_io) == (IO_ARRAY_SIZE - AVAIL));
	nomem_cnt = bdev_io_tailq_cnt(&shared_resource->nomem_io);
	CU_ASSERT(shared_resource->nomem_threshold == (AVAIL - NOMEM_THRESHOLD_COUNT));

	/*
	 * Complete 1 I/O only. The key check here is bdev_io_tailq_cnt - this should not have
	 * changed since completing just 1 I/O should not trigger retrying the queued nomem_io
	 * list.
	 */
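	/* (Retry of the nomem_io queue only kicks in once outstanding I/O drops to
	 * shared_resource->nomem_threshold, which the assertion above pinned at
	 * AVAIL - NOMEM_THRESHOLD_COUNT, so a single completion is not enough.) */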
	stub_complete_io(g_bdev.io_target, 1);
	CU_ASSERT(bdev_io_tailq_cnt(&shared_resource->nomem_io) == nomem_cnt);

	/*
	 * Complete enough I/O to hit the nomem_threshold. This should trigger retrying nomem_io,
	 * and we should see I/O get resubmitted to the test bdev module.
	 */
	stub_complete_io(g_bdev.io_target, NOMEM_THRESHOLD_COUNT - 1);
	CU_ASSERT(bdev_io_tailq_cnt(&shared_resource->nomem_io) < nomem_cnt);
	nomem_cnt = bdev_io_tailq_cnt(&shared_resource->nomem_io);

	/* Complete 1 I/O only. This should not trigger retrying the queued nomem_io. */
	stub_complete_io(g_bdev.io_target, 1);
	CU_ASSERT(bdev_io_tailq_cnt(&shared_resource->nomem_io) == nomem_cnt);

	/*
	 * Send a reset and confirm that all I/O are completed, including the ones that
	 * were queued on the nomem_io list.
	 */
	status_reset = SPDK_BDEV_IO_STATUS_PENDING;
	rc = spdk_bdev_reset(g_desc, io_ch, enomem_done, &status_reset);
	poll_threads();
	CU_ASSERT(rc == 0);
	/* This will complete the reset. */
	stub_complete_io(g_bdev.io_target, 0);

	CU_ASSERT(bdev_io_tailq_cnt(&shared_resource->nomem_io) == 0);
	CU_ASSERT(shared_resource->io_outstanding == 0);

	spdk_put_io_channel(io_ch);
	poll_threads();
	teardown_test();
}

static void
enomem_multi_bdev(void)
{
	struct spdk_io_channel *io_ch;
	struct spdk_bdev_channel *bdev_ch;
	struct spdk_bdev_shared_resource *shared_resource;
	struct ut_bdev_channel *ut_ch;
	const uint32_t IO_ARRAY_SIZE = 64;
	const uint32_t AVAIL = 20;
	enum spdk_bdev_io_status status[IO_ARRAY_SIZE];
	uint32_t i;
	struct ut_bdev *second_bdev;
	struct spdk_bdev_desc *second_desc = NULL;
	struct spdk_bdev_channel *second_bdev_ch;
	struct spdk_io_channel *second_ch;
	int rc;

	setup_test();

	/* Register second bdev with the same io_target */
	second_bdev = calloc(1, sizeof(*second_bdev));
	SPDK_CU_ASSERT_FATAL(second_bdev != NULL);
	register_bdev(second_bdev, "ut_bdev2", g_bdev.io_target);
	spdk_bdev_open_ext("ut_bdev2", true, _bdev_event_cb, NULL, &second_desc);
	SPDK_CU_ASSERT_FATAL(second_desc != NULL);

	set_thread(0);
	io_ch = spdk_bdev_get_io_channel(g_desc);
	bdev_ch = spdk_io_channel_get_ctx(io_ch);
	shared_resource = bdev_ch->shared_resource;
	ut_ch = spdk_io_channel_get_ctx(bdev_ch->channel);
	ut_ch->avail_cnt = AVAIL;

	second_ch = spdk_bdev_get_io_channel(second_desc);
	second_bdev_ch = spdk_io_channel_get_ctx(second_ch);
	SPDK_CU_ASSERT_FATAL(shared_resource == second_bdev_ch->shared_resource);

	/* Saturate io_target through bdev A. */
	for (i = 0; i < AVAIL; i++) {
		status[i] = SPDK_BDEV_IO_STATUS_PENDING;
		rc = spdk_bdev_read_blocks(g_desc, io_ch, NULL, 0, 1, enomem_done, &status[i]);
		CU_ASSERT(rc == 0);
	}
	CU_ASSERT(TAILQ_EMPTY(&shared_resource->nomem_io));

	/*
	 * Now submit I/O through the second bdev. This should fail with ENOMEM
	 * and then go onto the nomem_io list.
	 */
	status[AVAIL] = SPDK_BDEV_IO_STATUS_PENDING;
	rc = spdk_bdev_read_blocks(second_desc, second_ch, NULL, 0, 1, enomem_done, &status[AVAIL]);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(!TAILQ_EMPTY(&shared_resource->nomem_io));

	/* Complete first bdev's I/O. This should retry sending second bdev's nomem_io */
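	/* (Both bdevs were registered on the same io_target above, so completing
	 * bdev A's I/O frees avail_cnt in the shared ut_bdev_channel and lets the
	 * shared_resource resubmit the I/O queued through bdev B.) */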
	stub_complete_io(g_bdev.io_target, AVAIL);

	SPDK_CU_ASSERT_FATAL(TAILQ_EMPTY(&shared_resource->nomem_io));
	CU_ASSERT(shared_resource->io_outstanding == 1);

	/* Now complete our retried I/O */
	stub_complete_io(g_bdev.io_target, 1);
	SPDK_CU_ASSERT_FATAL(shared_resource->io_outstanding == 0);

	spdk_put_io_channel(io_ch);
	spdk_put_io_channel(second_ch);
	spdk_bdev_close(second_desc);
	unregister_bdev(second_bdev);
	poll_threads();
	free(second_bdev);
	teardown_test();
}

static void
enomem_multi_bdev_unregister(void)
{
	struct spdk_io_channel *io_ch;
	struct spdk_bdev_channel *bdev_ch;
	struct spdk_bdev_shared_resource *shared_resource;
	struct ut_bdev_channel *ut_ch;
	const uint32_t IO_ARRAY_SIZE = 64;
	const uint32_t AVAIL = 20;
	enum spdk_bdev_io_status status[IO_ARRAY_SIZE];
	uint32_t i;
	int rc;

	setup_test();

	set_thread(0);
	io_ch = spdk_bdev_get_io_channel(g_desc);
	bdev_ch = spdk_io_channel_get_ctx(io_ch);
	shared_resource = bdev_ch->shared_resource;
	ut_ch = spdk_io_channel_get_ctx(bdev_ch->channel);
	ut_ch->avail_cnt = AVAIL;

	/* Saturate io_target through the bdev. */
	for (i = 0; i < AVAIL; i++) {
		status[i] = SPDK_BDEV_IO_STATUS_PENDING;
		rc = spdk_bdev_read_blocks(g_desc, io_ch, NULL, 0, 1, enomem_done, &status[i]);
		CU_ASSERT(rc == 0);
	}
	CU_ASSERT(TAILQ_EMPTY(&shared_resource->nomem_io));

	/*
	 * Now submit I/O through the bdev. This should fail with ENOMEM
	 * and then go onto the nomem_io list.
	 */
	status[AVAIL] = SPDK_BDEV_IO_STATUS_PENDING;
	rc = spdk_bdev_read_blocks(g_desc, io_ch, NULL, 0, 1, enomem_done, &status[AVAIL]);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(!TAILQ_EMPTY(&shared_resource->nomem_io));

	/* Unregister the bdev to abort the IOs from nomem_io queue. */
	unregister_bdev(&g_bdev);
	CU_ASSERT(status[AVAIL] == SPDK_BDEV_IO_STATUS_FAILED);
	SPDK_CU_ASSERT_FATAL(TAILQ_EMPTY(&shared_resource->nomem_io));
	SPDK_CU_ASSERT_FATAL(shared_resource->io_outstanding == AVAIL);

	/* Complete the bdev's I/O. */
	stub_complete_io(g_bdev.io_target, AVAIL);
	SPDK_CU_ASSERT_FATAL(shared_resource->io_outstanding == 0);

	spdk_put_io_channel(io_ch);
	poll_threads();
	teardown_test();
}

static void
enomem_multi_io_target(void)
{
	struct spdk_io_channel *io_ch;
	struct spdk_bdev_channel *bdev_ch;
	struct ut_bdev_channel *ut_ch;
	const uint32_t IO_ARRAY_SIZE = 64;
	const uint32_t AVAIL = 20;
	enum spdk_bdev_io_status status[IO_ARRAY_SIZE];
	uint32_t i;
	int new_io_device;
	struct ut_bdev *second_bdev;
	struct spdk_bdev_desc *second_desc = NULL;
	struct spdk_bdev_channel *second_bdev_ch;
	struct spdk_io_channel *second_ch;
	int rc;

	setup_test();

	/* Create new io_target and a second bdev using it */
	spdk_io_device_register(&new_io_device, stub_create_ch, stub_destroy_ch,
				sizeof(struct ut_bdev_channel), NULL);
	second_bdev = calloc(1, sizeof(*second_bdev));
	SPDK_CU_ASSERT_FATAL(second_bdev != NULL);
	register_bdev(second_bdev, "ut_bdev2", &new_io_device);
	spdk_bdev_open_ext("ut_bdev2", true, _bdev_event_cb, NULL, &second_desc);
	SPDK_CU_ASSERT_FATAL(second_desc != NULL);

	set_thread(0);
	io_ch = spdk_bdev_get_io_channel(g_desc);
	bdev_ch = spdk_io_channel_get_ctx(io_ch);
	ut_ch = spdk_io_channel_get_ctx(bdev_ch->channel);
	ut_ch->avail_cnt = AVAIL;

	/* Different io_target should imply a different shared_resource */
	second_ch = spdk_bdev_get_io_channel(second_desc);
	second_bdev_ch = spdk_io_channel_get_ctx(second_ch);
	SPDK_CU_ASSERT_FATAL(bdev_ch->shared_resource != second_bdev_ch->shared_resource);

	/* Saturate io_target through bdev A. */
	for (i = 0; i < AVAIL; i++) {
		status[i] = SPDK_BDEV_IO_STATUS_PENDING;
		rc = spdk_bdev_read_blocks(g_desc, io_ch, NULL, 0, 1, enomem_done, &status[i]);
		CU_ASSERT(rc == 0);
	}
	CU_ASSERT(TAILQ_EMPTY(&bdev_ch->shared_resource->nomem_io));

	/* Issue one more I/O to fill the nomem_io list. */
	status[AVAIL] = SPDK_BDEV_IO_STATUS_PENDING;
	rc = spdk_bdev_read_blocks(g_desc, io_ch, NULL, 0, 1, enomem_done, &status[AVAIL]);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(!TAILQ_EMPTY(&bdev_ch->shared_resource->nomem_io));

	/*
	 * Now submit I/O through the second bdev. This should go through and complete
	 * successfully because we're using a different io_device underneath.
	 */
	status[AVAIL] = SPDK_BDEV_IO_STATUS_PENDING;
	rc = spdk_bdev_read_blocks(second_desc, second_ch, NULL, 0, 1, enomem_done, &status[AVAIL]);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(TAILQ_EMPTY(&second_bdev_ch->shared_resource->nomem_io));
	stub_complete_io(second_bdev->io_target, 1);

	/* Cleanup; Complete outstanding I/O. */
	stub_complete_io(g_bdev.io_target, AVAIL);
	SPDK_CU_ASSERT_FATAL(TAILQ_EMPTY(&bdev_ch->shared_resource->nomem_io));
	/* Complete the ENOMEM I/O */
	stub_complete_io(g_bdev.io_target, 1);
	CU_ASSERT(bdev_ch->shared_resource->io_outstanding == 0);

	SPDK_CU_ASSERT_FATAL(TAILQ_EMPTY(&bdev_ch->shared_resource->nomem_io));
	CU_ASSERT(bdev_ch->shared_resource->io_outstanding == 0);
	spdk_put_io_channel(io_ch);
	spdk_put_io_channel(second_ch);
	spdk_bdev_close(second_desc);
	unregister_bdev(second_bdev);
	spdk_io_device_unregister(&new_io_device, NULL);
	poll_threads();
	free(second_bdev);
	teardown_test();
}

static void
qos_dynamic_enable_done(void *cb_arg, int status)
{
	int *rc = cb_arg;
	*rc = status;
}

static void
qos_dynamic_enable(void)
{
	struct spdk_io_channel *io_ch[2];
	struct spdk_bdev_channel *bdev_ch[2];
	struct spdk_bdev *bdev;
	enum spdk_bdev_io_status bdev_io_status[2];
	uint64_t limits[SPDK_BDEV_QOS_NUM_RATE_LIMIT_TYPES] = {};
	int status, second_status, rc, i;

	setup_test();
	MOCK_SET(spdk_get_ticks, 0);

	for (i = 0; i < SPDK_BDEV_QOS_NUM_RATE_LIMIT_TYPES; i++) {
		limits[i] = UINT64_MAX;
	}

	bdev = &g_bdev.bdev;

	g_get_io_channel = true;

	/* Create channels */
	set_thread(0);
	io_ch[0] = spdk_bdev_get_io_channel(g_desc);
	bdev_ch[0] = spdk_io_channel_get_ctx(io_ch[0]);
	CU_ASSERT(bdev_ch[0]->flags == 0);

	set_thread(1);
	io_ch[1] = spdk_bdev_get_io_channel(g_desc);
	bdev_ch[1] = spdk_io_channel_get_ctx(io_ch[1]);
	CU_ASSERT(bdev_ch[1]->flags == 0);

	set_thread(0);

	/*
	 * Enable QoS: Read/Write IOPS, Read/Write byte,
	 * Read only byte and Write only byte per second
	 * rate limits.
	 * More than 10 I/Os allowed per timeslice.
	 */
	status = -1;
	limits[SPDK_BDEV_QOS_RW_IOPS_RATE_LIMIT] = 10000;
	limits[SPDK_BDEV_QOS_RW_BPS_RATE_LIMIT] = 100;
	limits[SPDK_BDEV_QOS_R_BPS_RATE_LIMIT] = 100;
	limits[SPDK_BDEV_QOS_W_BPS_RATE_LIMIT] = 10;
	spdk_bdev_set_qos_rate_limits(bdev, limits, qos_dynamic_enable_done, &status);
	poll_threads();
	CU_ASSERT(status == 0);
	CU_ASSERT((bdev_ch[0]->flags & BDEV_CH_QOS_ENABLED) != 0);
	CU_ASSERT((bdev_ch[1]->flags & BDEV_CH_QOS_ENABLED) != 0);

	/*
	 * Submit and complete 10 I/O to fill the QoS allotment for this timeslice.
	 * Additional I/O will then be queued.
	 */
	set_thread(0);
	for (i = 0; i < 10; i++) {
		bdev_io_status[0] = SPDK_BDEV_IO_STATUS_PENDING;
		rc = spdk_bdev_read_blocks(g_desc, io_ch[0], NULL, 0, 1, io_during_io_done, &bdev_io_status[0]);
		CU_ASSERT(rc == 0);
		CU_ASSERT(bdev_io_status[0] == SPDK_BDEV_IO_STATUS_PENDING);
		poll_thread(0);
		stub_complete_io(g_bdev.io_target, 0);
		CU_ASSERT(bdev_io_status[0] == SPDK_BDEV_IO_STATUS_SUCCESS);
	}

	/*
	 * Send two more I/O. These I/O will be queued since the current timeslice allotment has been
	 * filled already.
	 * We want to test that, when QoS is disabled, these two I/O:
	 *  1) are not aborted
	 *  2) are sent back to their original thread for resubmission
	 */
	bdev_io_status[0] = SPDK_BDEV_IO_STATUS_PENDING;
	rc = spdk_bdev_read_blocks(g_desc, io_ch[0], NULL, 0, 1, io_during_io_done, &bdev_io_status[0]);
	CU_ASSERT(rc == 0);
	CU_ASSERT(bdev_io_status[0] == SPDK_BDEV_IO_STATUS_PENDING);
	set_thread(1);
	bdev_io_status[1] = SPDK_BDEV_IO_STATUS_PENDING;
	rc = spdk_bdev_read_blocks(g_desc, io_ch[1], NULL, 0, 1, io_during_io_done, &bdev_io_status[1]);
	CU_ASSERT(rc == 0);
	CU_ASSERT(bdev_io_status[1] == SPDK_BDEV_IO_STATUS_PENDING);
	poll_threads();

	/*
	 * Disable QoS: Read/Write IOPS, Read/Write byte,
	 * Read only byte rate limits
	 */
	status = -1;
	limits[SPDK_BDEV_QOS_RW_IOPS_RATE_LIMIT] = 0;
	limits[SPDK_BDEV_QOS_RW_BPS_RATE_LIMIT] = 0;
	limits[SPDK_BDEV_QOS_R_BPS_RATE_LIMIT] = 0;
	spdk_bdev_set_qos_rate_limits(bdev, limits, qos_dynamic_enable_done, &status);
	poll_threads();
	CU_ASSERT(status == 0);
	CU_ASSERT((bdev_ch[0]->flags & BDEV_CH_QOS_ENABLED) != 0);
	CU_ASSERT((bdev_ch[1]->flags & BDEV_CH_QOS_ENABLED) != 0);

	/* Disable QoS: Write only Byte per second rate limit */
	status = -1;
	limits[SPDK_BDEV_QOS_W_BPS_RATE_LIMIT] = 0;
	spdk_bdev_set_qos_rate_limits(bdev, limits, qos_dynamic_enable_done, &status);
	poll_threads();
	CU_ASSERT(status == 0);
	CU_ASSERT((bdev_ch[0]->flags & BDEV_CH_QOS_ENABLED) == 0);
	CU_ASSERT((bdev_ch[1]->flags & BDEV_CH_QOS_ENABLED) == 0);

	/*
	 * All I/O should have been resubmitted back on their original thread. Complete
	 * all I/O on thread 0, and ensure that only the thread 0 I/O was completed.
	 */
	set_thread(0);
	stub_complete_io(g_bdev.io_target, 0);
	poll_threads();
	CU_ASSERT(bdev_io_status[0] == SPDK_BDEV_IO_STATUS_SUCCESS);
	CU_ASSERT(bdev_io_status[1] == SPDK_BDEV_IO_STATUS_PENDING);

	/* Now complete all I/O on thread 1 and ensure the thread 1 I/O was completed. */
	set_thread(1);
	stub_complete_io(g_bdev.io_target, 0);
	poll_threads();
	CU_ASSERT(bdev_io_status[1] == SPDK_BDEV_IO_STATUS_SUCCESS);

	/* Disable QoS again */
	status = -1;
	limits[SPDK_BDEV_QOS_RW_IOPS_RATE_LIMIT] = 0;
	spdk_bdev_set_qos_rate_limits(bdev, limits, qos_dynamic_enable_done, &status);
	poll_threads();
	CU_ASSERT(status == 0); /* This should succeed */
	CU_ASSERT((bdev_ch[0]->flags & BDEV_CH_QOS_ENABLED) == 0);
	CU_ASSERT((bdev_ch[1]->flags & BDEV_CH_QOS_ENABLED) == 0);

	/* Enable QoS on thread 0 */
	status = -1;
	limits[SPDK_BDEV_QOS_RW_IOPS_RATE_LIMIT] = 10000;
	spdk_bdev_set_qos_rate_limits(bdev, limits, qos_dynamic_enable_done, &status);
	poll_threads();
	CU_ASSERT(status == 0);
	CU_ASSERT((bdev_ch[0]->flags & BDEV_CH_QOS_ENABLED) != 0);
	CU_ASSERT((bdev_ch[1]->flags & BDEV_CH_QOS_ENABLED) != 0);

	/* Disable QoS on thread 1 */
	set_thread(1);
	status = -1;
	limits[SPDK_BDEV_QOS_RW_IOPS_RATE_LIMIT] = 0;
	spdk_bdev_set_qos_rate_limits(bdev, limits, qos_dynamic_enable_done, &status);
	/* Don't poll yet. This should leave the channels with QoS enabled */
	CU_ASSERT(status == -1);
	CU_ASSERT((bdev_ch[0]->flags & BDEV_CH_QOS_ENABLED) != 0);
	CU_ASSERT((bdev_ch[1]->flags & BDEV_CH_QOS_ENABLED) != 0);

	/* Enable QoS.
This should immediately fail because the previous disable QoS hasn't completed. */ 1704 second_status = 0; 1705 limits[SPDK_BDEV_QOS_RW_BPS_RATE_LIMIT] = 10; 1706 spdk_bdev_set_qos_rate_limits(bdev, limits, qos_dynamic_enable_done, &second_status); 1707 poll_threads(); 1708 CU_ASSERT(status == 0); /* The disable should succeed */ 1709 CU_ASSERT(second_status < 0); /* The enable should fail */ 1710 CU_ASSERT((bdev_ch[0]->flags & BDEV_CH_QOS_ENABLED) == 0); 1711 CU_ASSERT((bdev_ch[1]->flags & BDEV_CH_QOS_ENABLED) == 0); 1712 1713 /* Enable QoS on thread 1. This should succeed now that the disable has completed. */ 1714 status = -1; 1715 limits[SPDK_BDEV_QOS_RW_IOPS_RATE_LIMIT] = 10000; 1716 spdk_bdev_set_qos_rate_limits(bdev, limits, qos_dynamic_enable_done, &status); 1717 poll_threads(); 1718 CU_ASSERT(status == 0); 1719 CU_ASSERT((bdev_ch[0]->flags & BDEV_CH_QOS_ENABLED) != 0); 1720 CU_ASSERT((bdev_ch[1]->flags & BDEV_CH_QOS_ENABLED) != 0); 1721 1722 /* Tear down the channels */ 1723 set_thread(0); 1724 spdk_put_io_channel(io_ch[0]); 1725 set_thread(1); 1726 spdk_put_io_channel(io_ch[1]); 1727 poll_threads(); 1728 1729 set_thread(0); 1730 teardown_test(); 1731 } 1732 1733 static void 1734 histogram_status_cb(void *cb_arg, int status) 1735 { 1736 g_status = status; 1737 } 1738 1739 static void 1740 histogram_data_cb(void *cb_arg, int status, struct spdk_histogram_data *histogram) 1741 { 1742 g_status = status; 1743 g_histogram = histogram; 1744 } 1745 1746 static void 1747 histogram_io_count(void *ctx, uint64_t start, uint64_t end, uint64_t count, 1748 uint64_t total, uint64_t so_far) 1749 { 1750 g_count += count; 1751 } 1752 1753 static void 1754 bdev_histograms_mt(void) 1755 { 1756 struct spdk_io_channel *ch[2]; 1757 struct spdk_histogram_data *histogram; 1758 uint8_t buf[4096]; 1759 int status = false; 1760 int rc; 1761 1762 1763 setup_test(); 1764 1765 set_thread(0); 1766 ch[0] = spdk_bdev_get_io_channel(g_desc); 1767 CU_ASSERT(ch[0] != NULL); 1768 1769 set_thread(1); 1770 ch[1] = spdk_bdev_get_io_channel(g_desc); 1771 CU_ASSERT(ch[1] != NULL); 1772 1773 1774 /* Enable histogram */ 1775 spdk_bdev_histogram_enable(&g_bdev.bdev, histogram_status_cb, NULL, true); 1776 poll_threads(); 1777 CU_ASSERT(g_status == 0); 1778 CU_ASSERT(g_bdev.bdev.internal.histogram_enabled == true); 1779 1780 /* Allocate histogram */ 1781 histogram = spdk_histogram_data_alloc(); 1782 1783 /* Check if histogram is zeroed */ 1784 spdk_bdev_histogram_get(&g_bdev.bdev, histogram, histogram_data_cb, NULL); 1785 poll_threads(); 1786 CU_ASSERT(g_status == 0); 1787 SPDK_CU_ASSERT_FATAL(g_histogram != NULL); 1788 1789 g_count = 0; 1790 spdk_histogram_data_iterate(g_histogram, histogram_io_count, NULL); 1791 1792 CU_ASSERT(g_count == 0); 1793 1794 set_thread(0); 1795 rc = spdk_bdev_write_blocks(g_desc, ch[0], &buf, 0, 1, io_during_io_done, &status); 1796 CU_ASSERT(rc == 0); 1797 1798 spdk_delay_us(10); 1799 stub_complete_io(g_bdev.io_target, 1); 1800 poll_threads(); 1801 CU_ASSERT(status == true); 1802 1803 1804 set_thread(1); 1805 rc = spdk_bdev_read_blocks(g_desc, ch[1], &buf, 0, 1, io_during_io_done, &status); 1806 CU_ASSERT(rc == 0); 1807 1808 spdk_delay_us(10); 1809 stub_complete_io(g_bdev.io_target, 1); 1810 poll_threads(); 1811 CU_ASSERT(status == true); 1812 1813 set_thread(0); 1814 1815 /* Check if histogram gathered data from all I/O channels */ 1816 spdk_bdev_histogram_get(&g_bdev.bdev, histogram, histogram_data_cb, NULL); 1817 poll_threads(); 1818 CU_ASSERT(g_status == 0); 1819 
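/* histogram_data_cb ran during poll_threads() above and stored the histogram gathered from all channels in g_histogram. */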
CU_ASSERT(g_bdev.bdev.internal.histogram_enabled == true);
1820 SPDK_CU_ASSERT_FATAL(g_histogram != NULL);
1821
1822 g_count = 0;
1823 spdk_histogram_data_iterate(g_histogram, histogram_io_count, NULL);
1824 CU_ASSERT(g_count == 2);
1825
1826 /* Disable histogram */
1827 spdk_bdev_histogram_enable(&g_bdev.bdev, histogram_status_cb, NULL, false);
1828 poll_threads();
1829 CU_ASSERT(g_status == 0);
1830 CU_ASSERT(g_bdev.bdev.internal.histogram_enabled == false);
1831
1832 spdk_histogram_data_free(histogram);
1833
1834 /* Tear down the channels */
1835 set_thread(0);
1836 spdk_put_io_channel(ch[0]);
1837 set_thread(1);
1838 spdk_put_io_channel(ch[1]);
1839 poll_threads();
1840 set_thread(0);
1841 teardown_test();
1842
1843 }
1844
1845 struct timeout_io_cb_arg {
1846 struct iovec iov;
1847 uint8_t type;
1848 };
1849
1850 static int
1851 bdev_channel_count_submitted_io(struct spdk_bdev_channel *ch)
1852 {
1853 struct spdk_bdev_io *bdev_io;
1854 int n = 0;
1855
1856 if (!ch) {
1857 return -1;
1858 }
1859
1860 TAILQ_FOREACH(bdev_io, &ch->io_submitted, internal.ch_link) {
1861 n++;
1862 }
1863
1864 return n;
1865 }
1866
1867 static void
1868 bdev_channel_io_timeout_cb(void *cb_arg, struct spdk_bdev_io *bdev_io)
1869 {
1870 struct timeout_io_cb_arg *ctx = cb_arg;
1871
1872 ctx->type = bdev_io->type;
1873 ctx->iov.iov_base = bdev_io->iov.iov_base;
1874 ctx->iov.iov_len = bdev_io->iov.iov_len;
1875 }
1876
1877 static bool g_io_done;
1878
1879 static void
1880 io_done(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
1881 {
1882 g_io_done = true;
1883 spdk_bdev_free_io(bdev_io);
1884 }
1885
1886 static void
1887 bdev_set_io_timeout_mt(void)
1888 {
1889 struct spdk_io_channel *ch[3];
1890 struct spdk_bdev_channel *bdev_ch[3];
1891 struct timeout_io_cb_arg cb_arg;
1892
1893 setup_test();
1894
1895 g_bdev.bdev.optimal_io_boundary = 16;
1896 g_bdev.bdev.split_on_optimal_io_boundary = true;
1897
1898 set_thread(0);
1899 ch[0] = spdk_bdev_get_io_channel(g_desc);
1900 CU_ASSERT(ch[0] != NULL);
1901
1902 set_thread(1);
1903 ch[1] = spdk_bdev_get_io_channel(g_desc);
1904 CU_ASSERT(ch[1] != NULL);
1905
1906 set_thread(2);
1907 ch[2] = spdk_bdev_get_io_channel(g_desc);
1908 CU_ASSERT(ch[2] != NULL);
1909
1910 /* Multi-thread mode
1911 * 1. Check that the poller was registered successfully
1912 * 2. Check the timeout I/O and ensure it was the one submitted by the user
1913 * 3. Check that the link in the bdev_ch works correctly.
1914 * 4. Close the desc and put the io channel while the timeout poller is polling
1915 */
1916
1917 /* Set the timeout in the desc thread */
1918 set_thread(0);
1919 CU_ASSERT(spdk_bdev_set_timeout(g_desc, 5, bdev_channel_io_timeout_cb, &cb_arg) == 0);
1920 CU_ASSERT(g_desc->io_timeout_poller != NULL);
1921 CU_ASSERT(g_desc->cb_fn == bdev_channel_io_timeout_cb);
1922 CU_ASSERT(g_desc->cb_arg == &cb_arg);
1923
1924 /* Check the I/O submitted list and the timeout handler */
1925 CU_ASSERT(spdk_bdev_read_blocks(g_desc, ch[0], (void *)0x2000, 0, 1, io_done, NULL) == 0);
1926 bdev_ch[0] = spdk_io_channel_get_ctx(ch[0]);
1927 CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch[0]) == 1);
1928
1929 set_thread(1);
1930 CU_ASSERT(spdk_bdev_write_blocks(g_desc, ch[1], (void *)0x1000, 0, 1, io_done, NULL) == 0);
1931 bdev_ch[1] = spdk_io_channel_get_ctx(ch[1]);
1932 CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch[1]) == 1);
1933
1934 /* Now test that a single-vector command is split correctly.
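* (The split is expected because optimal_io_boundary was set to 16 and split_on_optimal_io_boundary to true above, so an 8-block read at offset 14 crosses a boundary.)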
1935 * Offset 14, length 8, payload 0xF000
1936 * Child - Offset 14, length 2, payload 0xF000
1937 * Child - Offset 16, length 6, payload 0xF000 + 2 * 512
1938 *
1939 * Set up the expected values before calling spdk_bdev_read_blocks
1940 */
1941 set_thread(2);
1942 CU_ASSERT(spdk_bdev_read_blocks(g_desc, ch[2], (void *)0xF000, 14, 8, io_done, NULL) == 0);
1943 bdev_ch[2] = spdk_io_channel_get_ctx(ch[2]);
1944 CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch[2]) == 3);
1945
1946 set_thread(0);
1947 memset(&cb_arg, 0, sizeof(cb_arg));
1948 spdk_delay_us(3 * spdk_get_ticks_hz());
1949 poll_threads();
1950 CU_ASSERT(cb_arg.type == 0);
1951 CU_ASSERT(cb_arg.iov.iov_base == (void *)0x0);
1952 CU_ASSERT(cb_arg.iov.iov_len == 0);
1953
1954 /* Now the time reaches the limit */
1955 spdk_delay_us(3 * spdk_get_ticks_hz());
1956 poll_thread(0);
1957 CU_ASSERT(cb_arg.type == SPDK_BDEV_IO_TYPE_READ);
1958 CU_ASSERT(cb_arg.iov.iov_base == (void *)0x2000);
1959 CU_ASSERT(cb_arg.iov.iov_len == 1 * g_bdev.bdev.blocklen);
1960 stub_complete_io(g_bdev.io_target, 1);
1961 CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch[0]) == 0);
1962
1963 memset(&cb_arg, 0, sizeof(cb_arg));
1964 set_thread(1);
1965 poll_thread(1);
1966 CU_ASSERT(cb_arg.type == SPDK_BDEV_IO_TYPE_WRITE);
1967 CU_ASSERT(cb_arg.iov.iov_base == (void *)0x1000);
1968 CU_ASSERT(cb_arg.iov.iov_len == 1 * g_bdev.bdev.blocklen);
1969 stub_complete_io(g_bdev.io_target, 1);
1970 CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch[1]) == 0);
1971
1972 memset(&cb_arg, 0, sizeof(cb_arg));
1973 set_thread(2);
1974 poll_thread(2);
1975 CU_ASSERT(cb_arg.type == SPDK_BDEV_IO_TYPE_READ);
1976 CU_ASSERT(cb_arg.iov.iov_base == (void *)0xF000);
1977 CU_ASSERT(cb_arg.iov.iov_len == 8 * g_bdev.bdev.blocklen);
1978 stub_complete_io(g_bdev.io_target, 1);
1979 CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch[2]) == 2);
1980 stub_complete_io(g_bdev.io_target, 1);
1981 CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch[2]) == 0);
1982
1983 /* Run poll_timeout_done(), which completes the timeout poller */
1984 set_thread(0);
1985 poll_thread(0);
1986 CU_ASSERT(g_desc->refs == 0);
1987 CU_ASSERT(spdk_bdev_read_blocks(g_desc, ch[0], (void *)0x1000, 0, 1, io_done, NULL) == 0);
1988 set_thread(1);
1989 CU_ASSERT(spdk_bdev_write_blocks(g_desc, ch[1], (void *)0x2000, 0, 2, io_done, NULL) == 0);
1990 set_thread(2);
1991 CU_ASSERT(spdk_bdev_read_blocks(g_desc, ch[2], (void *)0x3000, 0, 3, io_done, NULL) == 0);
1992
1993 /* Trigger the timeout poller to run again; desc->refs is incremented.
1994 * In thread 0 we destroy the io channel before the timeout poller runs.
1995 * Timeout callback is not called on thread 0.
1996 */
1997 spdk_delay_us(6 * spdk_get_ticks_hz());
1998 memset(&cb_arg, 0, sizeof(cb_arg));
1999 set_thread(0);
2000 stub_complete_io(g_bdev.io_target, 1);
2001 spdk_put_io_channel(ch[0]);
2002 poll_thread(0);
2003 CU_ASSERT(g_desc->refs == 1);
2004 CU_ASSERT(cb_arg.type == 0);
2005 CU_ASSERT(cb_arg.iov.iov_base == (void *)0x0);
2006 CU_ASSERT(cb_arg.iov.iov_len == 0);
2007
2008 /* In thread 1 the timeout poller runs, then we destroy the io channel.
2009 * Timeout callback is called on thread 1.
2010 */
2011 memset(&cb_arg, 0, sizeof(cb_arg));
2012 set_thread(1);
2013 poll_thread(1);
2014 stub_complete_io(g_bdev.io_target, 1);
2015 spdk_put_io_channel(ch[1]);
2016 poll_thread(1);
2017 CU_ASSERT(cb_arg.type == SPDK_BDEV_IO_TYPE_WRITE);
2018 CU_ASSERT(cb_arg.iov.iov_base == (void *)0x2000);
2019 CU_ASSERT(cb_arg.iov.iov_len == 2 * g_bdev.bdev.blocklen);
2020
2021 /* Close the desc.
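* (The close below runs on thread 0, the desc thread where spdk_bdev_set_timeout() was called.)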
2022 * Unregister the timeout poller first. 2023 * Then decrement desc->refs but it's not zero yet so desc is not freed. 2024 */ 2025 set_thread(0); 2026 spdk_bdev_close(g_desc); 2027 CU_ASSERT(g_desc->refs == 1); 2028 CU_ASSERT(g_desc->io_timeout_poller == NULL); 2029 2030 /* Timeout poller runs on thread 2 then we destroy the io channel. 2031 * Desc is closed so we would exit the timeout poller directly. 2032 * timeout callback is not called on thread 2. 2033 */ 2034 memset(&cb_arg, 0, sizeof(cb_arg)); 2035 set_thread(2); 2036 poll_thread(2); 2037 stub_complete_io(g_bdev.io_target, 1); 2038 spdk_put_io_channel(ch[2]); 2039 poll_thread(2); 2040 CU_ASSERT(cb_arg.type == 0); 2041 CU_ASSERT(cb_arg.iov.iov_base == (void *)0x0); 2042 CU_ASSERT(cb_arg.iov.iov_len == 0); 2043 2044 set_thread(0); 2045 poll_thread(0); 2046 g_teardown_done = false; 2047 unregister_bdev(&g_bdev); 2048 spdk_io_device_unregister(&g_io_device, NULL); 2049 spdk_bdev_finish(finish_cb, NULL); 2050 spdk_iobuf_finish(finish_cb, NULL); 2051 poll_threads(); 2052 memset(&g_bdev, 0, sizeof(g_bdev)); 2053 CU_ASSERT(g_teardown_done == true); 2054 g_teardown_done = false; 2055 free_threads(); 2056 free_cores(); 2057 } 2058 2059 static bool g_io_done2; 2060 static bool g_lock_lba_range_done; 2061 static bool g_unlock_lba_range_done; 2062 2063 static void 2064 io_done2(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg) 2065 { 2066 g_io_done2 = true; 2067 spdk_bdev_free_io(bdev_io); 2068 } 2069 2070 static void 2071 lock_lba_range_done(void *ctx, int status) 2072 { 2073 g_lock_lba_range_done = true; 2074 } 2075 2076 static void 2077 unlock_lba_range_done(void *ctx, int status) 2078 { 2079 g_unlock_lba_range_done = true; 2080 } 2081 2082 static uint32_t 2083 stub_channel_outstanding_cnt(void *io_target) 2084 { 2085 struct spdk_io_channel *_ch = spdk_get_io_channel(io_target); 2086 struct ut_bdev_channel *ch = spdk_io_channel_get_ctx(_ch); 2087 uint32_t outstanding_cnt; 2088 2089 outstanding_cnt = ch->outstanding_cnt; 2090 2091 spdk_put_io_channel(_ch); 2092 return outstanding_cnt; 2093 } 2094 2095 static void 2096 lock_lba_range_then_submit_io(void) 2097 { 2098 struct spdk_bdev_desc *desc = NULL; 2099 void *io_target; 2100 struct spdk_io_channel *io_ch[3]; 2101 struct spdk_bdev_channel *bdev_ch[3]; 2102 struct lba_range *range; 2103 char buf[4096]; 2104 int ctx0, ctx1, ctx2; 2105 int rc; 2106 2107 setup_test(); 2108 2109 io_target = g_bdev.io_target; 2110 desc = g_desc; 2111 2112 set_thread(0); 2113 io_ch[0] = spdk_bdev_get_io_channel(desc); 2114 bdev_ch[0] = spdk_io_channel_get_ctx(io_ch[0]); 2115 CU_ASSERT(io_ch[0] != NULL); 2116 2117 set_thread(1); 2118 io_ch[1] = spdk_bdev_get_io_channel(desc); 2119 bdev_ch[1] = spdk_io_channel_get_ctx(io_ch[1]); 2120 CU_ASSERT(io_ch[1] != NULL); 2121 2122 set_thread(0); 2123 g_lock_lba_range_done = false; 2124 rc = bdev_lock_lba_range(desc, io_ch[0], 20, 10, lock_lba_range_done, &ctx0); 2125 CU_ASSERT(rc == 0); 2126 poll_threads(); 2127 2128 /* The lock should immediately become valid, since there are no outstanding 2129 * write I/O. 
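* The completion is still asynchronous: lock_lba_range_done runs from the poll_threads() call above and sets g_lock_lba_range_done.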
2130 */ 2131 CU_ASSERT(g_lock_lba_range_done == true); 2132 range = TAILQ_FIRST(&bdev_ch[0]->locked_ranges); 2133 SPDK_CU_ASSERT_FATAL(range != NULL); 2134 CU_ASSERT(range->offset == 20); 2135 CU_ASSERT(range->length == 10); 2136 CU_ASSERT(range->owner_ch == bdev_ch[0]); 2137 2138 g_io_done = false; 2139 CU_ASSERT(TAILQ_EMPTY(&bdev_ch[0]->io_locked)); 2140 rc = spdk_bdev_read_blocks(desc, io_ch[0], buf, 20, 1, io_done, &ctx0); 2141 CU_ASSERT(rc == 0); 2142 CU_ASSERT(stub_channel_outstanding_cnt(io_target) == 1); 2143 2144 stub_complete_io(io_target, 1); 2145 poll_threads(); 2146 CU_ASSERT(g_io_done == true); 2147 CU_ASSERT(TAILQ_EMPTY(&bdev_ch[0]->io_locked)); 2148 2149 /* Try a write I/O. This should actually be allowed to execute, since the channel 2150 * holding the lock is submitting the write I/O. 2151 */ 2152 g_io_done = false; 2153 CU_ASSERT(TAILQ_EMPTY(&bdev_ch[0]->io_locked)); 2154 rc = spdk_bdev_write_blocks(desc, io_ch[0], buf, 20, 1, io_done, &ctx0); 2155 CU_ASSERT(rc == 0); 2156 CU_ASSERT(stub_channel_outstanding_cnt(io_target) == 1); 2157 2158 stub_complete_io(io_target, 1); 2159 poll_threads(); 2160 CU_ASSERT(g_io_done == true); 2161 CU_ASSERT(TAILQ_EMPTY(&bdev_ch[0]->io_locked)); 2162 2163 /* Try a write I/O. This should get queued in the io_locked tailq. */ 2164 set_thread(1); 2165 g_io_done = false; 2166 CU_ASSERT(TAILQ_EMPTY(&bdev_ch[1]->io_locked)); 2167 rc = spdk_bdev_write_blocks(desc, io_ch[1], buf, 20, 1, io_done, &ctx1); 2168 CU_ASSERT(rc == 0); 2169 poll_threads(); 2170 CU_ASSERT(stub_channel_outstanding_cnt(io_target) == 0); 2171 CU_ASSERT(!TAILQ_EMPTY(&bdev_ch[1]->io_locked)); 2172 CU_ASSERT(g_io_done == false); 2173 2174 /* Try to unlock the lba range using thread 1's io_ch. This should fail. */ 2175 rc = bdev_unlock_lba_range(desc, io_ch[1], 20, 10, unlock_lba_range_done, &ctx1); 2176 CU_ASSERT(rc == -EINVAL); 2177 2178 /* Now create a new channel and submit a write I/O with it. This should also be queued. 2179 * The new channel should inherit the active locks from the bdev's internal list. 2180 */ 2181 set_thread(2); 2182 io_ch[2] = spdk_bdev_get_io_channel(desc); 2183 bdev_ch[2] = spdk_io_channel_get_ctx(io_ch[2]); 2184 CU_ASSERT(io_ch[2] != NULL); 2185 2186 g_io_done2 = false; 2187 CU_ASSERT(TAILQ_EMPTY(&bdev_ch[2]->io_locked)); 2188 rc = spdk_bdev_write_blocks(desc, io_ch[2], buf, 22, 2, io_done2, &ctx2); 2189 CU_ASSERT(rc == 0); 2190 poll_threads(); 2191 CU_ASSERT(stub_channel_outstanding_cnt(io_target) == 0); 2192 CU_ASSERT(!TAILQ_EMPTY(&bdev_ch[2]->io_locked)); 2193 CU_ASSERT(g_io_done2 == false); 2194 2195 set_thread(0); 2196 rc = bdev_unlock_lba_range(desc, io_ch[0], 20, 10, unlock_lba_range_done, &ctx0); 2197 CU_ASSERT(rc == 0); 2198 poll_threads(); 2199 CU_ASSERT(TAILQ_EMPTY(&bdev_ch[0]->locked_ranges)); 2200 2201 /* The LBA range is unlocked, so the write IOs should now have started execution. 
*/ 2202 CU_ASSERT(TAILQ_EMPTY(&bdev_ch[1]->io_locked)); 2203 CU_ASSERT(TAILQ_EMPTY(&bdev_ch[2]->io_locked)); 2204 2205 set_thread(1); 2206 CU_ASSERT(stub_channel_outstanding_cnt(io_target) == 1); 2207 stub_complete_io(io_target, 1); 2208 set_thread(2); 2209 CU_ASSERT(stub_channel_outstanding_cnt(io_target) == 1); 2210 stub_complete_io(io_target, 1); 2211 2212 poll_threads(); 2213 CU_ASSERT(g_io_done == true); 2214 CU_ASSERT(g_io_done2 == true); 2215 2216 /* Tear down the channels */ 2217 set_thread(0); 2218 spdk_put_io_channel(io_ch[0]); 2219 set_thread(1); 2220 spdk_put_io_channel(io_ch[1]); 2221 set_thread(2); 2222 spdk_put_io_channel(io_ch[2]); 2223 poll_threads(); 2224 set_thread(0); 2225 teardown_test(); 2226 } 2227 2228 /* spdk_bdev_reset() freezes and unfreezes I/O channels by using spdk_for_each_channel(). 2229 * spdk_bdev_unregister() calls spdk_io_device_unregister() in the end. However 2230 * spdk_io_device_unregister() fails if it is called while executing spdk_for_each_channel(). 2231 * Hence, in this case, spdk_io_device_unregister() is deferred until spdk_bdev_reset() 2232 * completes. Test this behavior. 2233 */ 2234 static void 2235 unregister_during_reset(void) 2236 { 2237 struct spdk_io_channel *io_ch[2]; 2238 bool done_reset = false, done_unregister = false; 2239 int rc; 2240 2241 setup_test(); 2242 set_thread(0); 2243 2244 io_ch[0] = spdk_bdev_get_io_channel(g_desc); 2245 SPDK_CU_ASSERT_FATAL(io_ch[0] != NULL); 2246 2247 set_thread(1); 2248 2249 io_ch[1] = spdk_bdev_get_io_channel(g_desc); 2250 SPDK_CU_ASSERT_FATAL(io_ch[1] != NULL); 2251 2252 set_thread(0); 2253 2254 CU_ASSERT(g_bdev.bdev.internal.reset_in_progress == NULL); 2255 2256 rc = spdk_bdev_reset(g_desc, io_ch[0], reset_done, &done_reset); 2257 CU_ASSERT(rc == 0); 2258 2259 set_thread(0); 2260 2261 poll_thread_times(0, 1); 2262 2263 spdk_bdev_close(g_desc); 2264 spdk_bdev_unregister(&g_bdev.bdev, _bdev_unregistered, &done_unregister); 2265 2266 CU_ASSERT(done_reset == false); 2267 CU_ASSERT(done_unregister == false); 2268 2269 poll_threads(); 2270 2271 stub_complete_io(g_bdev.io_target, 0); 2272 2273 poll_threads(); 2274 2275 CU_ASSERT(done_reset == true); 2276 CU_ASSERT(done_unregister == false); 2277 2278 spdk_put_io_channel(io_ch[0]); 2279 2280 set_thread(1); 2281 2282 spdk_put_io_channel(io_ch[1]); 2283 2284 poll_threads(); 2285 2286 CU_ASSERT(done_unregister == true); 2287 2288 /* Restore the original g_bdev so that we can use teardown_test(). 
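* The bdev was unregistered earlier in this test, so register it again and re-open g_desc before running the common teardown.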
*/ 2289 set_thread(0); 2290 register_bdev(&g_bdev, "ut_bdev", &g_io_device); 2291 spdk_bdev_open_ext("ut_bdev", true, _bdev_event_cb, NULL, &g_desc); 2292 teardown_test(); 2293 } 2294 2295 int 2296 main(int argc, char **argv) 2297 { 2298 CU_pSuite suite = NULL; 2299 unsigned int num_failures; 2300 2301 CU_set_error_action(CUEA_ABORT); 2302 CU_initialize_registry(); 2303 2304 suite = CU_add_suite("bdev", NULL, NULL); 2305 2306 CU_ADD_TEST(suite, basic); 2307 CU_ADD_TEST(suite, unregister_and_close); 2308 CU_ADD_TEST(suite, basic_qos); 2309 CU_ADD_TEST(suite, put_channel_during_reset); 2310 CU_ADD_TEST(suite, aborted_reset); 2311 CU_ADD_TEST(suite, aborted_reset_no_outstanding_io); 2312 CU_ADD_TEST(suite, io_during_reset); 2313 CU_ADD_TEST(suite, reset_completions); 2314 CU_ADD_TEST(suite, io_during_qos_queue); 2315 CU_ADD_TEST(suite, io_during_qos_reset); 2316 CU_ADD_TEST(suite, enomem); 2317 CU_ADD_TEST(suite, enomem_multi_bdev); 2318 CU_ADD_TEST(suite, enomem_multi_bdev_unregister); 2319 CU_ADD_TEST(suite, enomem_multi_io_target); 2320 CU_ADD_TEST(suite, qos_dynamic_enable); 2321 CU_ADD_TEST(suite, bdev_histograms_mt); 2322 CU_ADD_TEST(suite, bdev_set_io_timeout_mt); 2323 CU_ADD_TEST(suite, lock_lba_range_then_submit_io); 2324 CU_ADD_TEST(suite, unregister_during_reset); 2325 2326 CU_basic_set_mode(CU_BRM_VERBOSE); 2327 CU_basic_run_tests(); 2328 num_failures = CU_get_number_of_failures(); 2329 CU_cleanup_registry(); 2330 return num_failures; 2331 } 2332