/*-
 *   BSD LICENSE
 *
 *   Copyright (c) Intel Corporation.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "spdk_cunit.h"

#include "common/lib/test_env.c"
#include "common/lib/ut_multithread.c"
#include "unit/lib/json_mock.c"

/* HACK: disable VTune integration so the unit test doesn't need VTune headers and libs to build */
#undef SPDK_CONFIG_VTUNE

#include "bdev/bdev.c"

#define BDEV_UT_NUM_THREADS 3

DEFINE_STUB_V(spdk_scsi_nvme_translate, (const struct spdk_bdev_io *bdev_io,
		int *sc, int *sk, int *asc, int *ascq));

/* Return NULL to test hardcoded defaults. */
struct spdk_conf_section *
spdk_conf_find_section(struct spdk_conf *cp, const char *name)
{
	return NULL;
}

/* Return NULL to test hardcoded defaults. */
char *
spdk_conf_section_get_nmval(struct spdk_conf_section *sp, const char *key, int idx1, int idx2)
{
	return NULL;
}

struct ut_bdev {
	struct spdk_bdev bdev;
	void *io_target;
};

struct ut_bdev_channel {
	TAILQ_HEAD(, spdk_bdev_io) outstanding_io;
	uint32_t outstanding_cnt;
	uint32_t avail_cnt;
};

int g_io_device;
struct ut_bdev g_bdev;
struct spdk_bdev_desc *g_desc;
bool g_teardown_done = false;
bool g_get_io_channel = true;
bool g_create_ch = true;
bool g_init_complete_called = false;

static int
stub_create_ch(void *io_device, void *ctx_buf)
{
	struct ut_bdev_channel *ch = ctx_buf;

	if (g_create_ch == false) {
		return -1;
	}

	TAILQ_INIT(&ch->outstanding_io);
	ch->outstanding_cnt = 0;
	/*
	 * When avail gets to 0, the submit_request function will return ENOMEM.
	 * Most tests do not want ENOMEM to occur, so by default set this to a
	 * big value that won't get hit. The ENOMEM tests can then override this
	 * value to something much smaller to induce ENOMEM conditions.
	 */
	ch->avail_cnt = 2048;
	return 0;
}

static void
stub_destroy_ch(void *io_device, void *ctx_buf)
{
}

static struct spdk_io_channel *
stub_get_io_channel(void *ctx)
{
	struct ut_bdev *ut_bdev = ctx;

	if (g_get_io_channel == true) {
		return spdk_get_io_channel(ut_bdev->io_target);
	} else {
		return NULL;
	}
}

static int
stub_destruct(void *ctx)
{
	return 0;
}

/*
 * Stub submit_request handler: a RESET immediately fails all I/O still
 * outstanding on the channel.  Any request (including the reset itself) is
 * then queued until stub_complete_io() finishes it, or completed with NOMEM
 * once avail_cnt is exhausted.
 */
static void
stub_submit_request(struct spdk_io_channel *_ch, struct spdk_bdev_io *bdev_io)
{
	struct ut_bdev_channel *ch = spdk_io_channel_get_ctx(_ch);

	if (bdev_io->type == SPDK_BDEV_IO_TYPE_RESET) {
		struct spdk_bdev_io *io;

		while (!TAILQ_EMPTY(&ch->outstanding_io)) {
			io = TAILQ_FIRST(&ch->outstanding_io);
			TAILQ_REMOVE(&ch->outstanding_io, io, module_link);
			ch->outstanding_cnt--;
			spdk_bdev_io_complete(io, SPDK_BDEV_IO_STATUS_FAILED);
			ch->avail_cnt++;
		}
	}

	if (ch->avail_cnt > 0) {
		TAILQ_INSERT_TAIL(&ch->outstanding_io, bdev_io, module_link);
		ch->outstanding_cnt++;
		ch->avail_cnt--;
	} else {
		spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_NOMEM);
	}
}

static uint32_t
stub_complete_io(void *io_target, uint32_t num_to_complete)
{
	struct spdk_io_channel *_ch = spdk_get_io_channel(io_target);
	struct ut_bdev_channel *ch = spdk_io_channel_get_ctx(_ch);
	struct spdk_bdev_io *io;
	bool complete_all = (num_to_complete == 0);
	uint32_t num_completed = 0;

	while (complete_all || num_completed < num_to_complete) {
		if (TAILQ_EMPTY(&ch->outstanding_io)) {
			break;
		}
		io = TAILQ_FIRST(&ch->outstanding_io);
		TAILQ_REMOVE(&ch->outstanding_io, io, module_link);
		ch->outstanding_cnt--;
		spdk_bdev_io_complete(io, SPDK_BDEV_IO_STATUS_SUCCESS);
		ch->avail_cnt++;
		num_completed++;
	}

	spdk_put_io_channel(_ch);
	return num_completed;
}

static struct spdk_bdev_fn_table fn_table = {
	.get_io_channel = stub_get_io_channel,
	.destruct = stub_destruct,
	.submit_request = stub_submit_request,
};

static int
module_init(void)
{
	return 0;
}

static void
module_fini(void)
{
}

static void
init_complete(void)
{
	g_init_complete_called = true;
}

struct spdk_bdev_module bdev_ut_if = {
	.name = "bdev_ut",
	.module_init = module_init,
	.module_fini = module_fini,
	.init_complete = init_complete,
};

SPDK_BDEV_MODULE_REGISTER(&bdev_ut_if)

static void
register_bdev(struct ut_bdev *ut_bdev, char *name, void *io_target)
{
	memset(ut_bdev, 0, sizeof(*ut_bdev));

	ut_bdev->io_target = io_target;
	ut_bdev->bdev.ctxt = ut_bdev;
	ut_bdev->bdev.name = name;
	ut_bdev->bdev.fn_table = &fn_table;
	ut_bdev->bdev.module = &bdev_ut_if;
	ut_bdev->bdev.blocklen = 4096;
	ut_bdev->bdev.blockcnt = 1024;

	spdk_bdev_register(&ut_bdev->bdev);
}

static void
unregister_bdev(struct ut_bdev *ut_bdev)
{
	/* Handle any deferred messages. */
	poll_threads();
	spdk_bdev_unregister(&ut_bdev->bdev, NULL, NULL);
}

static void
bdev_init_cb(void *done, int rc)
{
	CU_ASSERT(rc == 0);
	*(bool *)done = true;
}

static void
setup_test(void)
{
	bool done = false;

	allocate_threads(BDEV_UT_NUM_THREADS);
	spdk_bdev_initialize(bdev_init_cb, &done);
	spdk_io_device_register(&g_io_device, stub_create_ch, stub_destroy_ch,
				sizeof(struct ut_bdev_channel));
	register_bdev(&g_bdev, "ut_bdev", &g_io_device);
	spdk_bdev_open(&g_bdev.bdev, true, NULL, NULL, &g_desc);
}

static void
finish_cb(void *cb_arg)
{
	g_teardown_done = true;
}

static void
teardown_test(void)
{
	g_teardown_done = false;
	spdk_bdev_close(g_desc);
	g_desc = NULL;
	unregister_bdev(&g_bdev);
	spdk_io_device_unregister(&g_io_device, NULL);
	spdk_bdev_finish(finish_cb, NULL);
	poll_threads();
	memset(&g_bdev, 0, sizeof(g_bdev));
	CU_ASSERT(g_teardown_done == true);
	g_teardown_done = false;
	free_threads();
}

static uint32_t
bdev_io_tailq_cnt(bdev_io_tailq_t *tailq)
{
	struct spdk_bdev_io *io;
	uint32_t cnt = 0;

	TAILQ_FOREACH(io, tailq, link) {
		cnt++;
	}

	return cnt;
}

static void
basic(void)
{
	g_init_complete_called = false;
	setup_test();
	CU_ASSERT(g_init_complete_called == true);

	set_thread(0);

	g_get_io_channel = false;
	g_ut_threads[0].ch = spdk_bdev_get_io_channel(g_desc);
	CU_ASSERT(g_ut_threads[0].ch == NULL);

	g_get_io_channel = true;
	g_create_ch = false;
	g_ut_threads[0].ch = spdk_bdev_get_io_channel(g_desc);
	CU_ASSERT(g_ut_threads[0].ch == NULL);

	g_get_io_channel = true;
	g_create_ch = true;
	g_ut_threads[0].ch = spdk_bdev_get_io_channel(g_desc);
	CU_ASSERT(g_ut_threads[0].ch != NULL);
	spdk_put_io_channel(g_ut_threads[0].ch);

	teardown_test();
}

static int
poller_run_done(void *ctx)
{
	bool *poller_run = ctx;

	*poller_run = true;

	return -1;
}

static int
poller_run_times_done(void *ctx)
{
	int *poller_run_times = ctx;

	(*poller_run_times)++;

	return -1;
}

static void
basic_poller(void)
{
	struct spdk_poller *poller = NULL;
	bool poller_run = false;
	int poller_run_times = 0;

	setup_test();

	set_thread(0);
	reset_time();
	/* Register a poller with no-wait time and test execution */
	poller = spdk_poller_register(poller_run_done, &poller_run, 0);
	CU_ASSERT(poller != NULL);

	poll_threads();
	CU_ASSERT(poller_run == true);

	spdk_poller_unregister(&poller);
	CU_ASSERT(poller == NULL);

	/* Register a poller with 1000us wait time and test single execution */
	poller_run = false;
	poller = spdk_poller_register(poller_run_done, &poller_run, 1000);
	CU_ASSERT(poller != NULL);

	poll_threads();
	CU_ASSERT(poller_run == false);

	increment_time(1000);
	poll_threads();
	CU_ASSERT(poller_run == true);

	reset_time();
	poller_run = false;
	poll_threads();
	CU_ASSERT(poller_run == false);

	increment_time(1000);
	poll_threads();
	CU_ASSERT(poller_run == true);

	spdk_poller_unregister(&poller);
	CU_ASSERT(poller == NULL);

	reset_time();
	/* Register a poller with 1000us wait time and test multiple execution */
	poller = spdk_poller_register(poller_run_times_done, &poller_run_times, 1000);
	CU_ASSERT(poller != NULL);

	poll_threads();
	CU_ASSERT(poller_run_times == 0);

	increment_time(1000);
	poll_threads();
	CU_ASSERT(poller_run_times == 1);

	poller_run_times = 0;
	increment_time(2000);
	poll_threads();
	CU_ASSERT(poller_run_times == 2);

	spdk_poller_unregister(&poller);
	CU_ASSERT(poller == NULL);

	teardown_test();
}

static void
reset_done(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
{
	bool *done = cb_arg;

	CU_ASSERT(success == true);
	*done = true;
	spdk_bdev_free_io(bdev_io);
}

static void
put_channel_during_reset(void)
{
	struct spdk_io_channel *io_ch;
	bool done = false;

	setup_test();

	set_thread(0);
	io_ch = spdk_bdev_get_io_channel(g_desc);
	CU_ASSERT(io_ch != NULL);

	/*
	 * Start a reset, but then put the I/O channel before
	 * the deferred messages for the reset get a chance to
	 * execute.
	 */
	spdk_bdev_reset(g_desc, io_ch, reset_done, &done);
	spdk_put_io_channel(io_ch);
	poll_threads();
	stub_complete_io(g_bdev.io_target, 0);

	teardown_test();
}

static void
aborted_reset_done(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
{
	enum spdk_bdev_io_status *status = cb_arg;

	*status = success ? SPDK_BDEV_IO_STATUS_SUCCESS : SPDK_BDEV_IO_STATUS_FAILED;
	spdk_bdev_free_io(bdev_io);
}

static void
aborted_reset(void)
{
	struct spdk_io_channel *io_ch[2];
	enum spdk_bdev_io_status status1, status2;

	setup_test();

	set_thread(0);
	io_ch[0] = spdk_bdev_get_io_channel(g_desc);
	CU_ASSERT(io_ch[0] != NULL);
	spdk_bdev_reset(g_desc, io_ch[0], aborted_reset_done, &status1);
	poll_threads();
	CU_ASSERT(g_bdev.bdev.reset_in_progress != NULL);

	/*
	 * First reset has been submitted on ch0. Now submit a second
	 * reset on ch1 which will get queued since there is already a
	 * reset in progress.
	 */
	set_thread(1);
	io_ch[1] = spdk_bdev_get_io_channel(g_desc);
	CU_ASSERT(io_ch[1] != NULL);
	spdk_bdev_reset(g_desc, io_ch[1], aborted_reset_done, &status2);
	poll_threads();
	CU_ASSERT(g_bdev.bdev.reset_in_progress != NULL);

	/*
	 * Now destroy ch1. This will abort the queued reset. Check that
	 * the second reset was completed with failed status. Also check
	 * that bdev->reset_in_progress != NULL, since the original reset
	 * has not been completed yet. This ensures that the bdev code is
	 * correctly noticing that the failed reset is *not* the one that
	 * had been submitted to the bdev module.
	 */
	set_thread(1);
	spdk_put_io_channel(io_ch[1]);
	poll_threads();
	CU_ASSERT(status2 == SPDK_BDEV_IO_STATUS_FAILED);
	CU_ASSERT(g_bdev.bdev.reset_in_progress != NULL);

	/*
	 * Now complete the first reset, verify that it completed with SUCCESS
	 * status and that bdev->reset_in_progress is also set back to NULL.
	 */
	set_thread(0);
	spdk_put_io_channel(io_ch[0]);
	stub_complete_io(g_bdev.io_target, 0);
	poll_threads();
	CU_ASSERT(status1 == SPDK_BDEV_IO_STATUS_SUCCESS);
	CU_ASSERT(g_bdev.bdev.reset_in_progress == NULL);

	teardown_test();
}

static void
io_during_io_done(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
{
	enum spdk_bdev_io_status *status = cb_arg;

	*status = success ? SPDK_BDEV_IO_STATUS_SUCCESS : SPDK_BDEV_IO_STATUS_FAILED;
	spdk_bdev_free_io(bdev_io);
}

static void
io_during_reset(void)
{
	struct spdk_io_channel *io_ch[2];
	struct spdk_bdev_channel *bdev_ch[2];
	enum spdk_bdev_io_status status0, status1, status_reset;
	int rc;

	setup_test();

	/*
	 * First test normal case - submit an I/O on each of two channels (with no resets)
	 * and verify they complete successfully.
	 */
	set_thread(0);
	io_ch[0] = spdk_bdev_get_io_channel(g_desc);
	bdev_ch[0] = spdk_io_channel_get_ctx(io_ch[0]);
	CU_ASSERT(bdev_ch[0]->flags == 0);
	status0 = SPDK_BDEV_IO_STATUS_PENDING;
	rc = spdk_bdev_read_blocks(g_desc, io_ch[0], NULL, 0, 1, io_during_io_done, &status0);
	CU_ASSERT(rc == 0);

	set_thread(1);
	io_ch[1] = spdk_bdev_get_io_channel(g_desc);
	bdev_ch[1] = spdk_io_channel_get_ctx(io_ch[1]);
	CU_ASSERT(bdev_ch[1]->flags == 0);
	status1 = SPDK_BDEV_IO_STATUS_PENDING;
	rc = spdk_bdev_read_blocks(g_desc, io_ch[1], NULL, 0, 1, io_during_io_done, &status1);
	CU_ASSERT(rc == 0);

	poll_threads();
	CU_ASSERT(status0 == SPDK_BDEV_IO_STATUS_PENDING);
	CU_ASSERT(status1 == SPDK_BDEV_IO_STATUS_PENDING);

	set_thread(0);
	stub_complete_io(g_bdev.io_target, 0);
	CU_ASSERT(status0 == SPDK_BDEV_IO_STATUS_SUCCESS);

	set_thread(1);
	stub_complete_io(g_bdev.io_target, 0);
	CU_ASSERT(status1 == SPDK_BDEV_IO_STATUS_SUCCESS);

	/*
	 * Now submit a reset, and leave it pending while we submit I/O on two different
	 * channels. These I/O should be failed by the bdev layer since the reset is in
	 * progress.
	 */
	set_thread(0);
	status_reset = SPDK_BDEV_IO_STATUS_PENDING;
	rc = spdk_bdev_reset(g_desc, io_ch[0], io_during_io_done, &status_reset);
	CU_ASSERT(rc == 0);

	CU_ASSERT(bdev_ch[0]->flags == 0);
	CU_ASSERT(bdev_ch[1]->flags == 0);
	poll_threads();
	CU_ASSERT(bdev_ch[0]->flags == BDEV_CH_RESET_IN_PROGRESS);
	CU_ASSERT(bdev_ch[1]->flags == BDEV_CH_RESET_IN_PROGRESS);

	set_thread(0);
	status0 = SPDK_BDEV_IO_STATUS_PENDING;
	rc = spdk_bdev_read_blocks(g_desc, io_ch[0], NULL, 0, 1, io_during_io_done, &status0);
	CU_ASSERT(rc == 0);

	set_thread(1);
	status1 = SPDK_BDEV_IO_STATUS_PENDING;
	rc = spdk_bdev_read_blocks(g_desc, io_ch[1], NULL, 0, 1, io_during_io_done, &status1);
	CU_ASSERT(rc == 0);

	/*
	 * A reset is in progress so these read I/O should complete with failure. Note that we
	 * need to poll_threads() since I/O that complete inline have their completion deferred.
	 */
	poll_threads();
	CU_ASSERT(status_reset == SPDK_BDEV_IO_STATUS_PENDING);
	CU_ASSERT(status0 == SPDK_BDEV_IO_STATUS_FAILED);
	CU_ASSERT(status1 == SPDK_BDEV_IO_STATUS_FAILED);

	/*
	 * Complete the reset
	 */
	set_thread(0);
	stub_complete_io(g_bdev.io_target, 0);

	/*
	 * Only poll thread 0. We should not get a completion.
	 */
	poll_thread(0);
	CU_ASSERT(status_reset == SPDK_BDEV_IO_STATUS_PENDING);

	/*
	 * Poll both thread 0 and 1 so the messages can propagate and we
	 * get a completion.
	 */
	poll_threads();
	CU_ASSERT(status_reset == SPDK_BDEV_IO_STATUS_SUCCESS);

	spdk_put_io_channel(io_ch[0]);
	set_thread(1);
	spdk_put_io_channel(io_ch[1]);
	poll_threads();

	teardown_test();
}

static void
basic_qos(void)
{
	struct spdk_io_channel *io_ch[2];
	struct spdk_bdev_channel *bdev_ch[2];
	struct spdk_bdev *bdev;
	enum spdk_bdev_io_status status;
	int rc;

	setup_test();

	/* Enable QoS */
	bdev = &g_bdev.bdev;
	bdev->qos = calloc(1, sizeof(*bdev->qos));
	SPDK_CU_ASSERT_FATAL(bdev->qos != NULL);
	TAILQ_INIT(&bdev->qos->queued);
	/*
	 * Enable both IOPS and bandwidth rate limits.
	 * In this case, both rate limits will take equal effect.
	 */
	bdev->qos->iops_rate_limit = 2000; /* 2 I/O per millisecond */
	bdev->qos->byte_rate_limit = 8192000; /* 8K byte per millisecond with 4K block size */

	g_get_io_channel = true;

	set_thread(0);
	io_ch[0] = spdk_bdev_get_io_channel(g_desc);
	bdev_ch[0] = spdk_io_channel_get_ctx(io_ch[0]);
	CU_ASSERT(bdev_ch[0]->flags == BDEV_CH_QOS_ENABLED);

	set_thread(1);
	io_ch[1] = spdk_bdev_get_io_channel(g_desc);
	bdev_ch[1] = spdk_io_channel_get_ctx(io_ch[1]);
	CU_ASSERT(bdev_ch[1]->flags == BDEV_CH_QOS_ENABLED);

	/*
	 * Send an I/O on thread 0, which is where the QoS thread is running.
	 */
	set_thread(0);
	status = SPDK_BDEV_IO_STATUS_PENDING;
	rc = spdk_bdev_read_blocks(g_desc, io_ch[0], NULL, 0, 1, io_during_io_done, &status);
	CU_ASSERT(rc == 0);
	CU_ASSERT(status == SPDK_BDEV_IO_STATUS_PENDING);
	poll_threads();
	stub_complete_io(g_bdev.io_target, 0);
	poll_threads();
	CU_ASSERT(status == SPDK_BDEV_IO_STATUS_SUCCESS);

	/* Send an I/O on thread 1. The QoS thread is not running here. */
	status = SPDK_BDEV_IO_STATUS_PENDING;
	set_thread(1);
	rc = spdk_bdev_read_blocks(g_desc, io_ch[1], NULL, 0, 1, io_during_io_done, &status);
	CU_ASSERT(rc == 0);
	CU_ASSERT(status == SPDK_BDEV_IO_STATUS_PENDING);
	poll_threads();
	/* Complete I/O on thread 1. This should not complete the I/O we submitted */
	stub_complete_io(g_bdev.io_target, 0);
	poll_threads();
	CU_ASSERT(status == SPDK_BDEV_IO_STATUS_PENDING);
	/* Now complete I/O on thread 0 */
	set_thread(0);
	poll_threads();
	stub_complete_io(g_bdev.io_target, 0);
	poll_threads();
	CU_ASSERT(status == SPDK_BDEV_IO_STATUS_SUCCESS);

	/* Tear down the channels */
	set_thread(0);
	spdk_put_io_channel(io_ch[0]);
	set_thread(1);
	spdk_put_io_channel(io_ch[1]);
	poll_threads();
	set_thread(0);

	/* Close the descriptor, which should stop the qos channel */
	spdk_bdev_close(g_desc);
	poll_threads();
	CU_ASSERT(bdev->qos->ch == NULL);

	spdk_bdev_open(bdev, true, NULL, NULL, &g_desc);

	/* Create the channels in reverse order. */
	set_thread(1);
	io_ch[1] = spdk_bdev_get_io_channel(g_desc);
	bdev_ch[1] = spdk_io_channel_get_ctx(io_ch[1]);
	CU_ASSERT(bdev_ch[1]->flags == BDEV_CH_QOS_ENABLED);

	set_thread(0);
	io_ch[0] = spdk_bdev_get_io_channel(g_desc);
	bdev_ch[0] = spdk_io_channel_get_ctx(io_ch[0]);
	CU_ASSERT(bdev_ch[0]->flags == BDEV_CH_QOS_ENABLED);

	/* Confirm that the qos thread is now thread 1 */
	CU_ASSERT(bdev->qos->ch == bdev_ch[1]);

	/* Tear down the channels */
	set_thread(0);
	spdk_put_io_channel(io_ch[0]);
	set_thread(1);
	spdk_put_io_channel(io_ch[1]);
	poll_threads();

	set_thread(0);

	teardown_test();
}

static void
io_during_qos_queue(void)
{
	struct spdk_io_channel *io_ch[2];
	struct spdk_bdev_channel *bdev_ch[2];
	struct spdk_bdev *bdev;
	enum spdk_bdev_io_status status0, status1;
	int rc;

	setup_test();
	reset_time();

	/* Enable QoS */
	bdev = &g_bdev.bdev;
	bdev->qos = calloc(1, sizeof(*bdev->qos));
	SPDK_CU_ASSERT_FATAL(bdev->qos != NULL);
	TAILQ_INIT(&bdev->qos->queued);
	/*
	 * Enable both IOPS and bandwidth rate limits.
	 * In this case, IOPS rate limit will take effect first.
	 */
	bdev->qos->iops_rate_limit = 1000; /* 1000 I/O per second, or 1 per millisecond */
	bdev->qos->byte_rate_limit = 8192000; /* 8K byte per millisecond with 4K block size */

	g_get_io_channel = true;

	/* Create channels */
	set_thread(0);
	io_ch[0] = spdk_bdev_get_io_channel(g_desc);
	bdev_ch[0] = spdk_io_channel_get_ctx(io_ch[0]);
	CU_ASSERT(bdev_ch[0]->flags == BDEV_CH_QOS_ENABLED);

	set_thread(1);
	io_ch[1] = spdk_bdev_get_io_channel(g_desc);
	bdev_ch[1] = spdk_io_channel_get_ctx(io_ch[1]);
	CU_ASSERT(bdev_ch[1]->flags == BDEV_CH_QOS_ENABLED);

	/* Send two I/O */
	status1 = SPDK_BDEV_IO_STATUS_PENDING;
	rc = spdk_bdev_read_blocks(g_desc, io_ch[1], NULL, 0, 1, io_during_io_done, &status1);
	CU_ASSERT(rc == 0);
	CU_ASSERT(status1 == SPDK_BDEV_IO_STATUS_PENDING);
	set_thread(0);
	status0 = SPDK_BDEV_IO_STATUS_PENDING;
	rc = spdk_bdev_read_blocks(g_desc, io_ch[0], NULL, 0, 1, io_during_io_done, &status0);
	CU_ASSERT(rc == 0);
	CU_ASSERT(status0 == SPDK_BDEV_IO_STATUS_PENDING);

	/* Complete any I/O that arrived at the disk */
	poll_threads();
	set_thread(1);
	stub_complete_io(g_bdev.io_target, 0);
	set_thread(0);
	stub_complete_io(g_bdev.io_target, 0);
	poll_threads();

	/* Only one of the I/O should complete. (logical XOR) */
	if (status0 == SPDK_BDEV_IO_STATUS_SUCCESS) {
		CU_ASSERT(status1 == SPDK_BDEV_IO_STATUS_PENDING);
	} else {
		CU_ASSERT(status1 == SPDK_BDEV_IO_STATUS_SUCCESS);
	}

	/* Advance in time by a millisecond */
	increment_time(1000);

	/* Complete more I/O */
	poll_threads();
	set_thread(1);
	stub_complete_io(g_bdev.io_target, 0);
	set_thread(0);
	stub_complete_io(g_bdev.io_target, 0);
	poll_threads();

	/* Now the second I/O should be done */
	CU_ASSERT(status0 == SPDK_BDEV_IO_STATUS_SUCCESS);
	CU_ASSERT(status1 == SPDK_BDEV_IO_STATUS_SUCCESS);

	/* Tear down the channels */
	set_thread(1);
	spdk_put_io_channel(io_ch[1]);
	set_thread(0);
	spdk_put_io_channel(io_ch[0]);
	poll_threads();

	teardown_test();
}

static void
io_during_qos_reset(void)
{
	struct spdk_io_channel *io_ch[2];
	struct spdk_bdev_channel *bdev_ch[2];
	struct spdk_bdev *bdev;
	enum spdk_bdev_io_status status0, status1, reset_status;
	int rc;

	setup_test();
	reset_time();

	/* Enable QoS */
	bdev = &g_bdev.bdev;
	bdev->qos = calloc(1, sizeof(*bdev->qos));
	SPDK_CU_ASSERT_FATAL(bdev->qos != NULL);
	TAILQ_INIT(&bdev->qos->queued);
	/*
	 * Enable both IOPS and bandwidth rate limits.
	 * In this case, bandwidth rate limit will take effect first.
	 */
	bdev->qos->iops_rate_limit = 2000; /* 2000 I/O per second, or 2 per millisecond */
	bdev->qos->byte_rate_limit = 4096000; /* 4K byte per millisecond with 4K block size */

	g_get_io_channel = true;

	/* Create channels */
	set_thread(0);
	io_ch[0] = spdk_bdev_get_io_channel(g_desc);
	bdev_ch[0] = spdk_io_channel_get_ctx(io_ch[0]);
	CU_ASSERT(bdev_ch[0]->flags == BDEV_CH_QOS_ENABLED);

	set_thread(1);
	io_ch[1] = spdk_bdev_get_io_channel(g_desc);
	bdev_ch[1] = spdk_io_channel_get_ctx(io_ch[1]);
	CU_ASSERT(bdev_ch[1]->flags == BDEV_CH_QOS_ENABLED);

	/* Send two I/O. One of these gets queued by QoS. The other is sitting at the disk. */
	status1 = SPDK_BDEV_IO_STATUS_PENDING;
	rc = spdk_bdev_read_blocks(g_desc, io_ch[1], NULL, 0, 1, io_during_io_done, &status1);
	CU_ASSERT(rc == 0);
	set_thread(0);
	status0 = SPDK_BDEV_IO_STATUS_PENDING;
	rc = spdk_bdev_read_blocks(g_desc, io_ch[0], NULL, 0, 1, io_during_io_done, &status0);
	CU_ASSERT(rc == 0);

	poll_threads();
	CU_ASSERT(status1 == SPDK_BDEV_IO_STATUS_PENDING);
	CU_ASSERT(status0 == SPDK_BDEV_IO_STATUS_PENDING);

	/* Reset the bdev. */
	reset_status = SPDK_BDEV_IO_STATUS_PENDING;
	rc = spdk_bdev_reset(g_desc, io_ch[0], io_during_io_done, &reset_status);
	CU_ASSERT(rc == 0);

	/* Complete any I/O that arrived at the disk */
	poll_threads();
	set_thread(1);
	stub_complete_io(g_bdev.io_target, 0);
	set_thread(0);
	stub_complete_io(g_bdev.io_target, 0);
	poll_threads();

	CU_ASSERT(reset_status == SPDK_BDEV_IO_STATUS_SUCCESS);
	CU_ASSERT(status0 == SPDK_BDEV_IO_STATUS_FAILED);
	CU_ASSERT(status1 == SPDK_BDEV_IO_STATUS_FAILED);

	/* Tear down the channels */
	set_thread(1);
	spdk_put_io_channel(io_ch[1]);
	set_thread(0);
	spdk_put_io_channel(io_ch[0]);
	poll_threads();

	teardown_test();
}

static void
enomem_done(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
{
	enum spdk_bdev_io_status *status = cb_arg;

	*status = success ? SPDK_BDEV_IO_STATUS_SUCCESS : SPDK_BDEV_IO_STATUS_FAILED;
	spdk_bdev_free_io(bdev_io);
}

static void
enomem(void)
{
	struct spdk_io_channel *io_ch;
	struct spdk_bdev_channel *bdev_ch;
	struct spdk_bdev_shared_resource *shared_resource;
	struct ut_bdev_channel *ut_ch;
	const uint32_t IO_ARRAY_SIZE = 64;
	const uint32_t AVAIL = 20;
	enum spdk_bdev_io_status status[IO_ARRAY_SIZE], status_reset;
	uint32_t nomem_cnt, i;
	struct spdk_bdev_io *first_io;
	int rc;

	setup_test();

	set_thread(0);
	io_ch = spdk_bdev_get_io_channel(g_desc);
	bdev_ch = spdk_io_channel_get_ctx(io_ch);
	shared_resource = bdev_ch->shared_resource;
	ut_ch = spdk_io_channel_get_ctx(bdev_ch->channel);
	ut_ch->avail_cnt = AVAIL;

	/* First submit a number of IOs equal to what the channel can support. */
	for (i = 0; i < AVAIL; i++) {
		status[i] = SPDK_BDEV_IO_STATUS_PENDING;
		rc = spdk_bdev_read_blocks(g_desc, io_ch, NULL, 0, 1, enomem_done, &status[i]);
		CU_ASSERT(rc == 0);
	}
	CU_ASSERT(TAILQ_EMPTY(&shared_resource->nomem_io));

	/*
	 * Next, submit one additional I/O. This one should fail with ENOMEM and then go onto
	 * the nomem_io list.
	 */
	status[AVAIL] = SPDK_BDEV_IO_STATUS_PENDING;
	rc = spdk_bdev_read_blocks(g_desc, io_ch, NULL, 0, 1, enomem_done, &status[AVAIL]);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(!TAILQ_EMPTY(&shared_resource->nomem_io));
	first_io = TAILQ_FIRST(&shared_resource->nomem_io);

	/*
	 * Now submit a bunch more I/O. These should all fail with ENOMEM and get queued behind
	 * the first_io above.
	 */
	for (i = AVAIL + 1; i < IO_ARRAY_SIZE; i++) {
		status[i] = SPDK_BDEV_IO_STATUS_PENDING;
		rc = spdk_bdev_read_blocks(g_desc, io_ch, NULL, 0, 1, enomem_done, &status[i]);
		CU_ASSERT(rc == 0);
	}

	/* Assert that first_io is still at the head of the list. */
	CU_ASSERT(TAILQ_FIRST(&shared_resource->nomem_io) == first_io);
	CU_ASSERT(bdev_io_tailq_cnt(&shared_resource->nomem_io) == (IO_ARRAY_SIZE - AVAIL));
	nomem_cnt = bdev_io_tailq_cnt(&shared_resource->nomem_io);
	CU_ASSERT(shared_resource->nomem_threshold == (AVAIL - NOMEM_THRESHOLD_COUNT));

	/*
	 * Complete 1 I/O only. The key check here is bdev_io_tailq_cnt - this should not have
	 * changed since completing just 1 I/O should not trigger retrying the queued nomem_io
	 * list.
	 */
	stub_complete_io(g_bdev.io_target, 1);
	CU_ASSERT(bdev_io_tailq_cnt(&shared_resource->nomem_io) == nomem_cnt);

	/*
	 * Complete enough I/O to hit the nomem_threshold. This should trigger retrying nomem_io,
	 * and we should see I/O get resubmitted to the test bdev module.
	 */
	stub_complete_io(g_bdev.io_target, NOMEM_THRESHOLD_COUNT - 1);
	CU_ASSERT(bdev_io_tailq_cnt(&shared_resource->nomem_io) < nomem_cnt);
	nomem_cnt = bdev_io_tailq_cnt(&shared_resource->nomem_io);

	/* Complete 1 I/O only. This should not trigger retrying the queued nomem_io. */
	stub_complete_io(g_bdev.io_target, 1);
	CU_ASSERT(bdev_io_tailq_cnt(&shared_resource->nomem_io) == nomem_cnt);

	/*
	 * Send a reset and confirm that all I/O are completed, including the ones that
	 * were queued on the nomem_io list.
	 */
	status_reset = SPDK_BDEV_IO_STATUS_PENDING;
	rc = spdk_bdev_reset(g_desc, io_ch, enomem_done, &status_reset);
	poll_threads();
	CU_ASSERT(rc == 0);
	/* This will complete the reset. */
	stub_complete_io(g_bdev.io_target, 0);

	CU_ASSERT(bdev_io_tailq_cnt(&shared_resource->nomem_io) == 0);
	CU_ASSERT(shared_resource->io_outstanding == 0);

	spdk_put_io_channel(io_ch);
	poll_threads();
	teardown_test();
}

static void
enomem_multi_bdev(void)
{
	struct spdk_io_channel *io_ch;
	struct spdk_bdev_channel *bdev_ch;
	struct spdk_bdev_shared_resource *shared_resource;
	struct ut_bdev_channel *ut_ch;
	const uint32_t IO_ARRAY_SIZE = 64;
	const uint32_t AVAIL = 20;
	enum spdk_bdev_io_status status[IO_ARRAY_SIZE];
	uint32_t i;
	struct ut_bdev *second_bdev;
	struct spdk_bdev_desc *second_desc;
	struct spdk_bdev_channel *second_bdev_ch;
	struct spdk_io_channel *second_ch;
	int rc;

	setup_test();

	/* Register second bdev with the same io_target */
	second_bdev = calloc(1, sizeof(*second_bdev));
	SPDK_CU_ASSERT_FATAL(second_bdev != NULL);
	register_bdev(second_bdev, "ut_bdev2", g_bdev.io_target);
	spdk_bdev_open(&second_bdev->bdev, true, NULL, NULL, &second_desc);

	set_thread(0);
	io_ch = spdk_bdev_get_io_channel(g_desc);
	bdev_ch = spdk_io_channel_get_ctx(io_ch);
	shared_resource = bdev_ch->shared_resource;
	ut_ch = spdk_io_channel_get_ctx(bdev_ch->channel);
	ut_ch->avail_cnt = AVAIL;

	second_ch = spdk_bdev_get_io_channel(second_desc);
	second_bdev_ch = spdk_io_channel_get_ctx(second_ch);
	SPDK_CU_ASSERT_FATAL(shared_resource == second_bdev_ch->shared_resource);

	/* Saturate io_target through bdev A. */
	for (i = 0; i < AVAIL; i++) {
		status[i] = SPDK_BDEV_IO_STATUS_PENDING;
		rc = spdk_bdev_read_blocks(g_desc, io_ch, NULL, 0, 1, enomem_done, &status[i]);
		CU_ASSERT(rc == 0);
	}
	CU_ASSERT(TAILQ_EMPTY(&shared_resource->nomem_io));

	/*
	 * Now submit I/O through the second bdev. This should fail with ENOMEM
	 * and then go onto the nomem_io list.
	 */
	status[AVAIL] = SPDK_BDEV_IO_STATUS_PENDING;
	rc = spdk_bdev_read_blocks(second_desc, second_ch, NULL, 0, 1, enomem_done, &status[AVAIL]);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(!TAILQ_EMPTY(&shared_resource->nomem_io));

	/* Complete first bdev's I/O. This should retry sending second bdev's nomem_io */
	stub_complete_io(g_bdev.io_target, AVAIL);

	SPDK_CU_ASSERT_FATAL(TAILQ_EMPTY(&shared_resource->nomem_io));
	CU_ASSERT(shared_resource->io_outstanding == 1);

	/* Now complete our retried I/O */
	stub_complete_io(g_bdev.io_target, 1);
	SPDK_CU_ASSERT_FATAL(shared_resource->io_outstanding == 0);

	spdk_put_io_channel(io_ch);
	spdk_put_io_channel(second_ch);
	spdk_bdev_close(second_desc);
	unregister_bdev(second_bdev);
	poll_threads();
	free(second_bdev);
	teardown_test();
}

static void
enomem_multi_io_target(void)
{
	struct spdk_io_channel *io_ch;
	struct spdk_bdev_channel *bdev_ch;
	struct ut_bdev_channel *ut_ch;
	const uint32_t IO_ARRAY_SIZE = 64;
	const uint32_t AVAIL = 20;
	enum spdk_bdev_io_status status[IO_ARRAY_SIZE];
	uint32_t i;
	int new_io_device;
	struct ut_bdev *second_bdev;
	struct spdk_bdev_desc *second_desc;
	struct spdk_bdev_channel *second_bdev_ch;
	struct spdk_io_channel *second_ch;
	int rc;

	setup_test();

	/* Create new io_target and a second bdev using it */
	spdk_io_device_register(&new_io_device, stub_create_ch, stub_destroy_ch,
				sizeof(struct ut_bdev_channel));
	second_bdev = calloc(1, sizeof(*second_bdev));
	SPDK_CU_ASSERT_FATAL(second_bdev != NULL);
	register_bdev(second_bdev, "ut_bdev2", &new_io_device);
	spdk_bdev_open(&second_bdev->bdev, true, NULL, NULL, &second_desc);

	set_thread(0);
	io_ch = spdk_bdev_get_io_channel(g_desc);
	bdev_ch = spdk_io_channel_get_ctx(io_ch);
	ut_ch = spdk_io_channel_get_ctx(bdev_ch->channel);
	ut_ch->avail_cnt = AVAIL;

	/* Different io_target should imply a different shared_resource */
	second_ch = spdk_bdev_get_io_channel(second_desc);
	second_bdev_ch = spdk_io_channel_get_ctx(second_ch);
	SPDK_CU_ASSERT_FATAL(bdev_ch->shared_resource != second_bdev_ch->shared_resource);

	/* Saturate io_target through bdev A. */
	for (i = 0; i < AVAIL; i++) {
		status[i] = SPDK_BDEV_IO_STATUS_PENDING;
		rc = spdk_bdev_read_blocks(g_desc, io_ch, NULL, 0, 1, enomem_done, &status[i]);
		CU_ASSERT(rc == 0);
	}
	CU_ASSERT(TAILQ_EMPTY(&bdev_ch->shared_resource->nomem_io));

	/* Issue one more I/O to fill ENOMEM list. */
	status[AVAIL] = SPDK_BDEV_IO_STATUS_PENDING;
	rc = spdk_bdev_read_blocks(g_desc, io_ch, NULL, 0, 1, enomem_done, &status[AVAIL]);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(!TAILQ_EMPTY(&bdev_ch->shared_resource->nomem_io));

	/*
	 * Now submit I/O through the second bdev. This should go through and complete
	 * successfully because we're using a different io_device underneath.
	 */
	status[AVAIL] = SPDK_BDEV_IO_STATUS_PENDING;
	rc = spdk_bdev_read_blocks(second_desc, second_ch, NULL, 0, 1, enomem_done, &status[AVAIL]);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(TAILQ_EMPTY(&second_bdev_ch->shared_resource->nomem_io));
	stub_complete_io(second_bdev->io_target, 1);

	/* Cleanup; Complete outstanding I/O. */
	stub_complete_io(g_bdev.io_target, AVAIL);
	SPDK_CU_ASSERT_FATAL(TAILQ_EMPTY(&bdev_ch->shared_resource->nomem_io));
	/* Complete the ENOMEM I/O */
	stub_complete_io(g_bdev.io_target, 1);
	CU_ASSERT(bdev_ch->shared_resource->io_outstanding == 0);

	SPDK_CU_ASSERT_FATAL(TAILQ_EMPTY(&bdev_ch->shared_resource->nomem_io));
	CU_ASSERT(bdev_ch->shared_resource->io_outstanding == 0);
	spdk_put_io_channel(io_ch);
	spdk_put_io_channel(second_ch);
	spdk_bdev_close(second_desc);
	unregister_bdev(second_bdev);
	spdk_io_device_unregister(&new_io_device, NULL);
	poll_threads();
	free(second_bdev);
	teardown_test();
}

static void
qos_dynamic_enable_done(void *cb_arg, int status)
{
	int *rc = cb_arg;
	*rc = status;
}

static void
qos_dynamic_enable(void)
{
	struct spdk_io_channel *io_ch[2];
	struct spdk_bdev_channel *bdev_ch[2];
	struct spdk_bdev *bdev;
	int status, second_status;

	setup_test();
	reset_time();

	bdev = &g_bdev.bdev;

	g_get_io_channel = true;

	/* Create channels */
	set_thread(0);
	io_ch[0] = spdk_bdev_get_io_channel(g_desc);
	bdev_ch[0] = spdk_io_channel_get_ctx(io_ch[0]);
	CU_ASSERT(bdev_ch[0]->flags == 0);

	set_thread(1);
	io_ch[1] = spdk_bdev_get_io_channel(g_desc);
	bdev_ch[1] = spdk_io_channel_get_ctx(io_ch[1]);
	CU_ASSERT(bdev_ch[1]->flags == 0);

	set_thread(0);

	/* Enable QoS */
	status = -1;
	spdk_bdev_set_qos_limit_iops(bdev, 10000, qos_dynamic_enable_done, &status);
	poll_threads();
	CU_ASSERT(status == 0);
	CU_ASSERT((bdev_ch[0]->flags & BDEV_CH_QOS_ENABLED) != 0);
	CU_ASSERT((bdev_ch[1]->flags & BDEV_CH_QOS_ENABLED) != 0);

	/* Disable QoS */
	status = -1;
	spdk_bdev_set_qos_limit_iops(bdev, 0, qos_dynamic_enable_done, &status);
	poll_threads();
	CU_ASSERT(status == 0);
	CU_ASSERT((bdev_ch[0]->flags & BDEV_CH_QOS_ENABLED) == 0);
	CU_ASSERT((bdev_ch[1]->flags & BDEV_CH_QOS_ENABLED) == 0);

	/* Disable QoS again */
	status = -1;
	spdk_bdev_set_qos_limit_iops(bdev, 0, qos_dynamic_enable_done, &status);
	poll_threads();
	CU_ASSERT(status == 0); /* This should succeed */
	CU_ASSERT((bdev_ch[0]->flags & BDEV_CH_QOS_ENABLED) == 0);
	CU_ASSERT((bdev_ch[1]->flags & BDEV_CH_QOS_ENABLED) == 0);

	/* Enable QoS on thread 0 */
	status = -1;
	spdk_bdev_set_qos_limit_iops(bdev, 10000, qos_dynamic_enable_done, &status);
	poll_threads();
	CU_ASSERT(status == 0);
	CU_ASSERT((bdev_ch[0]->flags & BDEV_CH_QOS_ENABLED) != 0);
	CU_ASSERT((bdev_ch[1]->flags & BDEV_CH_QOS_ENABLED) != 0);

	/* Disable QoS on thread 1 */
	set_thread(1);
	status = -1;
	spdk_bdev_set_qos_limit_iops(bdev, 0, qos_dynamic_enable_done, &status);
	/* Don't poll yet. This should leave the channels with QoS enabled */
	CU_ASSERT(status == -1);
	CU_ASSERT((bdev_ch[0]->flags & BDEV_CH_QOS_ENABLED) != 0);
	CU_ASSERT((bdev_ch[1]->flags & BDEV_CH_QOS_ENABLED) != 0);

	/*
	 * Enable QoS. This should immediately fail because the previous disable QoS
	 * hasn't completed.
	 */
	second_status = 0;
	spdk_bdev_set_qos_limit_iops(bdev, 10000, qos_dynamic_enable_done, &second_status);
	poll_threads();
	CU_ASSERT(status == 0); /* The disable should succeed */
	CU_ASSERT(second_status < 0); /* The enable should fail */
	CU_ASSERT((bdev_ch[0]->flags & BDEV_CH_QOS_ENABLED) == 0);
	CU_ASSERT((bdev_ch[1]->flags & BDEV_CH_QOS_ENABLED) == 0);

	/* Enable QoS on thread 1. This should succeed now that the disable has completed. */
	status = -1;
	spdk_bdev_set_qos_limit_iops(bdev, 10000, qos_dynamic_enable_done, &status);
	poll_threads();
	CU_ASSERT(status == 0);
	CU_ASSERT((bdev_ch[0]->flags & BDEV_CH_QOS_ENABLED) != 0);
	CU_ASSERT((bdev_ch[1]->flags & BDEV_CH_QOS_ENABLED) != 0);

	/* Tear down the channels */
	set_thread(0);
	spdk_put_io_channel(io_ch[0]);
	set_thread(1);
	spdk_put_io_channel(io_ch[1]);
	poll_threads();

	set_thread(0);
	teardown_test();
}

int
main(int argc, char **argv)
{
	CU_pSuite suite = NULL;
	unsigned int num_failures;

	if (CU_initialize_registry() != CUE_SUCCESS) {
		return CU_get_error();
	}

	suite = CU_add_suite("bdev", NULL, NULL);
	if (suite == NULL) {
		CU_cleanup_registry();
		return CU_get_error();
	}

	if (
		CU_add_test(suite, "basic", basic) == NULL ||
		CU_add_test(suite, "basic_poller", basic_poller) == NULL ||
		CU_add_test(suite, "basic_qos", basic_qos) == NULL ||
		CU_add_test(suite, "put_channel_during_reset", put_channel_during_reset) == NULL ||
		CU_add_test(suite, "aborted_reset", aborted_reset) == NULL ||
		CU_add_test(suite, "io_during_reset", io_during_reset) == NULL ||
		CU_add_test(suite, "io_during_qos_queue", io_during_qos_queue) == NULL ||
		CU_add_test(suite, "io_during_qos_reset", io_during_qos_reset) == NULL ||
		CU_add_test(suite, "enomem", enomem) == NULL ||
		CU_add_test(suite, "enomem_multi_bdev", enomem_multi_bdev) == NULL ||
		CU_add_test(suite, "enomem_multi_io_target", enomem_multi_io_target) == NULL ||
		CU_add_test(suite, "qos_dynamic_enable", qos_dynamic_enable) == NULL
	) {
		CU_cleanup_registry();
		return CU_get_error();
	}

	CU_basic_set_mode(CU_BRM_VERBOSE);
	CU_basic_run_tests();
	num_failures = CU_get_number_of_failures();
	CU_cleanup_registry();
	return num_failures;
}