/*-
 *   BSD LICENSE
 *
 *   Copyright (c) Intel Corporation.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "spdk_cunit.h"

#include "lib/test_env.c"
#include "lib/ut_multithread.c"

/* HACK: disable VTune integration so the unit test doesn't need VTune headers and libs to build */
#undef SPDK_CONFIG_VTUNE

#include "bdev/bdev.c"

#define BDEV_UT_NUM_THREADS 3

DEFINE_STUB_V(spdk_scsi_nvme_translate, (const struct spdk_bdev_io *bdev_io,
		int *sc, int *sk, int *asc, int *ascq));

struct ut_bdev {
	struct spdk_bdev	bdev;
	void			*io_target;
};

struct ut_bdev_channel {
	TAILQ_HEAD(, spdk_bdev_io)	outstanding_io;
	uint32_t			outstanding_cnt;
	uint32_t			avail_cnt;
};

int g_io_device;
struct ut_bdev g_bdev;
struct spdk_bdev_desc *g_desc;
bool g_teardown_done = false;
bool g_get_io_channel = true;
bool g_create_ch = true;

static int
stub_create_ch(void *io_device, void *ctx_buf)
{
	struct ut_bdev_channel *ch = ctx_buf;

	if (g_create_ch == false) {
		return -1;
	}

	TAILQ_INIT(&ch->outstanding_io);
	ch->outstanding_cnt = 0;
	/*
	 * When avail gets to 0, the submit_request function will return ENOMEM.
	 * Most tests do not want ENOMEM to occur, so by default set this to a
	 * big value that won't get hit.  The ENOMEM tests can then override this
	 * value to something much smaller to induce ENOMEM conditions.
	 */
	ch->avail_cnt = 2048;
	return 0;
}
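/*
 * Sketch (hypothetical test following the pattern the enomem tests below
 * actually use): a test induces ENOMEM by shrinking a channel's capacity
 * after creating it, e.g.:
 *
 *     ut_ch = spdk_io_channel_get_ctx(bdev_ch->channel);
 *     ut_ch->avail_cnt = 4;
 *
 * Once avail_cnt submissions are outstanding, stub_submit_request() below
 * completes any further I/O with SPDK_BDEV_IO_STATUS_NOMEM.
 */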
static void
stub_destroy_ch(void *io_device, void *ctx_buf)
{
}

static struct spdk_io_channel *
stub_get_io_channel(void *ctx)
{
	struct ut_bdev *ut_bdev = ctx;

	if (g_get_io_channel == true) {
		return spdk_get_io_channel(ut_bdev->io_target);
	} else {
		return NULL;
	}
}

static int
stub_destruct(void *ctx)
{
	return 0;
}

static void
stub_submit_request(struct spdk_io_channel *_ch, struct spdk_bdev_io *bdev_io)
{
	struct ut_bdev_channel *ch = spdk_io_channel_get_ctx(_ch);

	if (bdev_io->type == SPDK_BDEV_IO_TYPE_RESET) {
		struct spdk_bdev_io *io;

		while (!TAILQ_EMPTY(&ch->outstanding_io)) {
			io = TAILQ_FIRST(&ch->outstanding_io);
			TAILQ_REMOVE(&ch->outstanding_io, io, module_link);
			ch->outstanding_cnt--;
			spdk_bdev_io_complete(io, SPDK_BDEV_IO_STATUS_FAILED);
			ch->avail_cnt++;
		}
	}

	if (ch->avail_cnt > 0) {
		TAILQ_INSERT_TAIL(&ch->outstanding_io, bdev_io, module_link);
		ch->outstanding_cnt++;
		ch->avail_cnt--;
	} else {
		spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_NOMEM);
	}
}

static uint32_t
stub_complete_io(void *io_target, uint32_t num_to_complete)
{
	struct spdk_io_channel *_ch = spdk_get_io_channel(io_target);
	struct ut_bdev_channel *ch = spdk_io_channel_get_ctx(_ch);
	struct spdk_bdev_io *io;
	bool complete_all = (num_to_complete == 0);
	uint32_t num_completed = 0;

	while (complete_all || num_completed < num_to_complete) {
		if (TAILQ_EMPTY(&ch->outstanding_io)) {
			break;
		}
		io = TAILQ_FIRST(&ch->outstanding_io);
		TAILQ_REMOVE(&ch->outstanding_io, io, module_link);
		ch->outstanding_cnt--;
		spdk_bdev_io_complete(io, SPDK_BDEV_IO_STATUS_SUCCESS);
		ch->avail_cnt++;
		num_completed++;
	}

	spdk_put_io_channel(_ch);
	return num_completed;
}

static struct spdk_bdev_fn_table fn_table = {
	.get_io_channel =	stub_get_io_channel,
	.destruct =		stub_destruct,
	.submit_request =	stub_submit_request,
};

static int
module_init(void)
{
	return 0;
}

static void
module_fini(void)
{
}

SPDK_BDEV_MODULE_REGISTER(bdev_ut, module_init, module_fini, NULL, NULL, NULL)

static void
register_bdev(struct ut_bdev *ut_bdev, char *name, void *io_target)
{
	memset(ut_bdev, 0, sizeof(*ut_bdev));

	ut_bdev->io_target = io_target;
	ut_bdev->bdev.ctxt = ut_bdev;
	ut_bdev->bdev.name = name;
	ut_bdev->bdev.fn_table = &fn_table;
	ut_bdev->bdev.module = SPDK_GET_BDEV_MODULE(bdev_ut);
	ut_bdev->bdev.blocklen = 4096;
	ut_bdev->bdev.blockcnt = 1024;

	spdk_bdev_register(&ut_bdev->bdev);
}

static void
unregister_bdev(struct ut_bdev *ut_bdev)
{
	/* Handle any deferred messages. */
	poll_threads();
	spdk_bdev_unregister(&ut_bdev->bdev, NULL, NULL);
	memset(ut_bdev, 0, sizeof(*ut_bdev));
}
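/*
 * The tests below drive multiple simulated SPDK threads using the helpers
 * included from lib/ut_multithread.c: allocate_threads()/free_threads()
 * create and destroy the threads, set_thread() selects the thread that
 * subsequent calls run on, and poll_thread()/poll_threads() drain the
 * deferred messages for one or all threads.  Even I/O completed inline
 * have their completions deferred, so tests must poll before asserting
 * on a status variable.
 */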
static void
bdev_init_cb(void *done, int rc)
{
	CU_ASSERT(rc == 0);
	*(bool *)done = true;
}

static void
setup_test(void)
{
	bool done = false;

	allocate_threads(BDEV_UT_NUM_THREADS);
	spdk_bdev_initialize(bdev_init_cb, &done);
	spdk_io_device_register(&g_io_device, stub_create_ch, stub_destroy_ch,
				sizeof(struct ut_bdev_channel));
	register_bdev(&g_bdev, "ut_bdev", &g_io_device);
	spdk_bdev_open(&g_bdev.bdev, true, NULL, NULL, &g_desc);
}

static void
finish_cb(void *cb_arg)
{
	g_teardown_done = true;
}

static void
teardown_test(void)
{
	g_teardown_done = false;
	spdk_bdev_close(g_desc);
	g_desc = NULL;
	unregister_bdev(&g_bdev);
	spdk_io_device_unregister(&g_io_device, NULL);
	spdk_bdev_finish(finish_cb, NULL);
	poll_threads();
	CU_ASSERT(g_teardown_done == true);
	g_teardown_done = false;
	free_threads();
}

static void
basic(void)
{
	setup_test();

	set_thread(0);

	g_get_io_channel = false;
	g_ut_threads[0].ch = spdk_bdev_get_io_channel(g_desc);
	CU_ASSERT(g_ut_threads[0].ch == NULL);

	g_get_io_channel = true;
	g_create_ch = false;
	g_ut_threads[0].ch = spdk_bdev_get_io_channel(g_desc);
	CU_ASSERT(g_ut_threads[0].ch == NULL);

	g_get_io_channel = true;
	g_create_ch = true;
	g_ut_threads[0].ch = spdk_bdev_get_io_channel(g_desc);
	CU_ASSERT(g_ut_threads[0].ch != NULL);
	spdk_put_io_channel(g_ut_threads[0].ch);

	teardown_test();
}

static void
reset_done(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
{
	bool *done = cb_arg;

	CU_ASSERT(success == true);
	*done = true;
	spdk_bdev_free_io(bdev_io);
}

static void
put_channel_during_reset(void)
{
	struct spdk_io_channel *io_ch;
	bool done = false;

	setup_test();

	set_thread(0);
	io_ch = spdk_bdev_get_io_channel(g_desc);
	CU_ASSERT(io_ch != NULL);

	/*
	 * Start a reset, but then put the I/O channel before
	 * the deferred messages for the reset get a chance to
	 * execute.
	 */
	spdk_bdev_reset(g_desc, io_ch, reset_done, &done);
	spdk_put_io_channel(io_ch);
	poll_threads();
	stub_complete_io(g_bdev.io_target, 0);

	teardown_test();
}

static void
aborted_reset_done(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
{
	enum spdk_bdev_io_status *status = cb_arg;

	*status = success ? SPDK_BDEV_IO_STATUS_SUCCESS : SPDK_BDEV_IO_STATUS_FAILED;
	spdk_bdev_free_io(bdev_io);
}
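/*
 * Only one reset can be outstanding on a bdev at a time; a second reset
 * submitted in the meantime is queued.  aborted_reset() below checks that
 * destroying the channel which submitted the queued reset fails only that
 * queued reset, not the one already sent to the bdev module.
 */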
static void
aborted_reset(void)
{
	struct spdk_io_channel *io_ch[2];
	enum spdk_bdev_io_status status1, status2;

	setup_test();

	set_thread(0);
	io_ch[0] = spdk_bdev_get_io_channel(g_desc);
	CU_ASSERT(io_ch[0] != NULL);
	spdk_bdev_reset(g_desc, io_ch[0], aborted_reset_done, &status1);
	poll_threads();
	CU_ASSERT(g_bdev.bdev.reset_in_progress != NULL);

	/*
	 * First reset has been submitted on ch0.  Now submit a second
	 * reset on ch1 which will get queued since there is already a
	 * reset in progress.
	 */
	set_thread(1);
	io_ch[1] = spdk_bdev_get_io_channel(g_desc);
	CU_ASSERT(io_ch[1] != NULL);
	spdk_bdev_reset(g_desc, io_ch[1], aborted_reset_done, &status2);
	poll_threads();
	CU_ASSERT(g_bdev.bdev.reset_in_progress != NULL);

	/*
	 * Now destroy ch1.  This will abort the queued reset.  Check that
	 * the second reset was completed with failed status.  Also check
	 * that bdev->reset_in_progress != NULL, since the original reset
	 * has not been completed yet.  This ensures that the bdev code is
	 * correctly noticing that the failed reset is *not* the one that
	 * had been submitted to the bdev module.
	 */
	set_thread(1);
	spdk_put_io_channel(io_ch[1]);
	poll_threads();
	CU_ASSERT(status2 == SPDK_BDEV_IO_STATUS_FAILED);
	CU_ASSERT(g_bdev.bdev.reset_in_progress != NULL);

	/*
	 * Now complete the first reset, verify that it completed with SUCCESS
	 * status and that bdev->reset_in_progress is also set back to NULL.
	 */
	set_thread(0);
	spdk_put_io_channel(io_ch[0]);
	stub_complete_io(g_bdev.io_target, 0);
	poll_threads();
	CU_ASSERT(status1 == SPDK_BDEV_IO_STATUS_SUCCESS);
	CU_ASSERT(g_bdev.bdev.reset_in_progress == NULL);

	teardown_test();
}

static void
io_during_reset_done(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
{
	enum spdk_bdev_io_status *status = cb_arg;

	*status = success ? SPDK_BDEV_IO_STATUS_SUCCESS : SPDK_BDEV_IO_STATUS_FAILED;
	spdk_bdev_free_io(bdev_io);
}
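/*
 * While a reset is outstanding, the bdev layer marks every channel with
 * BDEV_CH_RESET_IN_PROGRESS and fails new I/O immediately rather than
 * sending it to the module.  io_during_reset() below verifies this on two
 * channels owned by two different threads.
 */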
static void
io_during_reset(void)
{
	struct spdk_io_channel *io_ch[2];
	struct spdk_bdev_channel *bdev_ch[2];
	enum spdk_bdev_io_status status0, status1, status_reset;
	int rc;

	setup_test();

	/*
	 * First test normal case - submit an I/O on each of two channels (with no resets)
	 * and verify they complete successfully.
	 */
	set_thread(0);
	io_ch[0] = spdk_bdev_get_io_channel(g_desc);
	bdev_ch[0] = spdk_io_channel_get_ctx(io_ch[0]);
	CU_ASSERT(bdev_ch[0]->flags == 0);
	status0 = SPDK_BDEV_IO_STATUS_PENDING;
	rc = spdk_bdev_read_blocks(g_desc, io_ch[0], NULL, 0, 1, io_during_reset_done, &status0);
	CU_ASSERT(rc == 0);

	set_thread(1);
	io_ch[1] = spdk_bdev_get_io_channel(g_desc);
	bdev_ch[1] = spdk_io_channel_get_ctx(io_ch[1]);
	CU_ASSERT(bdev_ch[1]->flags == 0);
	status1 = SPDK_BDEV_IO_STATUS_PENDING;
	rc = spdk_bdev_read_blocks(g_desc, io_ch[1], NULL, 0, 1, io_during_reset_done, &status1);
	CU_ASSERT(rc == 0);

	poll_threads();
	CU_ASSERT(status0 == SPDK_BDEV_IO_STATUS_PENDING);
	CU_ASSERT(status1 == SPDK_BDEV_IO_STATUS_PENDING);

	set_thread(0);
	stub_complete_io(g_bdev.io_target, 0);
	CU_ASSERT(status0 == SPDK_BDEV_IO_STATUS_SUCCESS);

	set_thread(1);
	stub_complete_io(g_bdev.io_target, 0);
	CU_ASSERT(status1 == SPDK_BDEV_IO_STATUS_SUCCESS);

	/*
	 * Now submit a reset, and leave it pending while we submit I/O on two different
	 * channels.  These I/O should be failed by the bdev layer since the reset is in
	 * progress.
	 */
	set_thread(0);
	status_reset = SPDK_BDEV_IO_STATUS_PENDING;
	rc = spdk_bdev_reset(g_desc, io_ch[0], io_during_reset_done, &status_reset);
	CU_ASSERT(rc == 0);

	CU_ASSERT(bdev_ch[0]->flags == 0);
	CU_ASSERT(bdev_ch[1]->flags == 0);
	poll_threads();
	CU_ASSERT(bdev_ch[0]->flags == BDEV_CH_RESET_IN_PROGRESS);
	CU_ASSERT(bdev_ch[1]->flags == BDEV_CH_RESET_IN_PROGRESS);

	set_thread(0);
	status0 = SPDK_BDEV_IO_STATUS_PENDING;
	rc = spdk_bdev_read_blocks(g_desc, io_ch[0], NULL, 0, 1, io_during_reset_done, &status0);
	CU_ASSERT(rc == 0);

	set_thread(1);
	status1 = SPDK_BDEV_IO_STATUS_PENDING;
	rc = spdk_bdev_read_blocks(g_desc, io_ch[1], NULL, 0, 1, io_during_reset_done, &status1);
	CU_ASSERT(rc == 0);

	/*
	 * A reset is in progress so these read I/O should complete with failure.  Note that we
	 * need to poll_threads() since I/O completed inline have their completions deferred.
	 */
	poll_threads();
	CU_ASSERT(status_reset == SPDK_BDEV_IO_STATUS_PENDING);
	CU_ASSERT(status0 == SPDK_BDEV_IO_STATUS_FAILED);
	CU_ASSERT(status1 == SPDK_BDEV_IO_STATUS_FAILED);

	/*
	 * Complete the reset.
	 */
	set_thread(0);
	stub_complete_io(g_bdev.io_target, 0);

	/*
	 * Only poll thread 0.  We should not get a completion.
	 */
	poll_thread(0);
	CU_ASSERT(status_reset == SPDK_BDEV_IO_STATUS_PENDING);

	/*
	 * Poll both thread 0 and 1 so the messages can propagate and we
	 * get a completion.
	 */
	poll_threads();
	CU_ASSERT(status_reset == SPDK_BDEV_IO_STATUS_SUCCESS);

	spdk_put_io_channel(io_ch[0]);
	set_thread(1);
	spdk_put_io_channel(io_ch[1]);
	poll_threads();

	teardown_test();
}

static void
enomem_done(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
{
	enum spdk_bdev_io_status *status = cb_arg;

	*status = success ? SPDK_BDEV_IO_STATUS_SUCCESS : SPDK_BDEV_IO_STATUS_FAILED;
	spdk_bdev_free_io(bdev_io);
}

static uint32_t
bdev_io_tailq_cnt(bdev_io_tailq_t *tailq)
{
	struct spdk_bdev_io *io;
	uint32_t cnt = 0;

	TAILQ_FOREACH(io, tailq, link) {
		cnt++;
	}

	return cnt;
}
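/*
 * When a module completes an I/O with SPDK_BDEV_IO_STATUS_NOMEM, the bdev
 * layer queues it on module_ch->nomem_io and records a retry threshold:
 * queued I/O are not resubmitted until the module's outstanding I/O count
 * drains down to nomem_threshold, i.e. NOMEM_THRESHOLD_COUNT below the
 * level at which ENOMEM first occurred (as asserted in enomem() below).
 */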
static void
enomem(void)
{
	struct spdk_io_channel *io_ch;
	struct spdk_bdev_channel *bdev_ch;
	struct spdk_bdev_module_channel *module_ch;
	struct ut_bdev_channel *ut_ch;
	const uint32_t IO_ARRAY_SIZE = 64;
	const uint32_t AVAIL = 20;
	enum spdk_bdev_io_status status[IO_ARRAY_SIZE], status_reset;
	uint32_t nomem_cnt, i;
	struct spdk_bdev_io *first_io;
	int rc;

	setup_test();

	set_thread(0);
	io_ch = spdk_bdev_get_io_channel(g_desc);
	bdev_ch = spdk_io_channel_get_ctx(io_ch);
	module_ch = bdev_ch->module_ch;
	ut_ch = spdk_io_channel_get_ctx(bdev_ch->channel);
	ut_ch->avail_cnt = AVAIL;

	/* First submit a number of IOs equal to what the channel can support. */
	for (i = 0; i < AVAIL; i++) {
		status[i] = SPDK_BDEV_IO_STATUS_PENDING;
		rc = spdk_bdev_read_blocks(g_desc, io_ch, NULL, 0, 1, enomem_done, &status[i]);
		CU_ASSERT(rc == 0);
	}
	CU_ASSERT(TAILQ_EMPTY(&module_ch->nomem_io));

	/*
	 * Next, submit one additional I/O.  This one should fail with ENOMEM and then go onto
	 * the nomem_io list.
	 */
	status[AVAIL] = SPDK_BDEV_IO_STATUS_PENDING;
	rc = spdk_bdev_read_blocks(g_desc, io_ch, NULL, 0, 1, enomem_done, &status[AVAIL]);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(!TAILQ_EMPTY(&module_ch->nomem_io));
	first_io = TAILQ_FIRST(&module_ch->nomem_io);

	/*
	 * Now submit a bunch more I/O.  These should all fail with ENOMEM and get queued behind
	 * the first_io above.
	 */
	for (i = AVAIL + 1; i < IO_ARRAY_SIZE; i++) {
		status[i] = SPDK_BDEV_IO_STATUS_PENDING;
		rc = spdk_bdev_read_blocks(g_desc, io_ch, NULL, 0, 1, enomem_done, &status[i]);
		CU_ASSERT(rc == 0);
	}

	/* Assert that first_io is still at the head of the list. */
	CU_ASSERT(TAILQ_FIRST(&module_ch->nomem_io) == first_io);
	CU_ASSERT(bdev_io_tailq_cnt(&module_ch->nomem_io) == (IO_ARRAY_SIZE - AVAIL));
	nomem_cnt = bdev_io_tailq_cnt(&module_ch->nomem_io);
	CU_ASSERT(module_ch->nomem_threshold == (AVAIL - NOMEM_THRESHOLD_COUNT));

	/*
	 * Complete 1 I/O only.  The key check here is bdev_io_tailq_cnt - this should not have
	 * changed since completing just 1 I/O should not trigger retrying the queued nomem_io
	 * list.
	 */
	stub_complete_io(g_bdev.io_target, 1);
	CU_ASSERT(bdev_io_tailq_cnt(&module_ch->nomem_io) == nomem_cnt);

	/*
	 * Complete enough I/O to hit the nomem_threshold.  This should trigger retrying nomem_io,
	 * and we should see I/O get resubmitted to the test bdev module.
	 */
	stub_complete_io(g_bdev.io_target, NOMEM_THRESHOLD_COUNT - 1);
	CU_ASSERT(bdev_io_tailq_cnt(&module_ch->nomem_io) < nomem_cnt);
	nomem_cnt = bdev_io_tailq_cnt(&module_ch->nomem_io);

	/* Complete 1 I/O only.  This should not trigger retrying the queued nomem_io. */
	stub_complete_io(g_bdev.io_target, 1);
	CU_ASSERT(bdev_io_tailq_cnt(&module_ch->nomem_io) == nomem_cnt);

	/*
	 * Send a reset and confirm that all I/O are completed, including the ones that
	 * were queued on the nomem_io list.
	 */
	status_reset = SPDK_BDEV_IO_STATUS_PENDING;
	rc = spdk_bdev_reset(g_desc, io_ch, enomem_done, &status_reset);
	poll_threads();
	CU_ASSERT(rc == 0);
	/* This will complete the reset. */
	stub_complete_io(g_bdev.io_target, 0);

	CU_ASSERT(bdev_io_tailq_cnt(&module_ch->nomem_io) == 0);
	CU_ASSERT(module_ch->io_outstanding == 0);

	spdk_put_io_channel(io_ch);
	poll_threads();
	teardown_test();
}
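/*
 * The nomem_io queue and its accounting live on the per-module channel, so
 * two bdevs that share an io_target also share a module_ch (as the fatal
 * assert below verifies) and therefore share ENOMEM bookkeeping.
 * enomem_multi_bdev() checks that I/O queued through one bdev is retried
 * when the other bdev's I/O completes.
 */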
static void
enomem_multi_bdev(void)
{
	struct spdk_io_channel *io_ch;
	struct spdk_bdev_channel *bdev_ch;
	struct spdk_bdev_module_channel *module_ch;
	struct ut_bdev_channel *ut_ch;
	const uint32_t IO_ARRAY_SIZE = 64;
	const uint32_t AVAIL = 20;
	enum spdk_bdev_io_status status[IO_ARRAY_SIZE];
	uint32_t i;
	struct ut_bdev *second_bdev;
	struct spdk_bdev_desc *second_desc;
	struct spdk_bdev_channel *second_bdev_ch;
	struct spdk_io_channel *second_ch;
	int rc;

	setup_test();

	/* Register second bdev with the same io_target. */
	second_bdev = calloc(1, sizeof(*second_bdev));
	SPDK_CU_ASSERT_FATAL(second_bdev != NULL);
	register_bdev(second_bdev, "ut_bdev2", g_bdev.io_target);
	spdk_bdev_open(&second_bdev->bdev, true, NULL, NULL, &second_desc);

	set_thread(0);
	io_ch = spdk_bdev_get_io_channel(g_desc);
	bdev_ch = spdk_io_channel_get_ctx(io_ch);
	module_ch = bdev_ch->module_ch;
	ut_ch = spdk_io_channel_get_ctx(bdev_ch->channel);
	ut_ch->avail_cnt = AVAIL;

	second_ch = spdk_bdev_get_io_channel(second_desc);
	second_bdev_ch = spdk_io_channel_get_ctx(second_ch);
	SPDK_CU_ASSERT_FATAL(module_ch == second_bdev_ch->module_ch);

	/* Saturate io_target through bdev A. */
	for (i = 0; i < AVAIL; i++) {
		status[i] = SPDK_BDEV_IO_STATUS_PENDING;
		rc = spdk_bdev_read_blocks(g_desc, io_ch, NULL, 0, 1, enomem_done, &status[i]);
		CU_ASSERT(rc == 0);
	}
	CU_ASSERT(TAILQ_EMPTY(&module_ch->nomem_io));

	/*
	 * Now submit I/O through the second bdev.  This should fail with ENOMEM
	 * and then go onto the nomem_io list.
	 */
	status[AVAIL] = SPDK_BDEV_IO_STATUS_PENDING;
	rc = spdk_bdev_read_blocks(second_desc, second_ch, NULL, 0, 1, enomem_done, &status[AVAIL]);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(!TAILQ_EMPTY(&module_ch->nomem_io));

	/* Complete first bdev's I/O.  This should retry sending the second bdev's nomem_io. */
	stub_complete_io(g_bdev.io_target, AVAIL);

	SPDK_CU_ASSERT_FATAL(TAILQ_EMPTY(&module_ch->nomem_io));
	CU_ASSERT(module_ch->io_outstanding == 1);

	/* Now complete our retried I/O. */
	stub_complete_io(g_bdev.io_target, 1);
	SPDK_CU_ASSERT_FATAL(module_ch->io_outstanding == 0);

	spdk_put_io_channel(io_ch);
	spdk_put_io_channel(second_ch);
	spdk_bdev_close(second_desc);
	unregister_bdev(second_bdev);
	free(second_bdev);
	poll_threads();
	teardown_test();
}
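/*
 * A new case would follow the same shape (hypothetical name my_test):
 * call setup_test(), drive I/O with set_thread()/poll_threads(), finish
 * with teardown_test(), and register it in main() below with
 *
 *     CU_add_test(suite, "my_test", my_test)
 */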
int
main(int argc, char **argv)
{
	CU_pSuite	suite = NULL;
	unsigned int	num_failures;

	if (CU_initialize_registry() != CUE_SUCCESS) {
		return CU_get_error();
	}

	suite = CU_add_suite("bdev", NULL, NULL);
	if (suite == NULL) {
		CU_cleanup_registry();
		return CU_get_error();
	}

	if (
		CU_add_test(suite, "basic", basic) == NULL ||
		CU_add_test(suite, "put_channel_during_reset", put_channel_during_reset) == NULL ||
		CU_add_test(suite, "aborted_reset", aborted_reset) == NULL ||
		CU_add_test(suite, "io_during_reset", io_during_reset) == NULL ||
		CU_add_test(suite, "enomem", enomem) == NULL ||
		CU_add_test(suite, "enomem_multi_bdev", enomem_multi_bdev) == NULL
	) {
		CU_cleanup_registry();
		return CU_get_error();
	}

	CU_basic_set_mode(CU_BRM_VERBOSE);
	CU_basic_run_tests();
	num_failures = CU_get_number_of_failures();
	CU_cleanup_registry();
	return num_failures;
}