/*-
 *   BSD LICENSE
 *
 *   Copyright (c) Intel Corporation. All rights reserved.
 *   Copyright (c) 2019 Mellanox Technologies LTD. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "spdk_cunit.h"

#include "common/lib/ut_multithread.c"
#include "unit/lib/json_mock.c"

#include "spdk/config.h"
/* HACK: disable VTune integration so the unit test doesn't need VTune headers and libs to build */
#undef SPDK_CONFIG_VTUNE

#include "bdev/bdev.c"

struct spdk_trace_histories *g_trace_histories;
DEFINE_STUB_V(spdk_trace_add_register_fn, (struct spdk_trace_register_fn *reg_fn));
DEFINE_STUB_V(spdk_trace_register_owner, (uint8_t type, char id_prefix));
DEFINE_STUB_V(spdk_trace_register_object, (uint8_t type, char id_prefix));
DEFINE_STUB_V(spdk_trace_register_description, (const char *name,
		uint16_t tpoint_id, uint8_t owner_type,
		uint8_t object_type, uint8_t new_object,
		uint8_t arg1_type, const char *arg1_name));
DEFINE_STUB_V(_spdk_trace_record, (uint64_t tsc, uint16_t tpoint_id, uint16_t poller_id,
				   uint32_t size, uint64_t object_id, uint64_t arg1));
DEFINE_STUB(spdk_notify_send, uint64_t, (const char *type, const char *ctx), 0);
DEFINE_STUB(spdk_notify_type_register, struct spdk_notify_type *, (const char *type), NULL);

int g_status;
int g_count;
enum spdk_bdev_event_type g_event_type1;
enum spdk_bdev_event_type g_event_type2;
struct spdk_histogram_data *g_histogram;
void *g_unregister_arg;
int g_unregister_rc;

void
spdk_scsi_nvme_translate(const struct spdk_bdev_io *bdev_io,
			 int *sc, int *sk, int *asc, int *ascq)
{
}

static int
null_init(void)
{
	return 0;
}

static int
null_clean(void)
{
	return 0;
}

static int
stub_destruct(void *ctx)
{
	return 0;
}

struct ut_expected_io {
	uint8_t		type;
	uint64_t	offset;
	uint64_t	length;
	int		iovcnt;
	struct iovec	iov[BDEV_IO_NUM_CHILD_IOV];
	void		*md_buf;
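	/* Entry on bdev_ut_channel::expected_io; stub_submit_request() pops these
	 * in FIFO order and checks each submitted I/O against the next expectation.
	 */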
	TAILQ_ENTRY(ut_expected_io)	link;
};

struct bdev_ut_channel {
	TAILQ_HEAD(, spdk_bdev_io)	outstanding_io;
	uint32_t			outstanding_io_count;
	TAILQ_HEAD(, ut_expected_io)	expected_io;
};

static bool g_io_done;
static struct spdk_bdev_io *g_bdev_io;
static enum spdk_bdev_io_status g_io_status;
static enum spdk_bdev_io_status g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS;
static uint32_t g_bdev_ut_io_device;
static struct bdev_ut_channel *g_bdev_ut_channel;
static void *g_compare_read_buf;
static uint32_t g_compare_read_buf_len;
static void *g_compare_write_buf;
static uint32_t g_compare_write_buf_len;
static bool g_abort_done;
static enum spdk_bdev_io_status g_abort_status;

static struct ut_expected_io *
ut_alloc_expected_io(uint8_t type, uint64_t offset, uint64_t length, int iovcnt)
{
	struct ut_expected_io *expected_io;

	expected_io = calloc(1, sizeof(*expected_io));
	SPDK_CU_ASSERT_FATAL(expected_io != NULL);

	expected_io->type = type;
	expected_io->offset = offset;
	expected_io->length = length;
	expected_io->iovcnt = iovcnt;

	return expected_io;
}

static void
ut_expected_io_set_iov(struct ut_expected_io *expected_io, int pos, void *base, size_t len)
{
	expected_io->iov[pos].iov_base = base;
	expected_io->iov[pos].iov_len = len;
}

static void
stub_submit_request(struct spdk_io_channel *_ch, struct spdk_bdev_io *bdev_io)
{
	struct bdev_ut_channel *ch = spdk_io_channel_get_ctx(_ch);
	struct ut_expected_io *expected_io;
	struct iovec *iov, *expected_iov;
	struct spdk_bdev_io *bio_to_abort;
	int i;

	g_bdev_io = bdev_io;

	if (g_compare_read_buf && bdev_io->type == SPDK_BDEV_IO_TYPE_READ) {
		uint32_t len = bdev_io->u.bdev.iovs[0].iov_len;

		CU_ASSERT(bdev_io->u.bdev.iovcnt == 1);
		CU_ASSERT(g_compare_read_buf_len == len);
		memcpy(bdev_io->u.bdev.iovs[0].iov_base, g_compare_read_buf, len);
	}

	if (g_compare_write_buf && bdev_io->type == SPDK_BDEV_IO_TYPE_WRITE) {
		uint32_t len = bdev_io->u.bdev.iovs[0].iov_len;

		CU_ASSERT(bdev_io->u.bdev.iovcnt == 1);
		CU_ASSERT(g_compare_write_buf_len == len);
		memcpy(g_compare_write_buf, bdev_io->u.bdev.iovs[0].iov_base, len);
	}

	if (g_compare_read_buf && bdev_io->type == SPDK_BDEV_IO_TYPE_COMPARE) {
		uint32_t len = bdev_io->u.bdev.iovs[0].iov_len;

		CU_ASSERT(bdev_io->u.bdev.iovcnt == 1);
		CU_ASSERT(g_compare_read_buf_len == len);
		if (memcmp(bdev_io->u.bdev.iovs[0].iov_base, g_compare_read_buf, len)) {
			g_io_exp_status = SPDK_BDEV_IO_STATUS_MISCOMPARE;
		}
	}

	if (bdev_io->type == SPDK_BDEV_IO_TYPE_ABORT) {
		if (g_io_exp_status == SPDK_BDEV_IO_STATUS_SUCCESS) {
			TAILQ_FOREACH(bio_to_abort, &ch->outstanding_io, module_link) {
				if (bio_to_abort == bdev_io->u.abort.bio_to_abort) {
					TAILQ_REMOVE(&ch->outstanding_io, bio_to_abort, module_link);
					ch->outstanding_io_count--;
					spdk_bdev_io_complete(bio_to_abort, SPDK_BDEV_IO_STATUS_FAILED);
					break;
				}
			}
		}
	}

	TAILQ_INSERT_TAIL(&ch->outstanding_io, bdev_io, module_link);
	ch->outstanding_io_count++;

	expected_io = TAILQ_FIRST(&ch->expected_io);
	if (expected_io == NULL) {
		return;
	}
	TAILQ_REMOVE(&ch->expected_io, expected_io, link);

	if (expected_io->type != SPDK_BDEV_IO_TYPE_INVALID) {
		CU_ASSERT(bdev_io->type == expected_io->type);
	}

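	/* The checks below compare the submitted I/O against the expectation field
	 * by field.  An expectation with length == 0 only cares about the I/O type,
	 * and one with iovcnt == 0 skips the iovec comparison.
	 */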
	if (expected_io->md_buf != NULL) {
		CU_ASSERT(expected_io->md_buf == bdev_io->u.bdev.md_buf);
	}

	if (expected_io->length == 0) {
		free(expected_io);
		return;
	}

	CU_ASSERT(expected_io->offset == bdev_io->u.bdev.offset_blocks);
	CU_ASSERT(expected_io->length == bdev_io->u.bdev.num_blocks);

	if (expected_io->iovcnt == 0) {
		free(expected_io);
		/* UNMAP, WRITE_ZEROES and FLUSH don't have iovs, so we can just return now. */
		return;
	}

	CU_ASSERT(expected_io->iovcnt == bdev_io->u.bdev.iovcnt);
	for (i = 0; i < expected_io->iovcnt; i++) {
		iov = &bdev_io->u.bdev.iovs[i];
		expected_iov = &expected_io->iov[i];
		CU_ASSERT(iov->iov_len == expected_iov->iov_len);
		CU_ASSERT(iov->iov_base == expected_iov->iov_base);
	}

	free(expected_io);
}

static void
stub_submit_request_get_buf_cb(struct spdk_io_channel *_ch,
			       struct spdk_bdev_io *bdev_io, bool success)
{
	CU_ASSERT(success == true);

	stub_submit_request(_ch, bdev_io);
}

static void
stub_submit_request_get_buf(struct spdk_io_channel *_ch, struct spdk_bdev_io *bdev_io)
{
	spdk_bdev_io_get_buf(bdev_io, stub_submit_request_get_buf_cb,
			     bdev_io->u.bdev.num_blocks * bdev_io->bdev->blocklen);
}

static uint32_t
stub_complete_io(uint32_t num_to_complete)
{
	struct bdev_ut_channel *ch = g_bdev_ut_channel;
	struct spdk_bdev_io *bdev_io;
	static enum spdk_bdev_io_status io_status;
	uint32_t num_completed = 0;

	while (num_completed < num_to_complete) {
		if (TAILQ_EMPTY(&ch->outstanding_io)) {
			break;
		}
		bdev_io = TAILQ_FIRST(&ch->outstanding_io);
		TAILQ_REMOVE(&ch->outstanding_io, bdev_io, module_link);
		ch->outstanding_io_count--;
		io_status = g_io_exp_status == SPDK_BDEV_IO_STATUS_SUCCESS ? SPDK_BDEV_IO_STATUS_SUCCESS :
			    g_io_exp_status;
		spdk_bdev_io_complete(bdev_io, io_status);
		num_completed++;
	}

	return num_completed;
}

static struct spdk_io_channel *
bdev_ut_get_io_channel(void *ctx)
{
	return spdk_get_io_channel(&g_bdev_ut_io_device);
}

static bool g_io_types_supported[SPDK_BDEV_NUM_IO_TYPES] = {
	[SPDK_BDEV_IO_TYPE_READ]		= true,
	[SPDK_BDEV_IO_TYPE_WRITE]		= true,
	[SPDK_BDEV_IO_TYPE_COMPARE]		= true,
	[SPDK_BDEV_IO_TYPE_UNMAP]		= true,
	[SPDK_BDEV_IO_TYPE_FLUSH]		= true,
	[SPDK_BDEV_IO_TYPE_RESET]		= true,
	[SPDK_BDEV_IO_TYPE_NVME_ADMIN]		= true,
	[SPDK_BDEV_IO_TYPE_NVME_IO]		= true,
	[SPDK_BDEV_IO_TYPE_NVME_IO_MD]		= true,
	[SPDK_BDEV_IO_TYPE_WRITE_ZEROES]	= true,
	[SPDK_BDEV_IO_TYPE_ZCOPY]		= true,
	[SPDK_BDEV_IO_TYPE_ABORT]		= true,
};

static void
ut_enable_io_type(enum spdk_bdev_io_type io_type, bool enable)
{
	g_io_types_supported[io_type] = enable;
}

static bool
stub_io_type_supported(void *_bdev, enum spdk_bdev_io_type io_type)
{
	return g_io_types_supported[io_type];
}

static struct spdk_bdev_fn_table fn_table = {
	.destruct = stub_destruct,
	.submit_request = stub_submit_request,
	.get_io_channel = bdev_ut_get_io_channel,
	.io_type_supported = stub_io_type_supported,
};

static int
bdev_ut_create_ch(void *io_device, void *ctx_buf)
{
	struct bdev_ut_channel *ch = ctx_buf;

	CU_ASSERT(g_bdev_ut_channel == NULL);
	g_bdev_ut_channel = ch;

	TAILQ_INIT(&ch->outstanding_io);
	ch->outstanding_io_count = 0;
	TAILQ_INIT(&ch->expected_io);
	return 0;
}

static void
bdev_ut_destroy_ch(void *io_device, void *ctx_buf)
{
	CU_ASSERT(g_bdev_ut_channel != NULL);
	g_bdev_ut_channel = NULL;
}

struct spdk_bdev_module bdev_ut_if;

static int
bdev_ut_module_init(void)
{
	spdk_io_device_register(&g_bdev_ut_io_device, bdev_ut_create_ch, bdev_ut_destroy_ch,
				sizeof(struct bdev_ut_channel), NULL);
	spdk_bdev_module_init_done(&bdev_ut_if);
	return 0;
}

static void
bdev_ut_module_fini(void)
{
	spdk_io_device_unregister(&g_bdev_ut_io_device, NULL);
}

struct spdk_bdev_module bdev_ut_if = {
	.name = "bdev_ut",
	.module_init = bdev_ut_module_init,
	.module_fini = bdev_ut_module_fini,
	.async_init = true,
};

static void vbdev_ut_examine(struct spdk_bdev *bdev);

static int
vbdev_ut_module_init(void)
{
	return 0;
}

static void
vbdev_ut_module_fini(void)
{
}

struct spdk_bdev_module vbdev_ut_if = {
	.name = "vbdev_ut",
	.module_init = vbdev_ut_module_init,
	.module_fini = vbdev_ut_module_fini,
	.examine_config = vbdev_ut_examine,
};

SPDK_BDEV_MODULE_REGISTER(bdev_ut, &bdev_ut_if)
SPDK_BDEV_MODULE_REGISTER(vbdev_ut, &vbdev_ut_if)

static void
vbdev_ut_examine(struct spdk_bdev *bdev)
{
	spdk_bdev_module_examine_done(&vbdev_ut_if);
}

static struct spdk_bdev *
allocate_bdev(char *name)
{
	struct spdk_bdev *bdev;
	int rc;

	bdev = calloc(1, sizeof(*bdev));
	SPDK_CU_ASSERT_FATAL(bdev != NULL);

	bdev->name = name;
	bdev->fn_table = &fn_table;
	bdev->module = &bdev_ut_if;
	bdev->blockcnt = 1024;
	bdev->blocklen = 512;

	rc = spdk_bdev_register(bdev);
	CU_ASSERT(rc == 0);

	return bdev;
}

static struct spdk_bdev *
allocate_vbdev(char *name)
{
	struct spdk_bdev *bdev;
	int rc;

	bdev = calloc(1, sizeof(*bdev));
	SPDK_CU_ASSERT_FATAL(bdev != NULL);

	bdev->name = name;
	bdev->fn_table = &fn_table;
	bdev->module = &vbdev_ut_if;

	rc = spdk_bdev_register(bdev);
	CU_ASSERT(rc == 0);

	return bdev;
}

static void
free_bdev(struct spdk_bdev *bdev)
{
	spdk_bdev_unregister(bdev, NULL, NULL);
	poll_threads();
	memset(bdev, 0xFF, sizeof(*bdev));
	free(bdev);
}

static void
free_vbdev(struct spdk_bdev *bdev)
{
	spdk_bdev_unregister(bdev, NULL, NULL);
	poll_threads();
	memset(bdev, 0xFF, sizeof(*bdev));
	free(bdev);
}

static void
get_device_stat_cb(struct spdk_bdev *bdev, struct spdk_bdev_io_stat *stat, void *cb_arg, int rc)
{
	const char *bdev_name;

	CU_ASSERT(bdev != NULL);
	CU_ASSERT(rc == 0);
	bdev_name = spdk_bdev_get_name(bdev);
	CU_ASSERT_STRING_EQUAL(bdev_name, "bdev0");

	free(stat);
	free_bdev(bdev);

	*(bool *)cb_arg = true;
}

static void
bdev_unregister_cb(void *cb_arg, int rc)
{
	g_unregister_arg = cb_arg;
	g_unregister_rc = rc;
}

static void
bdev_open_cb1(enum spdk_bdev_event_type type, struct spdk_bdev *bdev, void *event_ctx)
{
	struct spdk_bdev_desc *desc = *(struct spdk_bdev_desc **)event_ctx;

	g_event_type1 = type;
	if (SPDK_BDEV_EVENT_REMOVE == type) {
		spdk_bdev_close(desc);
	}
}

static void
bdev_open_cb2(enum spdk_bdev_event_type type, struct spdk_bdev *bdev, void *event_ctx)
{
	struct spdk_bdev_desc *desc = *(struct spdk_bdev_desc **)event_ctx;

	g_event_type2 = type;
	if (SPDK_BDEV_EVENT_REMOVE == type) {
		spdk_bdev_close(desc);
	}
}

static void
get_device_stat_test(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_io_stat *stat;
	bool done;

	bdev = allocate_bdev("bdev0");
	stat = calloc(1, sizeof(struct spdk_bdev_io_stat));
	if (stat == NULL) {
		free_bdev(bdev);
		return;
	}

	done = false;
	spdk_bdev_get_device_stat(bdev, stat, get_device_stat_cb, &done);
	while (!done) { poll_threads(); }
}

static void
open_write_test(void)
{
	struct spdk_bdev *bdev[9];
	struct spdk_bdev_desc *desc[9] = {};
	int rc;

	/*
	 * Create a tree of bdevs to test various open w/ write cases.
	 *
	 * bdev0 through bdev3 are physical block devices, such as NVMe
	 * namespaces or Ceph block devices.
	 *
	 * bdev4 is a virtual bdev with multiple base bdevs.  This models
	 * caching or RAID use cases.
	 *
	 * bdev5 and bdev6 are virtual bdevs with the same base bdev (bdev2).
	 * This models partitioning or logical volume use cases.
	 *
	 * bdev7 is a virtual bdev with multiple base bdevs.  One of its base
	 * bdevs (bdev2) is shared with the other virtual bdevs bdev5 and
	 * bdev6.  This models caching, RAID, partitioning or logical volume
	 * use cases.
	 *
	 * bdev8 is a virtual bdev with multiple base bdevs, but these
	 * base bdevs are themselves virtual bdevs.
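	 *
	 * (The claims below are taken with spdk_bdev_module_claim_bdev(); a
	 * claimed bdev can still be opened read-only, but opening it read/write
	 * fails with -EPERM, which is what the assertions in this test check.)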
	 *
	 *                bdev8
	 *                  |
	 *            +----------+
	 *            |          |
	 *          bdev4      bdev5   bdev6   bdev7
	 *            |          |       |       |
	 *        +---+---+      +---+   +   +---+---+
	 *        |       |           \  |  /         \
	 *      bdev0   bdev1          bdev2          bdev3
	 */

	bdev[0] = allocate_bdev("bdev0");
	rc = spdk_bdev_module_claim_bdev(bdev[0], NULL, &bdev_ut_if);
	CU_ASSERT(rc == 0);

	bdev[1] = allocate_bdev("bdev1");
	rc = spdk_bdev_module_claim_bdev(bdev[1], NULL, &bdev_ut_if);
	CU_ASSERT(rc == 0);

	bdev[2] = allocate_bdev("bdev2");
	rc = spdk_bdev_module_claim_bdev(bdev[2], NULL, &bdev_ut_if);
	CU_ASSERT(rc == 0);

	bdev[3] = allocate_bdev("bdev3");
	rc = spdk_bdev_module_claim_bdev(bdev[3], NULL, &bdev_ut_if);
	CU_ASSERT(rc == 0);

	bdev[4] = allocate_vbdev("bdev4");
	rc = spdk_bdev_module_claim_bdev(bdev[4], NULL, &bdev_ut_if);
	CU_ASSERT(rc == 0);

	bdev[5] = allocate_vbdev("bdev5");
	rc = spdk_bdev_module_claim_bdev(bdev[5], NULL, &bdev_ut_if);
	CU_ASSERT(rc == 0);

	bdev[6] = allocate_vbdev("bdev6");

	bdev[7] = allocate_vbdev("bdev7");

	bdev[8] = allocate_vbdev("bdev8");

	/* Open bdev0 read-only.  This should succeed. */
	rc = spdk_bdev_open(bdev[0], false, NULL, NULL, &desc[0]);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(desc[0] != NULL);
	spdk_bdev_close(desc[0]);

	/*
	 * Open bdev1 read/write.  This should fail since bdev1 has been claimed
	 * by a vbdev module.
	 */
	rc = spdk_bdev_open(bdev[1], true, NULL, NULL, &desc[1]);
	CU_ASSERT(rc == -EPERM);

	/*
	 * Open bdev4 read/write.  This should fail since bdev4 has been claimed
	 * by a vbdev module.
	 */
	rc = spdk_bdev_open(bdev[4], true, NULL, NULL, &desc[4]);
	CU_ASSERT(rc == -EPERM);

	/* Open bdev4 read-only.  This should succeed. */
	rc = spdk_bdev_open(bdev[4], false, NULL, NULL, &desc[4]);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(desc[4] != NULL);
	spdk_bdev_close(desc[4]);

	/*
	 * Open bdev8 read/write.  This should succeed since it is a leaf
	 * bdev that no other module has claimed.
	 */
	rc = spdk_bdev_open(bdev[8], true, NULL, NULL, &desc[8]);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(desc[8] != NULL);
	spdk_bdev_close(desc[8]);

	/*
	 * Open bdev5 read/write.  This should fail since bdev5 has been claimed
	 * by a vbdev module.
	 */
	rc = spdk_bdev_open(bdev[5], true, NULL, NULL, &desc[5]);
	CU_ASSERT(rc == -EPERM);

	/* Open bdev5 read-only.  This should succeed. */
	rc = spdk_bdev_open(bdev[5], false, NULL, NULL, &desc[5]);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(desc[5] != NULL);
	spdk_bdev_close(desc[5]);

	free_vbdev(bdev[8]);

	free_vbdev(bdev[5]);
	free_vbdev(bdev[6]);
	free_vbdev(bdev[7]);

	free_vbdev(bdev[4]);

	free_bdev(bdev[0]);
	free_bdev(bdev[1]);
	free_bdev(bdev[2]);
	free_bdev(bdev[3]);
}

static void
bytes_to_blocks_test(void)
{
	struct spdk_bdev bdev;
	uint64_t offset_blocks, num_blocks;

	memset(&bdev, 0, sizeof(bdev));

	bdev.blocklen = 512;

	/* All parameters valid */
	offset_blocks = 0;
	num_blocks = 0;
	CU_ASSERT(bdev_bytes_to_blocks(&bdev, 512, &offset_blocks, 1024, &num_blocks) == 0);
	CU_ASSERT(offset_blocks == 1);
	CU_ASSERT(num_blocks == 2);

	/* Offset not a block multiple */
	CU_ASSERT(bdev_bytes_to_blocks(&bdev, 3, &offset_blocks, 512, &num_blocks) != 0);

	/* Length not a block multiple */
	CU_ASSERT(bdev_bytes_to_blocks(&bdev, 512, &offset_blocks, 3, &num_blocks) != 0);

	/* In case blocklen is not a power of two */
	bdev.blocklen = 100;
	CU_ASSERT(bdev_bytes_to_blocks(&bdev, 100, &offset_blocks, 200, &num_blocks) == 0);
	CU_ASSERT(offset_blocks == 1);
	CU_ASSERT(num_blocks == 2);

	/* Offset not a block multiple */
	CU_ASSERT(bdev_bytes_to_blocks(&bdev, 3, &offset_blocks, 100, &num_blocks) != 0);

	/* Length not a block multiple */
	CU_ASSERT(bdev_bytes_to_blocks(&bdev, 100, &offset_blocks, 3, &num_blocks) != 0);
}

static void
num_blocks_test(void)
{
	struct spdk_bdev bdev;
	struct spdk_bdev_desc *desc = NULL;
	struct spdk_bdev_desc *desc_ext = NULL;
	int rc;

	memset(&bdev, 0, sizeof(bdev));
	bdev.name = "num_blocks";
	bdev.fn_table = &fn_table;
	bdev.module = &bdev_ut_if;
	spdk_bdev_register(&bdev);
	spdk_bdev_notify_blockcnt_change(&bdev, 50);

	/* Growing block number */
	CU_ASSERT(spdk_bdev_notify_blockcnt_change(&bdev, 70) == 0);
	/* Shrinking block number */
	CU_ASSERT(spdk_bdev_notify_blockcnt_change(&bdev, 30) == 0);

	/* In case bdev opened */
	rc = spdk_bdev_open(&bdev, false, NULL, NULL, &desc);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(desc != NULL);

	/* Growing block number */
	CU_ASSERT(spdk_bdev_notify_blockcnt_change(&bdev, 80) == 0);
	/* Shrinking block number is not allowed while the bdev is open */
	CU_ASSERT(spdk_bdev_notify_blockcnt_change(&bdev, 20) != 0);

	/* In case bdev opened with ext API */
	rc = spdk_bdev_open_ext("num_blocks", false, bdev_open_cb1, &desc_ext, &desc_ext);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(desc_ext != NULL);

	g_event_type1 = 0xFF;
	/* Growing block number */
	CU_ASSERT(spdk_bdev_notify_blockcnt_change(&bdev, 90) == 0);

	poll_threads();
	CU_ASSERT_EQUAL(g_event_type1, SPDK_BDEV_EVENT_RESIZE);

	g_event_type1 = 0xFF;
	/* Growing block number and closing */
	CU_ASSERT(spdk_bdev_notify_blockcnt_change(&bdev, 100) == 0);

	spdk_bdev_close(desc);
	spdk_bdev_close(desc_ext);
	spdk_bdev_unregister(&bdev, NULL, NULL);

	poll_threads();

	/* Callback is not called for closed device */
	CU_ASSERT_EQUAL(g_event_type1, 0xFF);
}

static void
io_valid_test(void)
{
	struct spdk_bdev bdev;

	memset(&bdev, 0, sizeof(bdev));

	bdev.blocklen = 512;
	spdk_bdev_notify_blockcnt_change(&bdev, 100);

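	/* A hedged note: bdev_io_valid_blocks() is expected to reject any range
	 * that runs past blockcnt (100 blocks here) or that overflows uint64_t
	 * arithmetic, as the cases below exercise.
	 */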
	/* All parameters valid */
	CU_ASSERT(bdev_io_valid_blocks(&bdev, 1, 2) == true);

	/* Last valid block */
	CU_ASSERT(bdev_io_valid_blocks(&bdev, 99, 1) == true);

	/* Offset past end of bdev */
	CU_ASSERT(bdev_io_valid_blocks(&bdev, 100, 1) == false);

	/* Offset + length past end of bdev */
	CU_ASSERT(bdev_io_valid_blocks(&bdev, 99, 2) == false);

	/* Offset near end of uint64_t range (2^64 - 1) */
	CU_ASSERT(bdev_io_valid_blocks(&bdev, 18446744073709551615ULL, 1) == false);
}

static void
alias_add_del_test(void)
{
	struct spdk_bdev *bdev[3];
	int rc;

	/* Create and register bdevs */
	bdev[0] = allocate_bdev("bdev0");
	SPDK_CU_ASSERT_FATAL(bdev[0] != 0);

	bdev[1] = allocate_bdev("bdev1");
	SPDK_CU_ASSERT_FATAL(bdev[1] != 0);

	bdev[2] = allocate_bdev("bdev2");
	SPDK_CU_ASSERT_FATAL(bdev[2] != 0);

	poll_threads();

	/*
	 * Try adding an alias identical to the name.
	 * Since the alias is identical to the name, it can not be added to the aliases list.
	 */
	rc = spdk_bdev_alias_add(bdev[0], bdev[0]->name);
	CU_ASSERT(rc == -EEXIST);

	/*
	 * Try to add an empty alias.
	 * This one should fail.
	 */
	rc = spdk_bdev_alias_add(bdev[0], NULL);
	CU_ASSERT(rc == -EINVAL);

	/* Try adding the same alias to two different registered bdevs */

	/* Alias is used for the first time, so this one should pass */
	rc = spdk_bdev_alias_add(bdev[0], "proper alias 0");
	CU_ASSERT(rc == 0);

	/* Alias was added to another bdev, so this one should fail */
	rc = spdk_bdev_alias_add(bdev[1], "proper alias 0");
	CU_ASSERT(rc == -EEXIST);

	/* Alias is used for the first time, so this one should pass */
	rc = spdk_bdev_alias_add(bdev[1], "proper alias 1");
	CU_ASSERT(rc == 0);

	/* Try removing aliases from registered bdevs */

	/* Alias is not on a bdev aliases list, so this one should fail */
	rc = spdk_bdev_alias_del(bdev[0], "not existing");
	CU_ASSERT(rc == -ENOENT);

	/* Alias is present on a bdev aliases list, so this one should pass */
	rc = spdk_bdev_alias_del(bdev[0], "proper alias 0");
	CU_ASSERT(rc == 0);

	/* Alias is present on a bdev aliases list, so this one should pass */
	rc = spdk_bdev_alias_del(bdev[1], "proper alias 1");
	CU_ASSERT(rc == 0);

	/* Try to remove the name instead of an alias.  This should fail, since the
	 * name cannot be changed or removed. */
	rc = spdk_bdev_alias_del(bdev[0], bdev[0]->name);
	CU_ASSERT(rc != 0);

	/* Try to delete all aliases from an empty alias list */
	spdk_bdev_alias_del_all(bdev[2]);
	SPDK_CU_ASSERT_FATAL(TAILQ_EMPTY(&bdev[2]->aliases));

	/* Try to delete all aliases from a non-empty alias list */
	rc = spdk_bdev_alias_add(bdev[2], "alias0");
	CU_ASSERT(rc == 0);
	rc = spdk_bdev_alias_add(bdev[2], "alias1");
	CU_ASSERT(rc == 0);
	spdk_bdev_alias_del_all(bdev[2]);
	CU_ASSERT(TAILQ_EMPTY(&bdev[2]->aliases));

	/* Unregister and free bdevs */
	spdk_bdev_unregister(bdev[0], NULL, NULL);
	spdk_bdev_unregister(bdev[1], NULL, NULL);
	spdk_bdev_unregister(bdev[2], NULL, NULL);

	poll_threads();

	free(bdev[0]);
	free(bdev[1]);
	free(bdev[2]);
}

static void
io_done(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
{
	g_io_done = true;
	g_io_status = bdev_io->internal.status;
	spdk_bdev_free_io(bdev_io);
}

static void
bdev_init_cb(void *arg, int rc)
{
	CU_ASSERT(rc == 0);
}
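
/* A hedged note on the harness: spdk_bdev_initialize() and spdk_bdev_finish()
 * complete asynchronously, so the tests below call poll_threads() (provided by
 * ut_multithread.c) to drive the pending messages until these callbacks fire.
 */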

static void
bdev_fini_cb(void *arg)
{
}

struct bdev_ut_io_wait_entry {
	struct spdk_bdev_io_wait_entry	entry;
	struct spdk_io_channel		*io_ch;
	struct spdk_bdev_desc		*desc;
	bool				submitted;
};

static void
io_wait_cb(void *arg)
{
	struct bdev_ut_io_wait_entry *entry = arg;
	int rc;

	rc = spdk_bdev_read_blocks(entry->desc, entry->io_ch, NULL, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	entry->submitted = true;
}

static void
bdev_io_types_test(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc = NULL;
	struct spdk_io_channel *io_ch;
	struct spdk_bdev_opts bdev_opts = {
		.bdev_io_pool_size = 4,
		.bdev_io_cache_size = 2,
	};
	int rc;

	rc = spdk_bdev_set_opts(&bdev_opts);
	CU_ASSERT(rc == 0);
	spdk_bdev_initialize(bdev_init_cb, NULL);
	poll_threads();

	bdev = allocate_bdev("bdev0");

	rc = spdk_bdev_open(bdev, true, NULL, NULL, &desc);
	CU_ASSERT(rc == 0);
	poll_threads();
	SPDK_CU_ASSERT_FATAL(desc != NULL);
	io_ch = spdk_bdev_get_io_channel(desc);
	CU_ASSERT(io_ch != NULL);

	/* WRITE and WRITE ZEROES are not supported */
	ut_enable_io_type(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, false);
	ut_enable_io_type(SPDK_BDEV_IO_TYPE_WRITE, false);
	rc = spdk_bdev_write_zeroes_blocks(desc, io_ch, 0, 128, io_done, NULL);
	CU_ASSERT(rc == -ENOTSUP);
	ut_enable_io_type(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, true);
	ut_enable_io_type(SPDK_BDEV_IO_TYPE_WRITE, true);

	spdk_put_io_channel(io_ch);
	spdk_bdev_close(desc);
	free_bdev(bdev);
	spdk_bdev_finish(bdev_fini_cb, NULL);
	poll_threads();
}

static void
bdev_io_wait_test(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc = NULL;
	struct spdk_io_channel *io_ch;
	struct spdk_bdev_opts bdev_opts = {
		.bdev_io_pool_size = 4,
		.bdev_io_cache_size = 2,
	};
	struct bdev_ut_io_wait_entry io_wait_entry;
	struct bdev_ut_io_wait_entry io_wait_entry2;
	int rc;

	rc = spdk_bdev_set_opts(&bdev_opts);
	CU_ASSERT(rc == 0);
	spdk_bdev_initialize(bdev_init_cb, NULL);
	poll_threads();

	bdev = allocate_bdev("bdev0");

	rc = spdk_bdev_open(bdev, true, NULL, NULL, &desc);
	CU_ASSERT(rc == 0);
	poll_threads();
	SPDK_CU_ASSERT_FATAL(desc != NULL);
	io_ch = spdk_bdev_get_io_channel(desc);
	CU_ASSERT(io_ch != NULL);

	rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 4);

	rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, NULL);
	CU_ASSERT(rc == -ENOMEM);

	io_wait_entry.entry.bdev = bdev;
	io_wait_entry.entry.cb_fn = io_wait_cb;
	io_wait_entry.entry.cb_arg = &io_wait_entry;
	io_wait_entry.io_ch = io_ch;
	io_wait_entry.desc = desc;
	io_wait_entry.submitted = false;
	/* Cannot use the same io_wait_entry for two different calls. */
	memcpy(&io_wait_entry2, &io_wait_entry, sizeof(io_wait_entry));
	io_wait_entry2.entry.cb_arg = &io_wait_entry2;

	/* Queue two I/O waits.
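	 * (Both entries wait for a free spdk_bdev_io; io_wait_cb() resubmits the
	 * read once one becomes available, and the asserts below check that the
	 * two waiters are serviced in FIFO order.)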
	 */
	rc = spdk_bdev_queue_io_wait(bdev, io_ch, &io_wait_entry.entry);
	CU_ASSERT(rc == 0);
	CU_ASSERT(io_wait_entry.submitted == false);
	rc = spdk_bdev_queue_io_wait(bdev, io_ch, &io_wait_entry2.entry);
	CU_ASSERT(rc == 0);
	CU_ASSERT(io_wait_entry2.submitted == false);

	stub_complete_io(1);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 4);
	CU_ASSERT(io_wait_entry.submitted == true);
	CU_ASSERT(io_wait_entry2.submitted == false);

	stub_complete_io(1);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 4);
	CU_ASSERT(io_wait_entry2.submitted == true);

	stub_complete_io(4);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);

	spdk_put_io_channel(io_ch);
	spdk_bdev_close(desc);
	free_bdev(bdev);
	spdk_bdev_finish(bdev_fini_cb, NULL);
	poll_threads();
}

static void
bdev_io_spans_boundary_test(void)
{
	struct spdk_bdev bdev;
	struct spdk_bdev_io bdev_io;

	memset(&bdev, 0, sizeof(bdev));

	bdev.optimal_io_boundary = 0;
	bdev_io.bdev = &bdev;

	/* bdev has no optimal_io_boundary set, so this should return false. */
	CU_ASSERT(bdev_io_should_split(&bdev_io) == false);

	bdev.optimal_io_boundary = 32;
	bdev_io.type = SPDK_BDEV_IO_TYPE_RESET;

	/* RESETs are not based on LBAs, so this should return false. */
	CU_ASSERT(bdev_io_should_split(&bdev_io) == false);

	bdev_io.type = SPDK_BDEV_IO_TYPE_READ;
	bdev_io.u.bdev.offset_blocks = 0;
	bdev_io.u.bdev.num_blocks = 32;

	/* This I/O runs right up to, but does not cross, the boundary, so this
	 * should return false. */
	CU_ASSERT(bdev_io_should_split(&bdev_io) == false);

	bdev_io.u.bdev.num_blocks = 33;

	/* This I/O spans a boundary. */
	CU_ASSERT(bdev_io_should_split(&bdev_io) == true);
}

static void
bdev_io_split_test(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc = NULL;
	struct spdk_io_channel *io_ch;
	struct spdk_bdev_opts bdev_opts = {
		.bdev_io_pool_size = 512,
		.bdev_io_cache_size = 64,
	};
	struct iovec iov[BDEV_IO_NUM_CHILD_IOV * 2];
	struct ut_expected_io *expected_io;
	uint64_t i;
	int rc;

	rc = spdk_bdev_set_opts(&bdev_opts);
	CU_ASSERT(rc == 0);
	spdk_bdev_initialize(bdev_init_cb, NULL);

	bdev = allocate_bdev("bdev0");

	rc = spdk_bdev_open(bdev, true, NULL, NULL, &desc);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(desc != NULL);
	io_ch = spdk_bdev_get_io_channel(desc);
	CU_ASSERT(io_ch != NULL);

	bdev->optimal_io_boundary = 16;
	bdev->split_on_optimal_io_boundary = false;

	g_io_done = false;

	/* First test that the I/O does not get split if split_on_optimal_io_boundary == false.
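	 * (The read below spans blocks 14..21, crossing the boundary at block 16,
	 * but it must still arrive at the stub as a single request.)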
	 */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 14, 8, 1);
	ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 8 * 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 8, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);

	bdev->split_on_optimal_io_boundary = true;

	/* Now test that a single-vector command is split correctly.
	 * Offset 14, length 8, payload 0xF000
	 *  Child - Offset 14, length 2, payload 0xF000
	 *  Child - Offset 16, length 6, payload 0xF000 + 2 * 512
	 *
	 * Set up the expected values before calling spdk_bdev_read_blocks
	 */
	g_io_done = false;
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 14, 2, 1);
	ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 2 * 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 16, 6, 1);
	ut_expected_io_set_iov(expected_io, 0, (void *)(0xF000 + 2 * 512), 6 * 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	/* spdk_bdev_read_blocks will submit the first child immediately. */
	rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 8, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2);
	stub_complete_io(2);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);

	/* Now set up a more complex, multi-vector command that needs to be split,
	 * including splitting iovecs.
	 */
	iov[0].iov_base = (void *)0x10000;
	iov[0].iov_len = 512;
	iov[1].iov_base = (void *)0x20000;
	iov[1].iov_len = 20 * 512;
	iov[2].iov_base = (void *)0x30000;
	iov[2].iov_len = 11 * 512;

	g_io_done = false;
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 14, 2, 2);
	ut_expected_io_set_iov(expected_io, 0, (void *)0x10000, 512);
	ut_expected_io_set_iov(expected_io, 1, (void *)0x20000, 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 16, 16, 1);
	ut_expected_io_set_iov(expected_io, 0, (void *)(0x20000 + 512), 16 * 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 32, 14, 2);
	ut_expected_io_set_iov(expected_io, 0, (void *)(0x20000 + 17 * 512), 3 * 512);
	ut_expected_io_set_iov(expected_io, 1, (void *)0x30000, 11 * 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	rc = spdk_bdev_writev_blocks(desc, io_ch, iov, 3, 14, 32, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 3);
	stub_complete_io(3);
	CU_ASSERT(g_io_done == true);

	/* Test a multi-vector command that needs to be split by the I/O boundary and
	 * then needs to be split further due to the capacity of child iovs.
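	 * (BDEV_IO_NUM_CHILD_IOV single-block vectors exactly fill one child I/O,
	 * so the 2 * BDEV_IO_NUM_CHILD_IOV blocks are expected to be delivered as
	 * two children of BDEV_IO_NUM_CHILD_IOV blocks each.)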
	 */
	for (i = 0; i < BDEV_IO_NUM_CHILD_IOV * 2; i++) {
		iov[i].iov_base = (void *)((i + 1) * 0x10000);
		iov[i].iov_len = 512;
	}

	bdev->optimal_io_boundary = BDEV_IO_NUM_CHILD_IOV;
	g_io_done = false;
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0, BDEV_IO_NUM_CHILD_IOV,
					   BDEV_IO_NUM_CHILD_IOV);
	for (i = 0; i < BDEV_IO_NUM_CHILD_IOV; i++) {
		ut_expected_io_set_iov(expected_io, i, (void *)((i + 1) * 0x10000), 512);
	}
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, BDEV_IO_NUM_CHILD_IOV,
					   BDEV_IO_NUM_CHILD_IOV, BDEV_IO_NUM_CHILD_IOV);
	for (i = 0; i < BDEV_IO_NUM_CHILD_IOV; i++) {
		ut_expected_io_set_iov(expected_io, i,
				       (void *)((i + 1 + BDEV_IO_NUM_CHILD_IOV) * 0x10000), 512);
	}
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	rc = spdk_bdev_readv_blocks(desc, io_ch, iov, BDEV_IO_NUM_CHILD_IOV * 2, 0,
				    BDEV_IO_NUM_CHILD_IOV * 2, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);

	/* Test a multi-vector command that needs to be split by the I/O boundary and
	 * then needs to be split further due to the capacity of child iovs.  In this
	 * case, the length of the rest of the iovec array within an I/O boundary is
	 * a multiple of the block size.
	 */

	/* Fill the iovec array for exactly one boundary.  The iovec count for this
	 * boundary is BDEV_IO_NUM_CHILD_IOV + 1, which exceeds the capacity of child iovs.
1191 */ 1192 for (i = 0; i < BDEV_IO_NUM_CHILD_IOV - 2; i++) { 1193 iov[i].iov_base = (void *)((i + 1) * 0x10000); 1194 iov[i].iov_len = 512; 1195 } 1196 for (i = BDEV_IO_NUM_CHILD_IOV - 2; i < BDEV_IO_NUM_CHILD_IOV; i++) { 1197 iov[i].iov_base = (void *)((i + 1) * 0x10000); 1198 iov[i].iov_len = 256; 1199 } 1200 iov[BDEV_IO_NUM_CHILD_IOV].iov_base = (void *)((BDEV_IO_NUM_CHILD_IOV + 1) * 0x10000); 1201 iov[BDEV_IO_NUM_CHILD_IOV].iov_len = 512; 1202 1203 /* Add an extra iovec to trigger split */ 1204 iov[BDEV_IO_NUM_CHILD_IOV + 1].iov_base = (void *)((BDEV_IO_NUM_CHILD_IOV + 2) * 0x10000); 1205 iov[BDEV_IO_NUM_CHILD_IOV + 1].iov_len = 512; 1206 1207 bdev->optimal_io_boundary = BDEV_IO_NUM_CHILD_IOV; 1208 g_io_done = false; 1209 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0, 1210 BDEV_IO_NUM_CHILD_IOV - 1, BDEV_IO_NUM_CHILD_IOV); 1211 for (i = 0; i < BDEV_IO_NUM_CHILD_IOV - 2; i++) { 1212 ut_expected_io_set_iov(expected_io, i, 1213 (void *)((i + 1) * 0x10000), 512); 1214 } 1215 for (i = BDEV_IO_NUM_CHILD_IOV - 2; i < BDEV_IO_NUM_CHILD_IOV; i++) { 1216 ut_expected_io_set_iov(expected_io, i, 1217 (void *)((i + 1) * 0x10000), 256); 1218 } 1219 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1220 1221 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, BDEV_IO_NUM_CHILD_IOV - 1, 1222 1, 1); 1223 ut_expected_io_set_iov(expected_io, 0, 1224 (void *)((BDEV_IO_NUM_CHILD_IOV + 1) * 0x10000), 512); 1225 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1226 1227 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, BDEV_IO_NUM_CHILD_IOV, 1228 1, 1); 1229 ut_expected_io_set_iov(expected_io, 0, 1230 (void *)((BDEV_IO_NUM_CHILD_IOV + 2) * 0x10000), 512); 1231 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1232 1233 rc = spdk_bdev_readv_blocks(desc, io_ch, iov, BDEV_IO_NUM_CHILD_IOV + 2, 0, 1234 BDEV_IO_NUM_CHILD_IOV + 1, io_done, NULL); 1235 CU_ASSERT(rc == 0); 1236 CU_ASSERT(g_io_done == false); 1237 1238 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 1239 stub_complete_io(1); 1240 CU_ASSERT(g_io_done == false); 1241 1242 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 1243 stub_complete_io(2); 1244 CU_ASSERT(g_io_done == true); 1245 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 1246 1247 /* Test multi vector command that needs to be split by strip and then needs to be 1248 * split further due to the capacity of child iovs, the child request offset should 1249 * be rewind to last aligned offset and go success without error. 
	 */
	for (i = 0; i < BDEV_IO_NUM_CHILD_IOV - 1; i++) {
		iov[i].iov_base = (void *)((i + 1) * 0x10000);
		iov[i].iov_len = 512;
	}
	iov[BDEV_IO_NUM_CHILD_IOV - 1].iov_base = (void *)(BDEV_IO_NUM_CHILD_IOV * 0x10000);
	iov[BDEV_IO_NUM_CHILD_IOV - 1].iov_len = 256;

	iov[BDEV_IO_NUM_CHILD_IOV].iov_base = (void *)((BDEV_IO_NUM_CHILD_IOV + 1) * 0x10000);
	iov[BDEV_IO_NUM_CHILD_IOV].iov_len = 256;

	iov[BDEV_IO_NUM_CHILD_IOV + 1].iov_base = (void *)((BDEV_IO_NUM_CHILD_IOV + 2) * 0x10000);
	iov[BDEV_IO_NUM_CHILD_IOV + 1].iov_len = 512;

	bdev->optimal_io_boundary = BDEV_IO_NUM_CHILD_IOV;
	g_io_done = false;
	g_io_status = 0;
	/* The first expected I/O covers blocks 0 through BDEV_IO_NUM_CHILD_IOV - 2 */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0,
					   BDEV_IO_NUM_CHILD_IOV - 1, BDEV_IO_NUM_CHILD_IOV - 1);
	for (i = 0; i < BDEV_IO_NUM_CHILD_IOV - 1; i++) {
		ut_expected_io_set_iov(expected_io, i,
				       (void *)((i + 1) * 0x10000), 512);
	}
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
	/* The second expected I/O covers the single block at offset BDEV_IO_NUM_CHILD_IOV - 1 */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, BDEV_IO_NUM_CHILD_IOV - 1,
					   1, 2);
	ut_expected_io_set_iov(expected_io, 0,
			       (void *)(BDEV_IO_NUM_CHILD_IOV * 0x10000), 256);
	ut_expected_io_set_iov(expected_io, 1,
			       (void *)((BDEV_IO_NUM_CHILD_IOV + 1) * 0x10000), 256);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
	/* The third expected I/O covers the single block at offset BDEV_IO_NUM_CHILD_IOV */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, BDEV_IO_NUM_CHILD_IOV,
					   1, 1);
	ut_expected_io_set_iov(expected_io, 0,
			       (void *)((BDEV_IO_NUM_CHILD_IOV + 2) * 0x10000), 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	rc = spdk_bdev_readv_blocks(desc, io_ch, iov, BDEV_IO_NUM_CHILD_IOV * 2, 0,
				    BDEV_IO_NUM_CHILD_IOV + 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2);
	stub_complete_io(2);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);

	/* Test a multi-vector command that needs to be split due to the I/O boundary
	 * and the capacity of child iovs.  In particular, test the case where the
	 * command is split due to the capacity of child iovs and the tail address is
	 * not aligned with the block size, so it is rewound to the aligned address.
	 *
	 * The iovecs used in this read request are complex but based on the data
	 * collected from a real issue.  We change the base addresses but keep the
	 * lengths so as not to lose the credibility of the test.
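	 * (With 512-byte blocks, the optimal_io_boundary of 128 blocks used below
	 * corresponds to a 64 KiB stripe.)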
	 */
	bdev->optimal_io_boundary = 128;
	g_io_done = false;
	g_io_status = 0;

	for (i = 0; i < 31; i++) {
		iov[i].iov_base = (void *)(0xFEED0000000 + (i << 20));
		iov[i].iov_len = 1024;
	}
	iov[31].iov_base = (void *)0xFEED1F00000;
	iov[31].iov_len = 32768;
	iov[32].iov_base = (void *)0xFEED2000000;
	iov[32].iov_len = 160;
	iov[33].iov_base = (void *)0xFEED2100000;
	iov[33].iov_len = 4096;
	iov[34].iov_base = (void *)0xFEED2200000;
	iov[34].iov_len = 4096;
	iov[35].iov_base = (void *)0xFEED2300000;
	iov[35].iov_len = 4096;
	iov[36].iov_base = (void *)0xFEED2400000;
	iov[36].iov_len = 4096;
	iov[37].iov_base = (void *)0xFEED2500000;
	iov[37].iov_len = 4096;
	iov[38].iov_base = (void *)0xFEED2600000;
	iov[38].iov_len = 4096;
	iov[39].iov_base = (void *)0xFEED2700000;
	iov[39].iov_len = 4096;
	iov[40].iov_base = (void *)0xFEED2800000;
	iov[40].iov_len = 4096;
	iov[41].iov_base = (void *)0xFEED2900000;
	iov[41].iov_len = 4096;
	iov[42].iov_base = (void *)0xFEED2A00000;
	iov[42].iov_len = 4096;
	iov[43].iov_base = (void *)0xFEED2B00000;
	iov[43].iov_len = 12288;
	iov[44].iov_base = (void *)0xFEED2C00000;
	iov[44].iov_len = 8192;
	iov[45].iov_base = (void *)0xFEED2F00000;
	iov[45].iov_len = 4096;
	iov[46].iov_base = (void *)0xFEED3000000;
	iov[46].iov_len = 4096;
	iov[47].iov_base = (void *)0xFEED3100000;
	iov[47].iov_len = 4096;
	iov[48].iov_base = (void *)0xFEED3200000;
	iov[48].iov_len = 24576;
	iov[49].iov_base = (void *)0xFEED3300000;
	iov[49].iov_len = 16384;
	iov[50].iov_base = (void *)0xFEED3400000;
	iov[50].iov_len = 12288;
	iov[51].iov_base = (void *)0xFEED3500000;
	iov[51].iov_len = 4096;
	iov[52].iov_base = (void *)0xFEED3600000;
	iov[52].iov_len = 4096;
	iov[53].iov_base = (void *)0xFEED3700000;
	iov[53].iov_len = 4096;
	iov[54].iov_base = (void *)0xFEED3800000;
	iov[54].iov_len = 28672;
	iov[55].iov_base = (void *)0xFEED3900000;
	iov[55].iov_len = 20480;
	iov[56].iov_base = (void *)0xFEED3A00000;
	iov[56].iov_len = 4096;
	iov[57].iov_base = (void *)0xFEED3B00000;
	iov[57].iov_len = 12288;
	iov[58].iov_base = (void *)0xFEED3C00000;
	iov[58].iov_len = 4096;
	iov[59].iov_base = (void *)0xFEED3D00000;
	iov[59].iov_len = 4096;
	iov[60].iov_base = (void *)0xFEED3E00000;
	iov[60].iov_len = 352;

	/* The 1st child IO must be from iov[0] to iov[31], split by the capacity
	 * of child iovs.
	 */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0, 126, 32);
	for (i = 0; i < 32; i++) {
		ut_expected_io_set_iov(expected_io, i, iov[i].iov_base, iov[i].iov_len);
	}
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	/* The 2nd child IO must be from iov[32] to the first 864 bytes of iov[33],
	 * split by the IO boundary requirement.
	 */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 126, 2, 2);
	ut_expected_io_set_iov(expected_io, 0, iov[32].iov_base, iov[32].iov_len);
	ut_expected_io_set_iov(expected_io, 1, iov[33].iov_base, 864);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	/* The 3rd child IO must be from the remaining 3232 bytes of iov[33] to
	 * the first 864 bytes of iov[46], split by the IO boundary requirement.
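	 * (Check: 3232 + 9 * 4096 + 12288 + 8192 + 4096 + 864 = 65536 bytes,
	 * i.e. exactly 128 blocks.)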
	 */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 128, 128, 14);
	ut_expected_io_set_iov(expected_io, 0, (void *)((uintptr_t)iov[33].iov_base + 864),
			       iov[33].iov_len - 864);
	ut_expected_io_set_iov(expected_io, 1, iov[34].iov_base, iov[34].iov_len);
	ut_expected_io_set_iov(expected_io, 2, iov[35].iov_base, iov[35].iov_len);
	ut_expected_io_set_iov(expected_io, 3, iov[36].iov_base, iov[36].iov_len);
	ut_expected_io_set_iov(expected_io, 4, iov[37].iov_base, iov[37].iov_len);
	ut_expected_io_set_iov(expected_io, 5, iov[38].iov_base, iov[38].iov_len);
	ut_expected_io_set_iov(expected_io, 6, iov[39].iov_base, iov[39].iov_len);
	ut_expected_io_set_iov(expected_io, 7, iov[40].iov_base, iov[40].iov_len);
	ut_expected_io_set_iov(expected_io, 8, iov[41].iov_base, iov[41].iov_len);
	ut_expected_io_set_iov(expected_io, 9, iov[42].iov_base, iov[42].iov_len);
	ut_expected_io_set_iov(expected_io, 10, iov[43].iov_base, iov[43].iov_len);
	ut_expected_io_set_iov(expected_io, 11, iov[44].iov_base, iov[44].iov_len);
	ut_expected_io_set_iov(expected_io, 12, iov[45].iov_base, iov[45].iov_len);
	ut_expected_io_set_iov(expected_io, 13, iov[46].iov_base, 864);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	/* The 4th child IO must be from the remaining 3232 bytes of iov[46] to the
	 * first 864 bytes of iov[52], split by the IO boundary requirement.
	 */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 256, 128, 7);
	ut_expected_io_set_iov(expected_io, 0, (void *)((uintptr_t)iov[46].iov_base + 864),
			       iov[46].iov_len - 864);
	ut_expected_io_set_iov(expected_io, 1, iov[47].iov_base, iov[47].iov_len);
	ut_expected_io_set_iov(expected_io, 2, iov[48].iov_base, iov[48].iov_len);
	ut_expected_io_set_iov(expected_io, 3, iov[49].iov_base, iov[49].iov_len);
	ut_expected_io_set_iov(expected_io, 4, iov[50].iov_base, iov[50].iov_len);
	ut_expected_io_set_iov(expected_io, 5, iov[51].iov_base, iov[51].iov_len);
	ut_expected_io_set_iov(expected_io, 6, iov[52].iov_base, 864);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	/* The 5th child IO must be from the remaining 3232 bytes of iov[52] to
	 * the first 4960 bytes of iov[57], split by the IO boundary requirement.
	 */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 384, 128, 6);
	ut_expected_io_set_iov(expected_io, 0, (void *)((uintptr_t)iov[52].iov_base + 864),
			       iov[52].iov_len - 864);
	ut_expected_io_set_iov(expected_io, 1, iov[53].iov_base, iov[53].iov_len);
	ut_expected_io_set_iov(expected_io, 2, iov[54].iov_base, iov[54].iov_len);
	ut_expected_io_set_iov(expected_io, 3, iov[55].iov_base, iov[55].iov_len);
	ut_expected_io_set_iov(expected_io, 4, iov[56].iov_base, iov[56].iov_len);
	ut_expected_io_set_iov(expected_io, 5, iov[57].iov_base, 4960);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	/* The 6th child IO must be from the remaining 7328 bytes of iov[57]
	 * to the first 3936 bytes of iov[59], split by the capacity of child iovs.
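	 * (A hedged reading of the split logic: children 2 through 5 already use
	 * 2 + 14 + 7 + 6 = 29 of the 32 child iov entries available to this batch,
	 * leaving only 3 for this child.  7328 + 4096 + 4096 = 15520 bytes is not
	 * a whole number of blocks, so the tail is rewound from 4096 to 3936 bytes,
	 * giving 30 whole blocks.)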
	 */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 512, 30, 3);
	ut_expected_io_set_iov(expected_io, 0, (void *)((uintptr_t)iov[57].iov_base + 4960),
			       iov[57].iov_len - 4960);
	ut_expected_io_set_iov(expected_io, 1, iov[58].iov_base, iov[58].iov_len);
	ut_expected_io_set_iov(expected_io, 2, iov[59].iov_base, 3936);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	/* The 7th child IO is from the remaining 160 bytes of iov[59] and iov[60]. */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 542, 1, 2);
	ut_expected_io_set_iov(expected_io, 0, (void *)((uintptr_t)iov[59].iov_base + 3936),
			       iov[59].iov_len - 3936);
	ut_expected_io_set_iov(expected_io, 1, iov[60].iov_base, iov[60].iov_len);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	rc = spdk_bdev_readv_blocks(desc, io_ch, iov, 61, 0, 543, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 5);
	stub_complete_io(5);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
	CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS);

	/* Test a WRITE_ZEROES that would span an I/O boundary.  WRITE_ZEROES should not be
	 * split, so test that.
	 */
	bdev->optimal_io_boundary = 15;
	g_io_done = false;
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, 9, 36, 0);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	rc = spdk_bdev_write_zeroes_blocks(desc, io_ch, 9, 36, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == true);

	/* Test an UNMAP.  This should also not be split. */
	bdev->optimal_io_boundary = 16;
	g_io_done = false;
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_UNMAP, 15, 2, 0);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	rc = spdk_bdev_unmap_blocks(desc, io_ch, 15, 2, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == true);

	/* Test a FLUSH.  This should also not be split.
	 */
	bdev->optimal_io_boundary = 16;
	g_io_done = false;
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_FLUSH, 15, 2, 0);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	rc = spdk_bdev_flush_blocks(desc, io_ch, 15, 2, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == true);

	CU_ASSERT(TAILQ_EMPTY(&g_bdev_ut_channel->expected_io));

	/* Children requests return an error status */
	bdev->optimal_io_boundary = 16;
	iov[0].iov_base = (void *)0x10000;
	iov[0].iov_len = 512 * 64;
	g_io_exp_status = SPDK_BDEV_IO_STATUS_FAILED;
	g_io_done = false;
	g_io_status = SPDK_BDEV_IO_STATUS_SUCCESS;

	rc = spdk_bdev_readv_blocks(desc, io_ch, iov, 1, 1, 64, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 5);
	stub_complete_io(4);
	CU_ASSERT(g_io_done == false);
	CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED);

	/* Test that a multi-vector command is terminated with failure, without
	 * continuing the splitting process, when one of its child I/O fails.
	 * The multi-vector command is the same as the one above that needs to be
	 * split by the I/O boundary and then split further due to the capacity
	 * of child iovs.
	 */
	for (i = 0; i < BDEV_IO_NUM_CHILD_IOV - 1; i++) {
		iov[i].iov_base = (void *)((i + 1) * 0x10000);
		iov[i].iov_len = 512;
	}
	iov[BDEV_IO_NUM_CHILD_IOV - 1].iov_base = (void *)(BDEV_IO_NUM_CHILD_IOV * 0x10000);
	iov[BDEV_IO_NUM_CHILD_IOV - 1].iov_len = 256;

	iov[BDEV_IO_NUM_CHILD_IOV].iov_base = (void *)((BDEV_IO_NUM_CHILD_IOV + 1) * 0x10000);
	iov[BDEV_IO_NUM_CHILD_IOV].iov_len = 256;

	iov[BDEV_IO_NUM_CHILD_IOV + 1].iov_base = (void *)((BDEV_IO_NUM_CHILD_IOV + 2) * 0x10000);
	iov[BDEV_IO_NUM_CHILD_IOV + 1].iov_len = 512;

	bdev->optimal_io_boundary = BDEV_IO_NUM_CHILD_IOV;

	g_io_exp_status = SPDK_BDEV_IO_STATUS_FAILED;
	g_io_done = false;
	g_io_status = SPDK_BDEV_IO_STATUS_SUCCESS;

	rc = spdk_bdev_readv_blocks(desc, io_ch, iov, BDEV_IO_NUM_CHILD_IOV * 2, 0,
				    BDEV_IO_NUM_CHILD_IOV + 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED);

	g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS;

	/* For this test we will create the following conditions to hit the code path
	 * where we are trying to send an I/O following a split that has no iovs
	 * because we had to trim them for alignment reasons.
	 *
	 * - 16K boundary, our I/O will start at offset 0 with a length of 0x4200
	 * - Our IOVs are 0x212 in size so that we run into the 16K boundary at child
	 *   IOV position 30 and overshoot by 0x2e.
	 * - That means we'll send the I/O and loop back to pick up the remaining bytes
	 *   at child IOV index 31.  When we do, we find that we have to shorten index
	 *   31 by 0x2e, which eliminates that vector, so we just send the first split
	 *   I/O with 30 vectors and let the completion pick up the last 2 vectors.
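	 * (Check: 0x212 * 31 = 0x402e, i.e. 0x2e bytes past the 16K boundary;
	 * 0x212 - 0x2e = 0x1e4 is the shortened vector, and the second child's
	 * 0x2e + 0x1d2 = 0x200 bytes supply the 33rd block of the 0x4200-byte I/O.)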
	 */
	bdev->optimal_io_boundary = 32;
	bdev->split_on_optimal_io_boundary = true;
	g_io_done = false;

	/* Init all parent IOVs to 0x212 */
	for (i = 0; i < BDEV_IO_NUM_CHILD_IOV + 2; i++) {
		iov[i].iov_base = (void *)((i + 1) * 0x10000);
		iov[i].iov_len = 0x212;
	}

	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0, BDEV_IO_NUM_CHILD_IOV,
					   BDEV_IO_NUM_CHILD_IOV - 1);
	/* expect 0-29 to be 1:1 with the parent iov */
	for (i = 0; i < BDEV_IO_NUM_CHILD_IOV - 2; i++) {
		ut_expected_io_set_iov(expected_io, i, iov[i].iov_base, iov[i].iov_len);
	}

	/* expect index 30 to be shortened to 0x1e4 (0x212 - 0x2e) because of the
	 * alignment, where 0x2e is the amount we overshot the 16K boundary
	 */
	ut_expected_io_set_iov(expected_io, BDEV_IO_NUM_CHILD_IOV - 2,
			       (void *)(iov[BDEV_IO_NUM_CHILD_IOV - 2].iov_base), 0x1e4);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	/* 2nd child IO will have 2 remaining vectors, one to pick up from the one that
	 * was shortened that takes it to the next boundary and then a final one to get
	 * us to 0x4200 bytes for the IO.
	 */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, BDEV_IO_NUM_CHILD_IOV,
					   1, 2);
	/* position 30 picked up the remaining bytes to the next boundary */
	ut_expected_io_set_iov(expected_io, 0,
			       (void *)(iov[BDEV_IO_NUM_CHILD_IOV - 2].iov_base + 0x1e4), 0x2e);

	/* position 31 picked up the rest of the transfer to get us to 0x4200 */
	ut_expected_io_set_iov(expected_io, 1,
			       (void *)(iov[BDEV_IO_NUM_CHILD_IOV - 1].iov_base), 0x1d2);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	rc = spdk_bdev_readv_blocks(desc, io_ch, iov, BDEV_IO_NUM_CHILD_IOV + 1, 0,
				    BDEV_IO_NUM_CHILD_IOV + 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);

	spdk_put_io_channel(io_ch);
	spdk_bdev_close(desc);
	free_bdev(bdev);
	spdk_bdev_finish(bdev_fini_cb, NULL);
	poll_threads();
}

static void
bdev_io_split_with_io_wait(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc = NULL;
	struct spdk_io_channel *io_ch;
	struct spdk_bdev_channel *channel;
	struct spdk_bdev_mgmt_channel *mgmt_ch;
	struct spdk_bdev_opts bdev_opts = {
		.bdev_io_pool_size = 2,
		.bdev_io_cache_size = 1,
	};
	struct iovec iov[3];
	struct ut_expected_io *expected_io;
	int rc;

	rc = spdk_bdev_set_opts(&bdev_opts);
	CU_ASSERT(rc == 0);
	spdk_bdev_initialize(bdev_init_cb, NULL);

	bdev = allocate_bdev("bdev0");

	rc = spdk_bdev_open(bdev, true, NULL, NULL, &desc);
	CU_ASSERT(rc == 0);
	CU_ASSERT(desc != NULL);
	io_ch = spdk_bdev_get_io_channel(desc);
	CU_ASSERT(io_ch != NULL);
	channel = spdk_io_channel_get_ctx(io_ch);
	mgmt_ch = channel->shared_resource->mgmt_ch;

	bdev->optimal_io_boundary = 16;
	bdev->split_on_optimal_io_boundary = true;

	rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);
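
	/* This read holds one of the two spdk_bdev_io in the pool
	 * (bdev_io_pool_size = 2), so the split parent below takes the last one
	 * and its first child has to wait on the mgmt channel's io_wait_queue.
	 */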
1686 1687 /* Now test that a single-vector command is split correctly. 1688 * Offset 14, length 8, payload 0xF000 1689 * Child - Offset 14, length 2, payload 0xF000 1690 * Child - Offset 16, length 6, payload 0xF000 + 2 * 512 1691 * 1692 * Set up the expected values before calling spdk_bdev_read_blocks 1693 */ 1694 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 14, 2, 1); 1695 ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 2 * 512); 1696 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1697 1698 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 16, 6, 1); 1699 ut_expected_io_set_iov(expected_io, 0, (void *)(0xF000 + 2 * 512), 6 * 512); 1700 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1701 1702 /* The following children will be submitted sequentially due to the capacity of 1703 * spdk_bdev_io. 1704 */ 1705 1706 /* The first child I/O will be queued to wait until an spdk_bdev_io becomes available */ 1707 rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 8, io_done, NULL); 1708 CU_ASSERT(rc == 0); 1709 CU_ASSERT(!TAILQ_EMPTY(&mgmt_ch->io_wait_queue)); 1710 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 1711 1712 /* Completing the first read I/O will submit the first child */ 1713 stub_complete_io(1); 1714 CU_ASSERT(TAILQ_EMPTY(&mgmt_ch->io_wait_queue)); 1715 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 1716 1717 /* Completing the first child will submit the second child */ 1718 stub_complete_io(1); 1719 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 1720 1721 /* Complete the second child I/O. This should result in our callback getting 1722 * invoked since the parent I/O is now complete. 1723 */ 1724 stub_complete_io(1); 1725 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 1726 1727 /* Now set up a more complex, multi-vector command that needs to be split, 1728 * including splitting iovecs. 1729 */ 1730 iov[0].iov_base = (void *)0x10000; 1731 iov[0].iov_len = 512; 1732 iov[1].iov_base = (void *)0x20000; 1733 iov[1].iov_len = 20 * 512; 1734 iov[2].iov_base = (void *)0x30000; 1735 iov[2].iov_len = 11 * 512; 1736 1737 g_io_done = false; 1738 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 14, 2, 2); 1739 ut_expected_io_set_iov(expected_io, 0, (void *)0x10000, 512); 1740 ut_expected_io_set_iov(expected_io, 1, (void *)0x20000, 512); 1741 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1742 1743 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 16, 16, 1); 1744 ut_expected_io_set_iov(expected_io, 0, (void *)(0x20000 + 512), 16 * 512); 1745 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1746 1747 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 32, 14, 2); 1748 ut_expected_io_set_iov(expected_io, 0, (void *)(0x20000 + 17 * 512), 3 * 512); 1749 ut_expected_io_set_iov(expected_io, 1, (void *)0x30000, 11 * 512); 1750 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1751 1752 rc = spdk_bdev_writev_blocks(desc, io_ch, iov, 3, 14, 32, io_done, NULL); 1753 CU_ASSERT(rc == 0); 1754 CU_ASSERT(g_io_done == false); 1755 1756 /* The following children will be submitted sequentially due to the capacity of 1757 * spdk_bdev_io. 
1758 */ 1759 1760 /* Completing the first child will submit the second child */ 1761 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 1762 stub_complete_io(1); 1763 CU_ASSERT(g_io_done == false); 1764 1765 /* Completing the second child will submit the third child */ 1766 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 1767 stub_complete_io(1); 1768 CU_ASSERT(g_io_done == false); 1769 1770 /* Completing the third child will result in our callback getting invoked 1771 * since the parent I/O is now complete. 1772 */ 1773 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 1774 stub_complete_io(1); 1775 CU_ASSERT(g_io_done == true); 1776 1777 CU_ASSERT(TAILQ_EMPTY(&g_bdev_ut_channel->expected_io)); 1778 1779 spdk_put_io_channel(io_ch); 1780 spdk_bdev_close(desc); 1781 free_bdev(bdev); 1782 spdk_bdev_finish(bdev_fini_cb, NULL); 1783 poll_threads(); 1784 } 1785 1786 static void 1787 bdev_io_alignment(void) 1788 { 1789 struct spdk_bdev *bdev; 1790 struct spdk_bdev_desc *desc = NULL; 1791 struct spdk_io_channel *io_ch; 1792 struct spdk_bdev_opts bdev_opts = { 1793 .bdev_io_pool_size = 20, 1794 .bdev_io_cache_size = 2, 1795 }; 1796 int rc; 1797 void *buf = NULL; 1798 struct iovec iovs[2]; 1799 int iovcnt; 1800 uint64_t alignment; 1801 1802 rc = spdk_bdev_set_opts(&bdev_opts); 1803 CU_ASSERT(rc == 0); 1804 spdk_bdev_initialize(bdev_init_cb, NULL); 1805 1806 fn_table.submit_request = stub_submit_request_get_buf; 1807 bdev = allocate_bdev("bdev0"); 1808 1809 rc = spdk_bdev_open(bdev, true, NULL, NULL, &desc); 1810 CU_ASSERT(rc == 0); 1811 CU_ASSERT(desc != NULL); 1812 io_ch = spdk_bdev_get_io_channel(desc); 1813 CU_ASSERT(io_ch != NULL); 1814 1815 /* Create aligned buffer */ 1816 rc = posix_memalign(&buf, 4096, 8192); 1817 SPDK_CU_ASSERT_FATAL(rc == 0); 1818 1819 /* Pass aligned single buffer with no alignment required */ 1820 alignment = 1; 1821 bdev->required_alignment = spdk_u32log2(alignment); 1822 1823 rc = spdk_bdev_write_blocks(desc, io_ch, buf, 0, 1, io_done, NULL); 1824 CU_ASSERT(rc == 0); 1825 stub_complete_io(1); 1826 CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt, 1827 alignment)); 1828 1829 rc = spdk_bdev_read_blocks(desc, io_ch, buf, 0, 1, io_done, NULL); 1830 CU_ASSERT(rc == 0); 1831 stub_complete_io(1); 1832 CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt, 1833 alignment)); 1834 1835 /* Pass unaligned single buffer with no alignment required */ 1836 alignment = 1; 1837 bdev->required_alignment = spdk_u32log2(alignment); 1838 1839 rc = spdk_bdev_write_blocks(desc, io_ch, buf + 4, 0, 1, io_done, NULL); 1840 CU_ASSERT(rc == 0); 1841 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0); 1842 CU_ASSERT(g_bdev_io->u.bdev.iovs[0].iov_base == buf + 4); 1843 stub_complete_io(1); 1844 1845 rc = spdk_bdev_read_blocks(desc, io_ch, buf + 4, 0, 1, io_done, NULL); 1846 CU_ASSERT(rc == 0); 1847 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0); 1848 CU_ASSERT(g_bdev_io->u.bdev.iovs[0].iov_base == buf + 4); 1849 stub_complete_io(1); 1850 1851 /* Pass unaligned single buffer with 512 alignment required */ 1852 alignment = 512; 1853 bdev->required_alignment = spdk_u32log2(alignment); 1854 1855 rc = spdk_bdev_write_blocks(desc, io_ch, buf + 4, 0, 1, io_done, NULL); 1856 CU_ASSERT(rc == 0); 1857 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 1); 1858 CU_ASSERT(g_bdev_io->u.bdev.iovs == &g_bdev_io->internal.bounce_iov); 1859 CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt, 1860 alignment)); 1861 
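/* Added note: when the supplied buffer violates required_alignment, the bdev
 * layer substitutes its internal bounce buffer (internal.bounce_iov) and
 * remembers the caller's iovs via orig_iovcnt, which is what the assertions
 * above checked. Completion copies data back (for reads) and restores the
 * original iovs, so orig_iovcnt is expected to drop back to 0 below.
 */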
stub_complete_io(1); 1862 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0); 1863 1864 rc = spdk_bdev_read_blocks(desc, io_ch, buf + 4, 0, 1, io_done, NULL); 1865 CU_ASSERT(rc == 0); 1866 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 1); 1867 CU_ASSERT(g_bdev_io->u.bdev.iovs == &g_bdev_io->internal.bounce_iov); 1868 CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt, 1869 alignment)); 1870 stub_complete_io(1); 1871 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0); 1872 1873 /* Pass unaligned single buffer with 4096 alignment required */ 1874 alignment = 4096; 1875 bdev->required_alignment = spdk_u32log2(alignment); 1876 1877 rc = spdk_bdev_write_blocks(desc, io_ch, buf + 8, 0, 1, io_done, NULL); 1878 CU_ASSERT(rc == 0); 1879 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 1); 1880 CU_ASSERT(g_bdev_io->u.bdev.iovs == &g_bdev_io->internal.bounce_iov); 1881 CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt, 1882 alignment)); 1883 stub_complete_io(1); 1884 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0); 1885 1886 rc = spdk_bdev_read_blocks(desc, io_ch, buf + 8, 0, 1, io_done, NULL); 1887 CU_ASSERT(rc == 0); 1888 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 1); 1889 CU_ASSERT(g_bdev_io->u.bdev.iovs == &g_bdev_io->internal.bounce_iov); 1890 CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt, 1891 alignment)); 1892 stub_complete_io(1); 1893 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0); 1894 1895 /* Pass aligned iovs with no alignment required */ 1896 alignment = 1; 1897 bdev->required_alignment = spdk_u32log2(alignment); 1898 1899 iovcnt = 1; 1900 iovs[0].iov_base = buf; 1901 iovs[0].iov_len = 512; 1902 1903 rc = spdk_bdev_writev(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL); 1904 CU_ASSERT(rc == 0); 1905 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0); 1906 stub_complete_io(1); 1907 CU_ASSERT(g_bdev_io->u.bdev.iovs[0].iov_base == iovs[0].iov_base); 1908 1909 rc = spdk_bdev_readv(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL); 1910 CU_ASSERT(rc == 0); 1911 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0); 1912 stub_complete_io(1); 1913 CU_ASSERT(g_bdev_io->u.bdev.iovs[0].iov_base == iovs[0].iov_base); 1914 1915 /* Pass unaligned iovs with no alignment required */ 1916 alignment = 1; 1917 bdev->required_alignment = spdk_u32log2(alignment); 1918 1919 iovcnt = 2; 1920 iovs[0].iov_base = buf + 16; 1921 iovs[0].iov_len = 256; 1922 iovs[1].iov_base = buf + 16 + 256 + 32; 1923 iovs[1].iov_len = 256; 1924 1925 rc = spdk_bdev_writev(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL); 1926 CU_ASSERT(rc == 0); 1927 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0); 1928 stub_complete_io(1); 1929 CU_ASSERT(g_bdev_io->u.bdev.iovs[0].iov_base == iovs[0].iov_base); 1930 1931 rc = spdk_bdev_readv(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL); 1932 CU_ASSERT(rc == 0); 1933 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0); 1934 stub_complete_io(1); 1935 CU_ASSERT(g_bdev_io->u.bdev.iovs[0].iov_base == iovs[0].iov_base); 1936 1937 /* Pass unaligned iov with 2048 alignment required */ 1938 alignment = 2048; 1939 bdev->required_alignment = spdk_u32log2(alignment); 1940 1941 iovcnt = 2; 1942 iovs[0].iov_base = buf + 16; 1943 iovs[0].iov_len = 256; 1944 iovs[1].iov_base = buf + 16 + 256 + 32; 1945 iovs[1].iov_len = 256; 1946 1947 rc = spdk_bdev_writev(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL); 1948 CU_ASSERT(rc == 0); 1949 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == iovcnt); 1950 CU_ASSERT(g_bdev_io->u.bdev.iovs == 
&g_bdev_io->internal.bounce_iov); 1951 CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt, 1952 alignment)); 1953 stub_complete_io(1); 1954 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0); 1955 1956 rc = spdk_bdev_readv(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL); 1957 CU_ASSERT(rc == 0); 1958 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == iovcnt); 1959 CU_ASSERT(g_bdev_io->u.bdev.iovs == &g_bdev_io->internal.bounce_iov); 1960 CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt, 1961 alignment)); 1962 stub_complete_io(1); 1963 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0); 1964 1965 /* Pass iov without allocated buffer without alignment required */ 1966 alignment = 1; 1967 bdev->required_alignment = spdk_u32log2(alignment); 1968 1969 iovcnt = 1; 1970 iovs[0].iov_base = NULL; 1971 iovs[0].iov_len = 0; 1972 1973 rc = spdk_bdev_readv(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL); 1974 CU_ASSERT(rc == 0); 1975 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0); 1976 CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt, 1977 alignment)); 1978 stub_complete_io(1); 1979 1980 /* Pass iov without allocated buffer with 1024 alignment required */ 1981 alignment = 1024; 1982 bdev->required_alignment = spdk_u32log2(alignment); 1983 1984 iovcnt = 1; 1985 iovs[0].iov_base = NULL; 1986 iovs[0].iov_len = 0; 1987 1988 rc = spdk_bdev_readv(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL); 1989 CU_ASSERT(rc == 0); 1990 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0); 1991 CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt, 1992 alignment)); 1993 stub_complete_io(1); 1994 1995 spdk_put_io_channel(io_ch); 1996 spdk_bdev_close(desc); 1997 free_bdev(bdev); 1998 fn_table.submit_request = stub_submit_request; 1999 spdk_bdev_finish(bdev_fini_cb, NULL); 2000 poll_threads(); 2001 2002 free(buf); 2003 } 2004 2005 static void 2006 bdev_io_alignment_with_boundary(void) 2007 { 2008 struct spdk_bdev *bdev; 2009 struct spdk_bdev_desc *desc = NULL; 2010 struct spdk_io_channel *io_ch; 2011 struct spdk_bdev_opts bdev_opts = { 2012 .bdev_io_pool_size = 20, 2013 .bdev_io_cache_size = 2, 2014 }; 2015 int rc; 2016 void *buf = NULL; 2017 struct iovec iovs[2]; 2018 int iovcnt; 2019 uint64_t alignment; 2020 2021 rc = spdk_bdev_set_opts(&bdev_opts); 2022 CU_ASSERT(rc == 0); 2023 spdk_bdev_initialize(bdev_init_cb, NULL); 2024 2025 fn_table.submit_request = stub_submit_request_get_buf; 2026 bdev = allocate_bdev("bdev0"); 2027 2028 rc = spdk_bdev_open(bdev, true, NULL, NULL, &desc); 2029 CU_ASSERT(rc == 0); 2030 CU_ASSERT(desc != NULL); 2031 io_ch = spdk_bdev_get_io_channel(desc); 2032 CU_ASSERT(io_ch != NULL); 2033 2034 /* Create aligned buffer */ 2035 rc = posix_memalign(&buf, 4096, 131072); 2036 SPDK_CU_ASSERT_FATAL(rc == 0); 2037 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 2038 2039 /* 512 * 3 with 2 IO boundary, allocate small data buffer from bdev layer */ 2040 alignment = 512; 2041 bdev->required_alignment = spdk_u32log2(alignment); 2042 bdev->optimal_io_boundary = 2; 2043 bdev->split_on_optimal_io_boundary = true; 2044 2045 iovcnt = 1; 2046 iovs[0].iov_base = NULL; 2047 iovs[0].iov_len = 512 * 3; 2048 2049 rc = spdk_bdev_readv_blocks(desc, io_ch, iovs, iovcnt, 1, 3, io_done, NULL); 2050 CU_ASSERT(rc == 0); 2051 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 2052 stub_complete_io(2); 2053 2054 /* 8KiB with 16 IO boundary, allocate large data buffer from bdev layer */ 2055 alignment = 512; 2056 
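/* Added note: iov_base == NULL asks the bdev layer to allocate the data
 * buffer itself (via spdk_bdev_io_get_buf()). The 512 * 3 case above should
 * come from the small buffer pool, while this 8KiB case should exercise the
 * large pool, assuming the usual 8KiB small-buffer cutoff in bdev.c.
 */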
bdev->required_alignment = spdk_u32log2(alignment); 2057 bdev->optimal_io_boundary = 16; 2058 bdev->split_on_optimal_io_boundary = true; 2059 2060 iovcnt = 1; 2061 iovs[0].iov_base = NULL; 2062 iovs[0].iov_len = 512 * 16; 2063 2064 rc = spdk_bdev_readv_blocks(desc, io_ch, iovs, iovcnt, 1, 16, io_done, NULL); 2065 CU_ASSERT(rc == 0); 2066 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 2067 stub_complete_io(2); 2068 2069 /* 512 * 160 with 128 IO boundary, 63.5KiB + 16.5KiB for the two child requests */ 2070 alignment = 512; 2071 bdev->required_alignment = spdk_u32log2(alignment); 2072 bdev->optimal_io_boundary = 128; 2073 bdev->split_on_optimal_io_boundary = true; 2074 2075 iovcnt = 1; 2076 iovs[0].iov_base = buf + 16; 2077 iovs[0].iov_len = 512 * 160; 2078 rc = spdk_bdev_readv_blocks(desc, io_ch, iovs, iovcnt, 1, 160, io_done, NULL); 2079 CU_ASSERT(rc == 0); 2080 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 2081 stub_complete_io(2); 2082 2083 /* 512 * 3 with 2 IO boundary */ 2084 alignment = 512; 2085 bdev->required_alignment = spdk_u32log2(alignment); 2086 bdev->optimal_io_boundary = 2; 2087 bdev->split_on_optimal_io_boundary = true; 2088 2089 iovcnt = 2; 2090 iovs[0].iov_base = buf + 16; 2091 iovs[0].iov_len = 512; 2092 iovs[1].iov_base = buf + 16 + 512 + 32; 2093 iovs[1].iov_len = 1024; 2094 2095 rc = spdk_bdev_writev_blocks(desc, io_ch, iovs, iovcnt, 1, 3, io_done, NULL); 2096 CU_ASSERT(rc == 0); 2097 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 2098 stub_complete_io(2); 2099 2100 rc = spdk_bdev_readv_blocks(desc, io_ch, iovs, iovcnt, 1, 3, io_done, NULL); 2101 CU_ASSERT(rc == 0); 2102 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 2103 stub_complete_io(2); 2104 2105 /* 512 * 64 with 32 IO boundary */ 2106 bdev->optimal_io_boundary = 32; 2107 iovcnt = 2; 2108 iovs[0].iov_base = buf + 16; 2109 iovs[0].iov_len = 16384; 2110 iovs[1].iov_base = buf + 16 + 16384 + 32; 2111 iovs[1].iov_len = 16384; 2112 2113 rc = spdk_bdev_writev_blocks(desc, io_ch, iovs, iovcnt, 1, 64, io_done, NULL); 2114 CU_ASSERT(rc == 0); 2115 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 3); 2116 stub_complete_io(3); 2117 2118 rc = spdk_bdev_readv_blocks(desc, io_ch, iovs, iovcnt, 1, 64, io_done, NULL); 2119 CU_ASSERT(rc == 0); 2120 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 3); 2121 stub_complete_io(3); 2122 2123 /* 512 * 160 with 32 IO boundary */ 2124 iovcnt = 1; 2125 iovs[0].iov_base = buf + 16; 2126 iovs[0].iov_len = 16384 + 65536; 2127 2128 rc = spdk_bdev_writev_blocks(desc, io_ch, iovs, iovcnt, 1, 160, io_done, NULL); 2129 CU_ASSERT(rc == 0); 2130 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 6); 2131 stub_complete_io(6); 2132 2133 spdk_put_io_channel(io_ch); 2134 spdk_bdev_close(desc); 2135 free_bdev(bdev); 2136 fn_table.submit_request = stub_submit_request; 2137 spdk_bdev_finish(bdev_fini_cb, NULL); 2138 poll_threads(); 2139 2140 free(buf); 2141 } 2142 2143 static void 2144 histogram_status_cb(void *cb_arg, int status) 2145 { 2146 g_status = status; 2147 } 2148 2149 static void 2150 histogram_data_cb(void *cb_arg, int status, struct spdk_histogram_data *histogram) 2151 { 2152 g_status = status; 2153 g_histogram = histogram; 2154 } 2155 2156 static void 2157 histogram_io_count(void *ctx, uint64_t start, uint64_t end, uint64_t count, 2158 uint64_t total, uint64_t so_far) 2159 { 2160 g_count += count; 2161 } 2162 2163 static void 2164 bdev_histograms(void) 2165 { 2166 struct spdk_bdev *bdev; 2167 struct spdk_bdev_desc *desc = NULL; 2168 struct
spdk_io_channel *ch; 2169 struct spdk_histogram_data *histogram; 2170 uint8_t buf[4096]; 2171 int rc; 2172 2173 spdk_bdev_initialize(bdev_init_cb, NULL); 2174 2175 bdev = allocate_bdev("bdev"); 2176 2177 rc = spdk_bdev_open(bdev, true, NULL, NULL, &desc); 2178 CU_ASSERT(rc == 0); 2179 CU_ASSERT(desc != NULL); 2180 2181 ch = spdk_bdev_get_io_channel(desc); 2182 CU_ASSERT(ch != NULL); 2183 2184 /* Enable histogram */ 2185 g_status = -1; 2186 spdk_bdev_histogram_enable(bdev, histogram_status_cb, NULL, true); 2187 poll_threads(); 2188 CU_ASSERT(g_status == 0); 2189 CU_ASSERT(bdev->internal.histogram_enabled == true); 2190 2191 /* Allocate histogram */ 2192 histogram = spdk_histogram_data_alloc(); 2193 SPDK_CU_ASSERT_FATAL(histogram != NULL); 2194 2195 /* Check if histogram is zeroed */ 2196 spdk_bdev_histogram_get(bdev, histogram, histogram_data_cb, NULL); 2197 poll_threads(); 2198 CU_ASSERT(g_status == 0); 2199 SPDK_CU_ASSERT_FATAL(g_histogram != NULL); 2200 2201 g_count = 0; 2202 spdk_histogram_data_iterate(g_histogram, histogram_io_count, NULL); 2203 2204 CU_ASSERT(g_count == 0); 2205 2206 rc = spdk_bdev_write_blocks(desc, ch, buf, 0, 1, io_done, NULL); 2207 CU_ASSERT(rc == 0); 2208 2209 spdk_delay_us(10); 2210 stub_complete_io(1); 2211 poll_threads(); 2212 2213 rc = spdk_bdev_read_blocks(desc, ch, buf, 0, 1, io_done, NULL); 2214 CU_ASSERT(rc == 0); 2215 2216 spdk_delay_us(10); 2217 stub_complete_io(1); 2218 poll_threads(); 2219 2220 /* Check if histogram gathered data from all I/O channels */ 2221 g_histogram = NULL; 2222 spdk_bdev_histogram_get(bdev, histogram, histogram_data_cb, NULL); 2223 poll_threads(); 2224 CU_ASSERT(g_status == 0); 2225 CU_ASSERT(bdev->internal.histogram_enabled == true); 2226 SPDK_CU_ASSERT_FATAL(g_histogram != NULL); 2227 2228 g_count = 0; 2229 spdk_histogram_data_iterate(g_histogram, histogram_io_count, NULL); 2230 CU_ASSERT(g_count == 2); 2231 2232 /* Disable histogram */ 2233 spdk_bdev_histogram_enable(bdev, histogram_status_cb, NULL, false); 2234 poll_threads(); 2235 CU_ASSERT(g_status == 0); 2236 CU_ASSERT(bdev->internal.histogram_enabled == false); 2237 2238 /* Try to run histogram commands on disabled bdev */ 2239 spdk_bdev_histogram_get(bdev, histogram, histogram_data_cb, NULL); 2240 poll_threads(); 2241 CU_ASSERT(g_status == -EFAULT); 2242 2243 spdk_histogram_data_free(histogram); 2244 spdk_put_io_channel(ch); 2245 spdk_bdev_close(desc); 2246 free_bdev(bdev); 2247 spdk_bdev_finish(bdev_fini_cb, NULL); 2248 poll_threads(); 2249 } 2250 2251 static void 2252 _bdev_compare(bool emulated) 2253 { 2254 struct spdk_bdev *bdev; 2255 struct spdk_bdev_desc *desc = NULL; 2256 struct spdk_io_channel *ioch; 2257 struct ut_expected_io *expected_io; 2258 uint64_t offset, num_blocks; 2259 uint32_t num_completed; 2260 char aa_buf[512]; 2261 char bb_buf[512]; 2262 struct iovec compare_iov; 2263 uint8_t io_type; 2264 int rc; 2265 2266 if (emulated) { 2267 io_type = SPDK_BDEV_IO_TYPE_READ; 2268 } else { 2269 io_type = SPDK_BDEV_IO_TYPE_COMPARE; 2270 } 2271 2272 memset(aa_buf, 0xaa, sizeof(aa_buf)); 2273 memset(bb_buf, 0xbb, sizeof(bb_buf)); 2274 2275 g_io_types_supported[SPDK_BDEV_IO_TYPE_COMPARE] = !emulated; 2276 2277 spdk_bdev_initialize(bdev_init_cb, NULL); 2278 fn_table.submit_request = stub_submit_request_get_buf; 2279 bdev = allocate_bdev("bdev"); 2280 2281 rc = spdk_bdev_open(bdev, true, NULL, NULL, &desc); 2282 CU_ASSERT_EQUAL(rc, 0); 2283 SPDK_CU_ASSERT_FATAL(desc != NULL); 2284 ioch = spdk_bdev_get_io_channel(desc); 2285 SPDK_CU_ASSERT_FATAL(ioch != NULL); 2286 
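/* Added note: in the emulated case, COMPARE support was masked off above, so
 * the bdev layer is expected to emulate the compare as a READ followed by a
 * memcmp() against the caller's buffer. That is why io_type is READ when
 * emulated == true, and why g_compare_read_buf drives the match/miscompare
 * results below.
 */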
2287 fn_table.submit_request = stub_submit_request_get_buf; 2288 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 2289 2290 offset = 50; 2291 num_blocks = 1; 2292 compare_iov.iov_base = aa_buf; 2293 compare_iov.iov_len = sizeof(aa_buf); 2294 2295 expected_io = ut_alloc_expected_io(io_type, offset, num_blocks, 0); 2296 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2297 2298 g_io_done = false; 2299 g_compare_read_buf = aa_buf; 2300 g_compare_read_buf_len = sizeof(aa_buf); 2301 rc = spdk_bdev_comparev_blocks(desc, ioch, &compare_iov, 1, offset, num_blocks, io_done, NULL); 2302 CU_ASSERT_EQUAL(rc, 0); 2303 num_completed = stub_complete_io(1); 2304 CU_ASSERT_EQUAL(num_completed, 1); 2305 CU_ASSERT(g_io_done == true); 2306 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS); 2307 2308 expected_io = ut_alloc_expected_io(io_type, offset, num_blocks, 0); 2309 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2310 2311 g_io_done = false; 2312 g_compare_read_buf = bb_buf; 2313 g_compare_read_buf_len = sizeof(bb_buf); 2314 rc = spdk_bdev_comparev_blocks(desc, ioch, &compare_iov, 1, offset, num_blocks, io_done, NULL); 2315 CU_ASSERT_EQUAL(rc, 0); 2316 num_completed = stub_complete_io(1); 2317 CU_ASSERT_EQUAL(num_completed, 1); 2318 CU_ASSERT(g_io_done == true); 2319 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_MISCOMPARE); 2320 2321 spdk_put_io_channel(ioch); 2322 spdk_bdev_close(desc); 2323 free_bdev(bdev); 2324 fn_table.submit_request = stub_submit_request; 2325 spdk_bdev_finish(bdev_fini_cb, NULL); 2326 poll_threads(); 2327 2328 g_io_types_supported[SPDK_BDEV_IO_TYPE_COMPARE] = true; 2329 2330 g_compare_read_buf = NULL; 2331 } 2332 2333 static void 2334 bdev_compare(void) 2335 { 2336 _bdev_compare(true); 2337 _bdev_compare(false); 2338 } 2339 2340 static void 2341 bdev_compare_and_write(void) 2342 { 2343 struct spdk_bdev *bdev; 2344 struct spdk_bdev_desc *desc = NULL; 2345 struct spdk_io_channel *ioch; 2346 struct ut_expected_io *expected_io; 2347 uint64_t offset, num_blocks; 2348 uint32_t num_completed; 2349 char aa_buf[512]; 2350 char bb_buf[512]; 2351 char cc_buf[512]; 2352 char write_buf[512]; 2353 struct iovec compare_iov; 2354 struct iovec write_iov; 2355 int rc; 2356 2357 memset(aa_buf, 0xaa, sizeof(aa_buf)); 2358 memset(bb_buf, 0xbb, sizeof(bb_buf)); 2359 memset(cc_buf, 0xcc, sizeof(cc_buf)); 2360 2361 g_io_types_supported[SPDK_BDEV_IO_TYPE_COMPARE] = false; 2362 2363 spdk_bdev_initialize(bdev_init_cb, NULL); 2364 fn_table.submit_request = stub_submit_request_get_buf; 2365 bdev = allocate_bdev("bdev"); 2366 2367 rc = spdk_bdev_open(bdev, true, NULL, NULL, &desc); 2368 CU_ASSERT_EQUAL(rc, 0); 2369 SPDK_CU_ASSERT_FATAL(desc != NULL); 2370 ioch = spdk_bdev_get_io_channel(desc); 2371 SPDK_CU_ASSERT_FATAL(ioch != NULL); 2372 2373 fn_table.submit_request = stub_submit_request_get_buf; 2374 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 2375 2376 offset = 50; 2377 num_blocks = 1; 2378 compare_iov.iov_base = aa_buf; 2379 compare_iov.iov_len = sizeof(aa_buf); 2380 write_iov.iov_base = bb_buf; 2381 write_iov.iov_len = sizeof(bb_buf); 2382 2383 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, offset, num_blocks, 0); 2384 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2385 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, offset, num_blocks, 0); 2386 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2387 2388 g_io_done = false; 2389 g_compare_read_buf = aa_buf; 2390 g_compare_read_buf_len = 
sizeof(aa_buf); 2391 memset(write_buf, 0, sizeof(write_buf)); 2392 g_compare_write_buf = write_buf; 2393 g_compare_write_buf_len = sizeof(write_buf); 2394 rc = spdk_bdev_comparev_and_writev_blocks(desc, ioch, &compare_iov, 1, &write_iov, 1, 2395 offset, num_blocks, io_done, NULL); 2396 /* Trigger range locking */ 2397 poll_threads(); 2398 CU_ASSERT_EQUAL(rc, 0); 2399 num_completed = stub_complete_io(1); 2400 CU_ASSERT_EQUAL(num_completed, 1); 2401 CU_ASSERT(g_io_done == false); 2402 num_completed = stub_complete_io(1); 2403 /* Trigger range unlocking */ 2404 poll_threads(); 2405 CU_ASSERT_EQUAL(num_completed, 1); 2406 CU_ASSERT(g_io_done == true); 2407 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS); 2408 CU_ASSERT(memcmp(write_buf, bb_buf, sizeof(write_buf)) == 0); 2409 2410 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, offset, num_blocks, 0); 2411 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2412 2413 g_io_done = false; 2414 g_compare_read_buf = cc_buf; 2415 g_compare_read_buf_len = sizeof(cc_buf); 2416 memset(write_buf, 0, sizeof(write_buf)); 2417 g_compare_write_buf = write_buf; 2418 g_compare_write_buf_len = sizeof(write_buf); 2419 rc = spdk_bdev_comparev_and_writev_blocks(desc, ioch, &compare_iov, 1, &write_iov, 1, 2420 offset, num_blocks, io_done, NULL); 2421 /* Trigger range locking */ 2422 poll_threads(); 2423 CU_ASSERT_EQUAL(rc, 0); 2424 num_completed = stub_complete_io(1); 2425 /* Trigger range unlocking earlier because we expect error here */ 2426 poll_threads(); 2427 CU_ASSERT_EQUAL(num_completed, 1); 2428 CU_ASSERT(g_io_done == true); 2429 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_MISCOMPARE); 2430 num_completed = stub_complete_io(1); 2431 CU_ASSERT_EQUAL(num_completed, 0); 2432 2433 spdk_put_io_channel(ioch); 2434 spdk_bdev_close(desc); 2435 free_bdev(bdev); 2436 fn_table.submit_request = stub_submit_request; 2437 spdk_bdev_finish(bdev_fini_cb, NULL); 2438 poll_threads(); 2439 2440 g_io_types_supported[SPDK_BDEV_IO_TYPE_COMPARE] = true; 2441 2442 g_compare_read_buf = NULL; 2443 g_compare_write_buf = NULL; 2444 } 2445 2446 static void 2447 bdev_write_zeroes(void) 2448 { 2449 struct spdk_bdev *bdev; 2450 struct spdk_bdev_desc *desc = NULL; 2451 struct spdk_io_channel *ioch; 2452 struct ut_expected_io *expected_io; 2453 uint64_t offset, num_io_blocks, num_blocks; 2454 uint32_t num_completed, num_requests; 2455 int rc; 2456 2457 spdk_bdev_initialize(bdev_init_cb, NULL); 2458 bdev = allocate_bdev("bdev"); 2459 2460 rc = spdk_bdev_open(bdev, true, NULL, NULL, &desc); 2461 CU_ASSERT_EQUAL(rc, 0); 2462 SPDK_CU_ASSERT_FATAL(desc != NULL); 2463 ioch = spdk_bdev_get_io_channel(desc); 2464 SPDK_CU_ASSERT_FATAL(ioch != NULL); 2465 2466 fn_table.submit_request = stub_submit_request; 2467 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 2468 2469 /* First test that if the bdev supports write_zeroes, the request won't be split */ 2470 bdev->md_len = 0; 2471 bdev->blocklen = 4096; 2472 num_blocks = (ZERO_BUFFER_SIZE / bdev->blocklen) * 2; 2473 2474 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, 0, num_blocks, 0); 2475 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2476 rc = spdk_bdev_write_zeroes_blocks(desc, ioch, 0, num_blocks, io_done, NULL); 2477 CU_ASSERT_EQUAL(rc, 0); 2478 num_completed = stub_complete_io(1); 2479 CU_ASSERT_EQUAL(num_completed, 1); 2480 2481 /* Check that if write zeroes is not supported it'll be replaced by regular writes */ 2482 ut_enable_io_type(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, 
false); 2483 num_io_blocks = ZERO_BUFFER_SIZE / bdev->blocklen; 2484 num_requests = 2; 2485 num_blocks = (ZERO_BUFFER_SIZE / bdev->blocklen) * num_requests; 2486 2487 for (offset = 0; offset < num_requests; ++offset) { 2488 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 2489 offset * num_io_blocks, num_io_blocks, 0); 2490 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2491 } 2492 2493 rc = spdk_bdev_write_zeroes_blocks(desc, ioch, 0, num_blocks, io_done, NULL); 2494 CU_ASSERT_EQUAL(rc, 0); 2495 num_completed = stub_complete_io(num_requests); 2496 CU_ASSERT_EQUAL(num_completed, num_requests); 2497 2498 /* Check that the splitting is correct if bdev has interleaved metadata */ 2499 bdev->md_interleave = true; 2500 bdev->md_len = 64; 2501 bdev->blocklen = 4096 + 64; 2502 num_blocks = (ZERO_BUFFER_SIZE / bdev->blocklen) * 2; 2503 2504 num_requests = offset = 0; 2505 while (offset < num_blocks) { 2506 num_io_blocks = spdk_min(ZERO_BUFFER_SIZE / bdev->blocklen, num_blocks - offset); 2507 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 2508 offset, num_io_blocks, 0); 2509 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2510 offset += num_io_blocks; 2511 num_requests++; 2512 } 2513 2514 rc = spdk_bdev_write_zeroes_blocks(desc, ioch, 0, num_blocks, io_done, NULL); 2515 CU_ASSERT_EQUAL(rc, 0); 2516 num_completed = stub_complete_io(num_requests); 2517 CU_ASSERT_EQUAL(num_completed, num_requests); 2518 num_completed = stub_complete_io(num_requests); 2519 CU_ASSERT_EQUAL(num_completed, 0); 2520 2521 /* Check that the same is true for a separate metadata buffer */ 2522 bdev->md_interleave = false; 2523 bdev->md_len = 64; 2524 bdev->blocklen = 4096; 2525 2526 num_requests = offset = 0; 2527 while (offset < num_blocks) { 2528 num_io_blocks = spdk_min(ZERO_BUFFER_SIZE / (bdev->blocklen + bdev->md_len), num_blocks - offset); 2529 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 2530 offset, num_io_blocks, 0); 2531 expected_io->md_buf = (char *)g_bdev_mgr.zero_buffer + num_io_blocks * bdev->blocklen; 2532 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2533 offset += num_io_blocks; 2534 num_requests++; 2535 } 2536 2537 rc = spdk_bdev_write_zeroes_blocks(desc, ioch, 0, num_blocks, io_done, NULL); 2538 CU_ASSERT_EQUAL(rc, 0); 2539 num_completed = stub_complete_io(num_requests); 2540 CU_ASSERT_EQUAL(num_completed, num_requests); 2541 2542 ut_enable_io_type(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, true); 2543 spdk_put_io_channel(ioch); 2544 spdk_bdev_close(desc); 2545 free_bdev(bdev); 2546 spdk_bdev_finish(bdev_fini_cb, NULL); 2547 poll_threads(); 2548 } 2549 2550 static void 2551 bdev_open_while_hotremove(void) 2552 { 2553 struct spdk_bdev *bdev; 2554 struct spdk_bdev_desc *desc[2] = {}; 2555 int rc; 2556 2557 bdev = allocate_bdev("bdev"); 2558 2559 rc = spdk_bdev_open(bdev, false, NULL, NULL, &desc[0]); 2560 CU_ASSERT(rc == 0); 2561 SPDK_CU_ASSERT_FATAL(desc[0] != NULL); 2562 2563 spdk_bdev_unregister(bdev, NULL, NULL); 2564 2565 rc = spdk_bdev_open(bdev, false, NULL, NULL, &desc[1]); 2566 CU_ASSERT(rc == -ENODEV); 2567 SPDK_CU_ASSERT_FATAL(desc[1] == NULL); 2568 2569 spdk_bdev_close(desc[0]); 2570 free_bdev(bdev); 2571 } 2572 2573 static void 2574 bdev_close_while_hotremove(void) 2575 { 2576 struct spdk_bdev *bdev; 2577 struct spdk_bdev_desc *desc = NULL; 2578 int rc = 0; 2579 2580 bdev = allocate_bdev("bdev"); 2581 2582 rc = spdk_bdev_open_ext("bdev", true, bdev_open_cb1, &desc, &desc); 2583 CU_ASSERT_EQUAL(rc, 0); 2584 2585 /* Simulate
hot-unplug by unregistering bdev */ 2586 g_event_type1 = 0xFF; 2587 g_unregister_arg = NULL; 2588 g_unregister_rc = -1; 2589 spdk_bdev_unregister(bdev, bdev_unregister_cb, (void *)0x12345678); 2590 /* Close device while remove event is in flight */ 2591 spdk_bdev_close(desc); 2592 2593 /* Ensure that unregister callback is delayed */ 2594 CU_ASSERT_EQUAL(g_unregister_arg, NULL); 2595 CU_ASSERT_EQUAL(g_unregister_rc, -1); 2596 2597 poll_threads(); 2598 2599 /* Event callback shall not be issued because device was closed */ 2600 CU_ASSERT_EQUAL(g_event_type1, 0xFF); 2601 /* Unregister callback is issued */ 2602 CU_ASSERT_EQUAL(g_unregister_arg, (void *)0x12345678); 2603 CU_ASSERT_EQUAL(g_unregister_rc, 0); 2604 2605 free_bdev(bdev); 2606 } 2607 2608 static void 2609 bdev_open_ext(void) 2610 { 2611 struct spdk_bdev *bdev; 2612 struct spdk_bdev_desc *desc1 = NULL; 2613 struct spdk_bdev_desc *desc2 = NULL; 2614 int rc = 0; 2615 2616 bdev = allocate_bdev("bdev"); 2617 2618 rc = spdk_bdev_open_ext("bdev", true, NULL, NULL, &desc1); 2619 CU_ASSERT_EQUAL(rc, -EINVAL); 2620 2621 rc = spdk_bdev_open_ext("bdev", true, bdev_open_cb1, &desc1, &desc1); 2622 CU_ASSERT_EQUAL(rc, 0); 2623 2624 rc = spdk_bdev_open_ext("bdev", true, bdev_open_cb2, &desc2, &desc2); 2625 CU_ASSERT_EQUAL(rc, 0); 2626 2627 g_event_type1 = 0xFF; 2628 g_event_type2 = 0xFF; 2629 2630 /* Simulate hot-unplug by unregistering bdev */ 2631 spdk_bdev_unregister(bdev, NULL, NULL); 2632 poll_threads(); 2633 2634 /* Check if correct events have been triggered in event callback fn */ 2635 CU_ASSERT_EQUAL(g_event_type1, SPDK_BDEV_EVENT_REMOVE); 2636 CU_ASSERT_EQUAL(g_event_type2, SPDK_BDEV_EVENT_REMOVE); 2637 2638 free_bdev(bdev); 2639 poll_threads(); 2640 } 2641 2642 struct timeout_io_cb_arg { 2643 struct iovec iov; 2644 uint8_t type; 2645 }; 2646 2647 static int 2648 bdev_channel_count_submitted_io(struct spdk_bdev_channel *ch) 2649 { 2650 struct spdk_bdev_io *bdev_io; 2651 int n = 0; 2652 2653 if (!ch) { 2654 return -1; 2655 } 2656 2657 TAILQ_FOREACH(bdev_io, &ch->io_submitted, internal.ch_link) { 2658 n++; 2659 } 2660 2661 return n; 2662 } 2663 2664 static void 2665 bdev_channel_io_timeout_cb(void *cb_arg, struct spdk_bdev_io *bdev_io) 2666 { 2667 struct timeout_io_cb_arg *ctx = cb_arg; 2668 2669 ctx->type = bdev_io->type; 2670 ctx->iov.iov_base = bdev_io->iov.iov_base; 2671 ctx->iov.iov_len = bdev_io->iov.iov_len; 2672 } 2673 2674 static void 2675 bdev_set_io_timeout(void) 2676 { 2677 struct spdk_bdev *bdev; 2678 struct spdk_bdev_desc *desc = NULL; 2679 struct spdk_io_channel *io_ch = NULL; 2680 struct spdk_bdev_channel *bdev_ch = NULL; 2681 struct timeout_io_cb_arg cb_arg; 2682 2683 spdk_bdev_initialize(bdev_init_cb, NULL); 2684 2685 bdev = allocate_bdev("bdev"); 2686 2687 CU_ASSERT(spdk_bdev_open(bdev, true, NULL, NULL, &desc) == 0); 2688 SPDK_CU_ASSERT_FATAL(desc != NULL); 2689 io_ch = spdk_bdev_get_io_channel(desc); 2690 CU_ASSERT(io_ch != NULL); 2691 2692 bdev_ch = spdk_io_channel_get_ctx(io_ch); 2693 CU_ASSERT(TAILQ_EMPTY(&bdev_ch->io_submitted)); 2694 2695 /* This is part 1. 2696 * Check the bdev_ch->io_submitted list, to make sure it links the 2697 * I/Os that were submitted by the user. 2698 */ 2699 CU_ASSERT(spdk_bdev_read(desc, io_ch, (void *)0x1000, 0, 4096, io_done, NULL) == 0); 2700 CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 1); 2701 CU_ASSERT(spdk_bdev_write(desc, io_ch, (void *)0x2000, 0, 4096, io_done, NULL) == 0); 2702 CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 2); 2703 stub_complete_io(1); 2704 CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 1); 2705 stub_complete_io(1); 2706 CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 0); 2707 2708 /* Split IO */ 2709 bdev->optimal_io_boundary = 16; 2710 bdev->split_on_optimal_io_boundary = true; 2711 2712 /* Now test that a single-vector command is split correctly. 2713 * Offset 14, length 8, payload 0xF000 2714 * Child - Offset 14, length 2, payload 0xF000 2715 * Child - Offset 16, length 6, payload 0xF000 + 2 * 512 2716 * 2717 * Set up the expected values before calling spdk_bdev_read_blocks 2718 */ 2719 CU_ASSERT(spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 8, io_done, NULL) == 0); 2720 /* We count all submitted I/Os, including those generated by splitting. */ 2721 CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 3); 2722 stub_complete_io(1); 2723 CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 2); 2724 stub_complete_io(1); 2725 CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 0); 2726 2727 /* Also include the reset IO */ 2728 CU_ASSERT(spdk_bdev_reset(desc, io_ch, io_done, NULL) == 0); 2729 CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 1); 2730 poll_threads(); 2731 stub_complete_io(1); 2732 poll_threads(); 2733 CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 0); 2734 2735 /* This is part 2. 2736 * Test registering the descriptor's timeout poller. 2737 */ 2738 2739 /* Successfully set the timeout */ 2740 CU_ASSERT(spdk_bdev_set_timeout(desc, 30, bdev_channel_io_timeout_cb, &cb_arg) == 0); 2741 CU_ASSERT(desc->io_timeout_poller != NULL); 2742 CU_ASSERT(desc->timeout_in_sec == 30); 2743 CU_ASSERT(desc->cb_fn == bdev_channel_io_timeout_cb); 2744 CU_ASSERT(desc->cb_arg == &cb_arg); 2745 2746 /* Change the timeout limit */ 2747 CU_ASSERT(spdk_bdev_set_timeout(desc, 20, bdev_channel_io_timeout_cb, &cb_arg) == 0); 2748 CU_ASSERT(desc->io_timeout_poller != NULL); 2749 CU_ASSERT(desc->timeout_in_sec == 20); 2750 CU_ASSERT(desc->cb_fn == bdev_channel_io_timeout_cb); 2751 CU_ASSERT(desc->cb_arg == &cb_arg); 2752 2753 /* Disable the timeout */ 2754 CU_ASSERT(spdk_bdev_set_timeout(desc, 0, NULL, NULL) == 0); 2755 CU_ASSERT(desc->io_timeout_poller == NULL); 2756 2757 /* This is part 3. 2758 * Test catching a timed-out I/O and check whether that I/O is 2759 * the submitted one.
2760 */ 2761 memset(&cb_arg, 0, sizeof(cb_arg)); 2762 CU_ASSERT(spdk_bdev_set_timeout(desc, 30, bdev_channel_io_timeout_cb, &cb_arg) == 0); 2763 CU_ASSERT(spdk_bdev_write_blocks(desc, io_ch, (void *)0x1000, 0, 1, io_done, NULL) == 0); 2764 2765 /* Don't reach the limit */ 2766 spdk_delay_us(15 * spdk_get_ticks_hz()); 2767 poll_threads(); 2768 CU_ASSERT(cb_arg.type == 0); 2769 CU_ASSERT(cb_arg.iov.iov_base == (void *)0x0); 2770 CU_ASSERT(cb_arg.iov.iov_len == 0); 2771 2772 /* 15 + 15 = 30 reach the limit */ 2773 spdk_delay_us(15 * spdk_get_ticks_hz()); 2774 poll_threads(); 2775 CU_ASSERT(cb_arg.type == SPDK_BDEV_IO_TYPE_WRITE); 2776 CU_ASSERT(cb_arg.iov.iov_base == (void *)0x1000); 2777 CU_ASSERT(cb_arg.iov.iov_len == 1 * bdev->blocklen); 2778 stub_complete_io(1); 2779 2780 /* Use the same split IO above and check the IO */ 2781 memset(&cb_arg, 0, sizeof(cb_arg)); 2782 CU_ASSERT(spdk_bdev_write_blocks(desc, io_ch, (void *)0xF000, 14, 8, io_done, NULL) == 0); 2783 2784 /* The first child complete in time */ 2785 spdk_delay_us(15 * spdk_get_ticks_hz()); 2786 poll_threads(); 2787 stub_complete_io(1); 2788 CU_ASSERT(cb_arg.type == 0); 2789 CU_ASSERT(cb_arg.iov.iov_base == (void *)0x0); 2790 CU_ASSERT(cb_arg.iov.iov_len == 0); 2791 2792 /* The second child reach the limit */ 2793 spdk_delay_us(15 * spdk_get_ticks_hz()); 2794 poll_threads(); 2795 CU_ASSERT(cb_arg.type == SPDK_BDEV_IO_TYPE_WRITE); 2796 CU_ASSERT(cb_arg.iov.iov_base == (void *)0xF000); 2797 CU_ASSERT(cb_arg.iov.iov_len == 8 * bdev->blocklen); 2798 stub_complete_io(1); 2799 2800 /* Also include the reset IO */ 2801 memset(&cb_arg, 0, sizeof(cb_arg)); 2802 CU_ASSERT(spdk_bdev_reset(desc, io_ch, io_done, NULL) == 0); 2803 spdk_delay_us(30 * spdk_get_ticks_hz()); 2804 poll_threads(); 2805 CU_ASSERT(cb_arg.type == SPDK_BDEV_IO_TYPE_RESET); 2806 stub_complete_io(1); 2807 poll_threads(); 2808 2809 spdk_put_io_channel(io_ch); 2810 spdk_bdev_close(desc); 2811 free_bdev(bdev); 2812 spdk_bdev_finish(bdev_fini_cb, NULL); 2813 poll_threads(); 2814 } 2815 2816 static void 2817 lba_range_overlap(void) 2818 { 2819 struct lba_range r1, r2; 2820 2821 r1.offset = 100; 2822 r1.length = 50; 2823 2824 r2.offset = 0; 2825 r2.length = 1; 2826 CU_ASSERT(!bdev_lba_range_overlapped(&r1, &r2)); 2827 2828 r2.offset = 0; 2829 r2.length = 100; 2830 CU_ASSERT(!bdev_lba_range_overlapped(&r1, &r2)); 2831 2832 r2.offset = 0; 2833 r2.length = 110; 2834 CU_ASSERT(bdev_lba_range_overlapped(&r1, &r2)); 2835 2836 r2.offset = 100; 2837 r2.length = 10; 2838 CU_ASSERT(bdev_lba_range_overlapped(&r1, &r2)); 2839 2840 r2.offset = 110; 2841 r2.length = 20; 2842 CU_ASSERT(bdev_lba_range_overlapped(&r1, &r2)); 2843 2844 r2.offset = 140; 2845 r2.length = 150; 2846 CU_ASSERT(bdev_lba_range_overlapped(&r1, &r2)); 2847 2848 r2.offset = 130; 2849 r2.length = 200; 2850 CU_ASSERT(bdev_lba_range_overlapped(&r1, &r2)); 2851 2852 r2.offset = 150; 2853 r2.length = 100; 2854 CU_ASSERT(!bdev_lba_range_overlapped(&r1, &r2)); 2855 2856 r2.offset = 110; 2857 r2.length = 0; 2858 CU_ASSERT(!bdev_lba_range_overlapped(&r1, &r2)); 2859 } 2860 2861 static bool g_lock_lba_range_done; 2862 static bool g_unlock_lba_range_done; 2863 2864 static void 2865 lock_lba_range_done(void *ctx, int status) 2866 { 2867 g_lock_lba_range_done = true; 2868 } 2869 2870 static void 2871 unlock_lba_range_done(void *ctx, int status) 2872 { 2873 g_unlock_lba_range_done = true; 2874 } 2875 2876 static void 2877 lock_lba_range_check_ranges(void) 2878 { 2879 struct spdk_bdev *bdev; 2880 struct spdk_bdev_desc *desc = NULL; 2881 
struct spdk_io_channel *io_ch; 2882 struct spdk_bdev_channel *channel; 2883 struct lba_range *range; 2884 int ctx1; 2885 int rc; 2886 2887 spdk_bdev_initialize(bdev_init_cb, NULL); 2888 2889 bdev = allocate_bdev("bdev0"); 2890 2891 rc = spdk_bdev_open(bdev, true, NULL, NULL, &desc); 2892 CU_ASSERT(rc == 0); 2893 CU_ASSERT(desc != NULL); 2894 io_ch = spdk_bdev_get_io_channel(desc); 2895 CU_ASSERT(io_ch != NULL); 2896 channel = spdk_io_channel_get_ctx(io_ch); 2897 2898 g_lock_lba_range_done = false; 2899 rc = bdev_lock_lba_range(desc, io_ch, 20, 10, lock_lba_range_done, &ctx1); 2900 CU_ASSERT(rc == 0); 2901 poll_threads(); 2902 2903 CU_ASSERT(g_lock_lba_range_done == true); 2904 range = TAILQ_FIRST(&channel->locked_ranges); 2905 SPDK_CU_ASSERT_FATAL(range != NULL); 2906 CU_ASSERT(range->offset == 20); 2907 CU_ASSERT(range->length == 10); 2908 CU_ASSERT(range->owner_ch == channel); 2909 2910 /* Unlocks must exactly match a lock. */ 2911 g_unlock_lba_range_done = false; 2912 rc = bdev_unlock_lba_range(desc, io_ch, 20, 1, unlock_lba_range_done, &ctx1); 2913 CU_ASSERT(rc == -EINVAL); 2914 CU_ASSERT(g_unlock_lba_range_done == false); 2915 2916 rc = bdev_unlock_lba_range(desc, io_ch, 20, 10, unlock_lba_range_done, &ctx1); 2917 CU_ASSERT(rc == 0); 2918 spdk_delay_us(100); 2919 poll_threads(); 2920 2921 CU_ASSERT(g_unlock_lba_range_done == true); 2922 CU_ASSERT(TAILQ_EMPTY(&channel->locked_ranges)); 2923 2924 spdk_put_io_channel(io_ch); 2925 spdk_bdev_close(desc); 2926 free_bdev(bdev); 2927 spdk_bdev_finish(bdev_fini_cb, NULL); 2928 poll_threads(); 2929 } 2930 2931 static void 2932 lock_lba_range_with_io_outstanding(void) 2933 { 2934 struct spdk_bdev *bdev; 2935 struct spdk_bdev_desc *desc = NULL; 2936 struct spdk_io_channel *io_ch; 2937 struct spdk_bdev_channel *channel; 2938 struct lba_range *range; 2939 char buf[4096]; 2940 int ctx1; 2941 int rc; 2942 2943 spdk_bdev_initialize(bdev_init_cb, NULL); 2944 2945 bdev = allocate_bdev("bdev0"); 2946 2947 rc = spdk_bdev_open(bdev, true, NULL, NULL, &desc); 2948 CU_ASSERT(rc == 0); 2949 CU_ASSERT(desc != NULL); 2950 io_ch = spdk_bdev_get_io_channel(desc); 2951 CU_ASSERT(io_ch != NULL); 2952 channel = spdk_io_channel_get_ctx(io_ch); 2953 2954 g_io_done = false; 2955 rc = spdk_bdev_read_blocks(desc, io_ch, buf, 20, 1, io_done, &ctx1); 2956 CU_ASSERT(rc == 0); 2957 2958 g_lock_lba_range_done = false; 2959 rc = bdev_lock_lba_range(desc, io_ch, 20, 10, lock_lba_range_done, &ctx1); 2960 CU_ASSERT(rc == 0); 2961 poll_threads(); 2962 2963 /* The lock should immediately become valid, since there are no outstanding 2964 * write I/O. 2965 */ 2966 CU_ASSERT(g_io_done == false); 2967 CU_ASSERT(g_lock_lba_range_done == true); 2968 range = TAILQ_FIRST(&channel->locked_ranges); 2969 SPDK_CU_ASSERT_FATAL(range != NULL); 2970 CU_ASSERT(range->offset == 20); 2971 CU_ASSERT(range->length == 10); 2972 CU_ASSERT(range->owner_ch == channel); 2973 CU_ASSERT(range->locked_ctx == &ctx1); 2974 2975 rc = bdev_unlock_lba_range(desc, io_ch, 20, 10, lock_lba_range_done, &ctx1); 2976 CU_ASSERT(rc == 0); 2977 stub_complete_io(1); 2978 spdk_delay_us(100); 2979 poll_threads(); 2980 2981 CU_ASSERT(TAILQ_EMPTY(&channel->locked_ranges)); 2982 2983 /* Now try again, but with a write I/O. 
*/ 2984 g_io_done = false; 2985 rc = spdk_bdev_write_blocks(desc, io_ch, buf, 20, 1, io_done, &ctx1); 2986 CU_ASSERT(rc == 0); 2987 2988 g_lock_lba_range_done = false; 2989 rc = bdev_lock_lba_range(desc, io_ch, 20, 10, lock_lba_range_done, &ctx1); 2990 CU_ASSERT(rc == 0); 2991 poll_threads(); 2992 2993 /* The lock should not be fully valid yet, since a write I/O is outstanding. 2994 * But note that the range should be on the channel's locked_list, to make sure no 2995 * new write I/O are started. 2996 */ 2997 CU_ASSERT(g_io_done == false); 2998 CU_ASSERT(g_lock_lba_range_done == false); 2999 range = TAILQ_FIRST(&channel->locked_ranges); 3000 SPDK_CU_ASSERT_FATAL(range != NULL); 3001 CU_ASSERT(range->offset == 20); 3002 CU_ASSERT(range->length == 10); 3003 3004 /* Complete the write I/O. This should make the lock valid (checked by confirming 3005 * our callback was invoked). 3006 */ 3007 stub_complete_io(1); 3008 spdk_delay_us(100); 3009 poll_threads(); 3010 CU_ASSERT(g_io_done == true); 3011 CU_ASSERT(g_lock_lba_range_done == true); 3012 3013 rc = bdev_unlock_lba_range(desc, io_ch, 20, 10, unlock_lba_range_done, &ctx1); 3014 CU_ASSERT(rc == 0); 3015 poll_threads(); 3016 3017 CU_ASSERT(TAILQ_EMPTY(&channel->locked_ranges)); 3018 3019 spdk_put_io_channel(io_ch); 3020 spdk_bdev_close(desc); 3021 free_bdev(bdev); 3022 spdk_bdev_finish(bdev_fini_cb, NULL); 3023 poll_threads(); 3024 } 3025 3026 static void 3027 lock_lba_range_overlapped(void) 3028 { 3029 struct spdk_bdev *bdev; 3030 struct spdk_bdev_desc *desc = NULL; 3031 struct spdk_io_channel *io_ch; 3032 struct spdk_bdev_channel *channel; 3033 struct lba_range *range; 3034 int ctx1; 3035 int rc; 3036 3037 spdk_bdev_initialize(bdev_init_cb, NULL); 3038 3039 bdev = allocate_bdev("bdev0"); 3040 3041 rc = spdk_bdev_open(bdev, true, NULL, NULL, &desc); 3042 CU_ASSERT(rc == 0); 3043 CU_ASSERT(desc != NULL); 3044 io_ch = spdk_bdev_get_io_channel(desc); 3045 CU_ASSERT(io_ch != NULL); 3046 channel = spdk_io_channel_get_ctx(io_ch); 3047 3048 /* Lock range 20-29. */ 3049 g_lock_lba_range_done = false; 3050 rc = bdev_lock_lba_range(desc, io_ch, 20, 10, lock_lba_range_done, &ctx1); 3051 CU_ASSERT(rc == 0); 3052 poll_threads(); 3053 3054 CU_ASSERT(g_lock_lba_range_done == true); 3055 range = TAILQ_FIRST(&channel->locked_ranges); 3056 SPDK_CU_ASSERT_FATAL(range != NULL); 3057 CU_ASSERT(range->offset == 20); 3058 CU_ASSERT(range->length == 10); 3059 3060 /* Try to lock range 25-39. It should not lock immediately, since it overlaps with 3061 * 20-29. 3062 */ 3063 g_lock_lba_range_done = false; 3064 rc = bdev_lock_lba_range(desc, io_ch, 25, 15, lock_lba_range_done, &ctx1); 3065 CU_ASSERT(rc == 0); 3066 poll_threads(); 3067 3068 CU_ASSERT(g_lock_lba_range_done == false); 3069 range = TAILQ_FIRST(&bdev->internal.pending_locked_ranges); 3070 SPDK_CU_ASSERT_FATAL(range != NULL); 3071 CU_ASSERT(range->offset == 25); 3072 CU_ASSERT(range->length == 15); 3073 3074 /* Unlock 20-29. This should result in range 25-39 now getting locked since it 3075 * no longer overlaps with an active lock. 
3076 */ 3077 g_unlock_lba_range_done = false; 3078 rc = bdev_unlock_lba_range(desc, io_ch, 20, 10, unlock_lba_range_done, &ctx1); 3079 CU_ASSERT(rc == 0); 3080 poll_threads(); 3081 3082 CU_ASSERT(g_unlock_lba_range_done == true); 3083 CU_ASSERT(TAILQ_EMPTY(&bdev->internal.pending_locked_ranges)); 3084 range = TAILQ_FIRST(&channel->locked_ranges); 3085 SPDK_CU_ASSERT_FATAL(range != NULL); 3086 CU_ASSERT(range->offset == 25); 3087 CU_ASSERT(range->length == 15); 3088 3089 /* Lock 40-59. This should immediately lock since it does not overlap with the 3090 * currently active 25-39 lock. 3091 */ 3092 g_lock_lba_range_done = false; 3093 rc = bdev_lock_lba_range(desc, io_ch, 40, 20, lock_lba_range_done, &ctx1); 3094 CU_ASSERT(rc == 0); 3095 poll_threads(); 3096 3097 CU_ASSERT(g_lock_lba_range_done == true); 3098 range = TAILQ_FIRST(&bdev->internal.locked_ranges); 3099 SPDK_CU_ASSERT_FATAL(range != NULL); 3100 range = TAILQ_NEXT(range, tailq); 3101 SPDK_CU_ASSERT_FATAL(range != NULL); 3102 CU_ASSERT(range->offset == 40); 3103 CU_ASSERT(range->length == 20); 3104 3105 /* Try to lock 35-44. Note that this overlaps with both 25-39 and 40-59. */ 3106 g_lock_lba_range_done = false; 3107 rc = bdev_lock_lba_range(desc, io_ch, 35, 10, lock_lba_range_done, &ctx1); 3108 CU_ASSERT(rc == 0); 3109 poll_threads(); 3110 3111 CU_ASSERT(g_lock_lba_range_done == false); 3112 range = TAILQ_FIRST(&bdev->internal.pending_locked_ranges); 3113 SPDK_CU_ASSERT_FATAL(range != NULL); 3114 CU_ASSERT(range->offset == 35); 3115 CU_ASSERT(range->length == 10); 3116 3117 /* Unlock 25-39. Make sure that 35-44 is still in the pending list, since 3118 * the 40-59 lock is still active. 3119 */ 3120 g_unlock_lba_range_done = false; 3121 rc = bdev_unlock_lba_range(desc, io_ch, 25, 15, unlock_lba_range_done, &ctx1); 3122 CU_ASSERT(rc == 0); 3123 poll_threads(); 3124 3125 CU_ASSERT(g_unlock_lba_range_done == true); 3126 CU_ASSERT(g_lock_lba_range_done == false); 3127 range = TAILQ_FIRST(&bdev->internal.pending_locked_ranges); 3128 SPDK_CU_ASSERT_FATAL(range != NULL); 3129 CU_ASSERT(range->offset == 35); 3130 CU_ASSERT(range->length == 10); 3131 3132 /* Unlock 40-59. This should result in 35-44 now getting locked, since there are 3133 * no longer any active overlapping locks. 3134 */ 3135 g_unlock_lba_range_done = false; 3136 rc = bdev_unlock_lba_range(desc, io_ch, 40, 20, unlock_lba_range_done, &ctx1); 3137 CU_ASSERT(rc == 0); 3138 poll_threads(); 3139 3140 CU_ASSERT(g_unlock_lba_range_done == true); 3141 CU_ASSERT(g_lock_lba_range_done == true); 3142 CU_ASSERT(TAILQ_EMPTY(&bdev->internal.pending_locked_ranges)); 3143 range = TAILQ_FIRST(&bdev->internal.locked_ranges); 3144 SPDK_CU_ASSERT_FATAL(range != NULL); 3145 CU_ASSERT(range->offset == 35); 3146 CU_ASSERT(range->length == 10); 3147 3148 /* Finally, unlock 35-44. 
*/ 3149 g_unlock_lba_range_done = false; 3150 rc = bdev_unlock_lba_range(desc, io_ch, 35, 10, unlock_lba_range_done, &ctx1); 3151 CU_ASSERT(rc == 0); 3152 poll_threads(); 3153 3154 CU_ASSERT(g_unlock_lba_range_done == true); 3155 CU_ASSERT(TAILQ_EMPTY(&bdev->internal.locked_ranges)); 3156 3157 spdk_put_io_channel(io_ch); 3158 spdk_bdev_close(desc); 3159 free_bdev(bdev); 3160 spdk_bdev_finish(bdev_fini_cb, NULL); 3161 poll_threads(); 3162 } 3163 3164 static void 3165 abort_done(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg) 3166 { 3167 g_abort_done = true; 3168 g_abort_status = bdev_io->internal.status; 3169 spdk_bdev_free_io(bdev_io); 3170 } 3171 3172 static void 3173 bdev_io_abort(void) 3174 { 3175 struct spdk_bdev *bdev; 3176 struct spdk_bdev_desc *desc = NULL; 3177 struct spdk_io_channel *io_ch; 3178 struct spdk_bdev_channel *channel; 3179 struct spdk_bdev_mgmt_channel *mgmt_ch; 3180 struct spdk_bdev_opts bdev_opts = { 3181 .bdev_io_pool_size = 7, 3182 .bdev_io_cache_size = 2, 3183 }; 3184 struct iovec iov[BDEV_IO_NUM_CHILD_IOV * 2]; 3185 uint64_t io_ctx1 = 0, io_ctx2 = 0, i; 3186 int rc; 3187 3188 rc = spdk_bdev_set_opts(&bdev_opts); 3189 CU_ASSERT(rc == 0); 3190 spdk_bdev_initialize(bdev_init_cb, NULL); 3191 3192 bdev = allocate_bdev("bdev0"); 3193 3194 rc = spdk_bdev_open(bdev, true, NULL, NULL, &desc); 3195 CU_ASSERT(rc == 0); 3196 CU_ASSERT(desc != NULL); 3197 io_ch = spdk_bdev_get_io_channel(desc); 3198 CU_ASSERT(io_ch != NULL); 3199 channel = spdk_io_channel_get_ctx(io_ch); 3200 mgmt_ch = channel->shared_resource->mgmt_ch; 3201 3202 g_abort_done = false; 3203 3204 ut_enable_io_type(SPDK_BDEV_IO_TYPE_ABORT, false); 3205 3206 rc = spdk_bdev_abort(desc, io_ch, &io_ctx1, abort_done, NULL); 3207 CU_ASSERT(rc == -ENOTSUP); 3208 3209 ut_enable_io_type(SPDK_BDEV_IO_TYPE_ABORT, true); 3210 3211 rc = spdk_bdev_abort(desc, io_ch, &io_ctx2, abort_done, NULL); 3212 CU_ASSERT(rc == 0); 3213 CU_ASSERT(g_abort_done == true); 3214 CU_ASSERT(g_abort_status == SPDK_BDEV_IO_STATUS_FAILED); 3215 3216 /* Test the case that the target I/O was successfully aborted. */ 3217 g_io_done = false; 3218 3219 rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, &io_ctx1); 3220 CU_ASSERT(rc == 0); 3221 CU_ASSERT(g_io_done == false); 3222 3223 g_abort_done = false; 3224 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 3225 3226 rc = spdk_bdev_abort(desc, io_ch, &io_ctx1, abort_done, NULL); 3227 CU_ASSERT(rc == 0); 3228 CU_ASSERT(g_io_done == true); 3229 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED); 3230 stub_complete_io(1); 3231 CU_ASSERT(g_abort_done == true); 3232 CU_ASSERT(g_abort_status == SPDK_BDEV_IO_STATUS_SUCCESS); 3233 3234 /* Test the case that the target I/O was not aborted because it completed 3235 * in the middle of execution of the abort. 
3236 */ 3237 g_io_done = false; 3238 3239 rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, &io_ctx1); 3240 CU_ASSERT(rc == 0); 3241 CU_ASSERT(g_io_done == false); 3242 3243 g_abort_done = false; 3244 g_io_exp_status = SPDK_BDEV_IO_STATUS_FAILED; 3245 3246 rc = spdk_bdev_abort(desc, io_ch, &io_ctx1, abort_done, NULL); 3247 CU_ASSERT(rc == 0); 3248 CU_ASSERT(g_io_done == false); 3249 3250 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 3251 stub_complete_io(1); 3252 CU_ASSERT(g_io_done == true); 3253 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS); 3254 3255 g_io_exp_status = SPDK_BDEV_IO_STATUS_FAILED; 3256 stub_complete_io(1); 3257 CU_ASSERT(g_abort_done == true); 3258 CU_ASSERT(g_abort_status == SPDK_BDEV_IO_STATUS_SUCCESS); 3259 3260 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 3261 3262 bdev->optimal_io_boundary = 16; 3263 bdev->split_on_optimal_io_boundary = true; 3264 3265 /* Test that a single-vector command which is split is aborted correctly. 3266 * Offset 14, length 8, payload 0xF000 3267 * Child - Offset 14, length 2, payload 0xF000 3268 * Child - Offset 16, length 6, payload 0xF000 + 2 * 512 3269 */ 3270 g_io_done = false; 3271 3272 rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 8, io_done, &io_ctx1); 3273 CU_ASSERT(rc == 0); 3274 CU_ASSERT(g_io_done == false); 3275 3276 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 3277 3278 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 3279 3280 rc = spdk_bdev_abort(desc, io_ch, &io_ctx1, abort_done, NULL); 3281 CU_ASSERT(rc == 0); 3282 CU_ASSERT(g_io_done == true); 3283 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED); 3284 stub_complete_io(2); 3285 CU_ASSERT(g_abort_done == true); 3286 CU_ASSERT(g_abort_status == SPDK_BDEV_IO_STATUS_SUCCESS); 3287 3288 /* Test that a multi-vector command that needs to be split both on the optimal 3289 * I/O boundary and on the child iov capacity is aborted correctly. Abort is 3290 * requested before the second child I/O is submitted. The parent I/O should 3291 * complete with failure without submitting the second child I/O. 3292 */ 3293 for (i = 0; i < BDEV_IO_NUM_CHILD_IOV * 2; i++) { 3294 iov[i].iov_base = (void *)((i + 1) * 0x10000); 3295 iov[i].iov_len = 512; 3296 } 3297 3298 bdev->optimal_io_boundary = BDEV_IO_NUM_CHILD_IOV; 3299 g_io_done = false; 3300 rc = spdk_bdev_readv_blocks(desc, io_ch, iov, BDEV_IO_NUM_CHILD_IOV * 2, 0, 3301 BDEV_IO_NUM_CHILD_IOV * 2, io_done, &io_ctx1); 3302 CU_ASSERT(rc == 0); 3303 CU_ASSERT(g_io_done == false); 3304 3305 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 3306 3307 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 3308 3309 rc = spdk_bdev_abort(desc, io_ch, &io_ctx1, abort_done, NULL); 3310 CU_ASSERT(rc == 0); 3311 CU_ASSERT(g_io_done == true); 3312 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED); 3313 stub_complete_io(1); 3314 CU_ASSERT(g_abort_done == true); 3315 CU_ASSERT(g_abort_status == SPDK_BDEV_IO_STATUS_SUCCESS); 3316 3317 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 3318 3319 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 3320 3321 bdev->optimal_io_boundary = 16; 3322 g_io_done = false; 3323 3324 /* Test that a single-vector command which is split is aborted correctly. 3325 * Unlike the cases above, the child abort requests will be submitted 3326 * sequentially due to the capacity of spdk_bdev_io.
3327 */ 3328 rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 50, io_done, &io_ctx1); 3329 CU_ASSERT(rc == 0); 3330 CU_ASSERT(g_io_done == false); 3331 3332 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 4); 3333 3334 g_abort_done = false; 3335 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 3336 3337 rc = spdk_bdev_abort(desc, io_ch, &io_ctx1, abort_done, NULL); 3338 CU_ASSERT(rc == 0); 3339 CU_ASSERT(!TAILQ_EMPTY(&mgmt_ch->io_wait_queue)); 3340 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 4); 3341 3342 stub_complete_io(1); 3343 CU_ASSERT(g_io_done == true); 3344 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED); 3345 stub_complete_io(3); 3346 CU_ASSERT(g_abort_done == true); 3347 CU_ASSERT(g_abort_status == SPDK_BDEV_IO_STATUS_SUCCESS); 3348 3349 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 3350 3351 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 3352 3353 spdk_put_io_channel(io_ch); 3354 spdk_bdev_close(desc); 3355 free_bdev(bdev); 3356 spdk_bdev_finish(bdev_fini_cb, NULL); 3357 poll_threads(); 3358 } 3359 3360 int 3361 main(int argc, char **argv) 3362 { 3363 CU_pSuite suite = NULL; 3364 unsigned int num_failures; 3365 3366 CU_set_error_action(CUEA_ABORT); 3367 CU_initialize_registry(); 3368 3369 suite = CU_add_suite("bdev", null_init, null_clean); 3370 3371 CU_ADD_TEST(suite, bytes_to_blocks_test); 3372 CU_ADD_TEST(suite, num_blocks_test); 3373 CU_ADD_TEST(suite, io_valid_test); 3374 CU_ADD_TEST(suite, open_write_test); 3375 CU_ADD_TEST(suite, alias_add_del_test); 3376 CU_ADD_TEST(suite, get_device_stat_test); 3377 CU_ADD_TEST(suite, bdev_io_types_test); 3378 CU_ADD_TEST(suite, bdev_io_wait_test); 3379 CU_ADD_TEST(suite, bdev_io_spans_boundary_test); 3380 CU_ADD_TEST(suite, bdev_io_split_test); 3381 CU_ADD_TEST(suite, bdev_io_split_with_io_wait); 3382 CU_ADD_TEST(suite, bdev_io_alignment_with_boundary); 3383 CU_ADD_TEST(suite, bdev_io_alignment); 3384 CU_ADD_TEST(suite, bdev_histograms); 3385 CU_ADD_TEST(suite, bdev_write_zeroes); 3386 CU_ADD_TEST(suite, bdev_compare_and_write); 3387 CU_ADD_TEST(suite, bdev_compare); 3388 CU_ADD_TEST(suite, bdev_open_while_hotremove); 3389 CU_ADD_TEST(suite, bdev_close_while_hotremove); 3390 CU_ADD_TEST(suite, bdev_open_ext); 3391 CU_ADD_TEST(suite, bdev_set_io_timeout); 3392 CU_ADD_TEST(suite, lba_range_overlap); 3393 CU_ADD_TEST(suite, lock_lba_range_check_ranges); 3394 CU_ADD_TEST(suite, lock_lba_range_with_io_outstanding); 3395 CU_ADD_TEST(suite, lock_lba_range_overlapped); 3396 CU_ADD_TEST(suite, bdev_io_abort); 3397 3398 allocate_cores(1); 3399 allocate_threads(1); 3400 set_thread(0); 3401 3402 CU_basic_set_mode(CU_BRM_VERBOSE); 3403 CU_basic_run_tests(); 3404 num_failures = CU_get_number_of_failures(); 3405 CU_cleanup_registry(); 3406 3407 free_threads(); 3408 free_cores(); 3409 3410 return num_failures; 3411 } 3412