/*-
 *   BSD LICENSE
 *
 *   Copyright (c) Intel Corporation. All rights reserved.
 *   Copyright (c) 2019 Mellanox Technologies LTD. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "spdk_cunit.h"

#include "common/lib/ut_multithread.c"
#include "unit/lib/json_mock.c"

#include "spdk/config.h"
/* HACK: disable VTune integration so the unit test doesn't need VTune headers and libs to build */
#undef SPDK_CONFIG_VTUNE

#include "bdev/bdev.c"

DEFINE_STUB(spdk_conf_find_section, struct spdk_conf_section *, (struct spdk_conf *cp,
		const char *name), NULL);
DEFINE_STUB(spdk_conf_section_get_nmval, char *,
	    (struct spdk_conf_section *sp, const char *key, int idx1, int idx2), NULL);
DEFINE_STUB(spdk_conf_section_get_intval, int, (struct spdk_conf_section *sp, const char *key), -1);

struct spdk_trace_histories *g_trace_histories;
DEFINE_STUB_V(spdk_trace_add_register_fn, (struct spdk_trace_register_fn *reg_fn));
DEFINE_STUB_V(spdk_trace_register_owner, (uint8_t type, char id_prefix));
DEFINE_STUB_V(spdk_trace_register_object, (uint8_t type, char id_prefix));
DEFINE_STUB_V(spdk_trace_register_description, (const char *name,
		uint16_t tpoint_id, uint8_t owner_type,
		uint8_t object_type, uint8_t new_object,
		uint8_t arg1_type, const char *arg1_name));
DEFINE_STUB_V(_spdk_trace_record, (uint64_t tsc, uint16_t tpoint_id, uint16_t poller_id,
				   uint32_t size, uint64_t object_id, uint64_t arg1));
DEFINE_STUB(spdk_notify_send, uint64_t, (const char *type, const char *ctx), 0);
DEFINE_STUB(spdk_notify_type_register, struct spdk_notify_type *, (const char *type), NULL);

int g_status;
int g_count;
enum spdk_bdev_event_type g_event_type1;
enum spdk_bdev_event_type g_event_type2;
struct spdk_histogram_data *g_histogram;
void *g_unregister_arg;
int g_unregister_rc;

void
spdk_scsi_nvme_translate(const struct spdk_bdev_io *bdev_io,
			 int *sc, int *sk, int *asc, int *ascq)
{
}
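
/* Suite-level init/clean callbacks required by CUnit; this test has no
 * per-suite state to set up or tear down.
 */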
static int
null_init(void)
{
	return 0;
}

static int
null_clean(void)
{
	return 0;
}

static int
stub_destruct(void *ctx)
{
	return 0;
}

struct ut_expected_io {
	uint8_t				type;
	uint64_t			offset;
	uint64_t			length;
	int				iovcnt;
	struct iovec			iov[BDEV_IO_NUM_CHILD_IOV];
	void				*md_buf;
	TAILQ_ENTRY(ut_expected_io)	link;
};

struct bdev_ut_channel {
	TAILQ_HEAD(, spdk_bdev_io)	outstanding_io;
	uint32_t			outstanding_io_count;
	TAILQ_HEAD(, ut_expected_io)	expected_io;
};

static bool g_io_done;
static struct spdk_bdev_io *g_bdev_io;
static enum spdk_bdev_io_status g_io_status;
static enum spdk_bdev_io_status g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS;
static uint32_t g_bdev_ut_io_device;
static struct bdev_ut_channel *g_bdev_ut_channel;
static void *g_compare_read_buf;
static uint32_t g_compare_read_buf_len;
static void *g_compare_write_buf;
static uint32_t g_compare_write_buf_len;
static bool g_abort_done;
static enum spdk_bdev_io_status g_abort_status;

static struct ut_expected_io *
ut_alloc_expected_io(uint8_t type, uint64_t offset, uint64_t length, int iovcnt)
{
	struct ut_expected_io *expected_io;

	expected_io = calloc(1, sizeof(*expected_io));
	SPDK_CU_ASSERT_FATAL(expected_io != NULL);

	expected_io->type = type;
	expected_io->offset = offset;
	expected_io->length = length;
	expected_io->iovcnt = iovcnt;

	return expected_io;
}

static void
ut_expected_io_set_iov(struct ut_expected_io *expected_io, int pos, void *base, size_t len)
{
	expected_io->iov[pos].iov_base = base;
	expected_io->iov[pos].iov_len = len;
}

static void
stub_submit_request(struct spdk_io_channel *_ch, struct spdk_bdev_io *bdev_io)
{
	struct bdev_ut_channel *ch = spdk_io_channel_get_ctx(_ch);
	struct ut_expected_io *expected_io;
	struct iovec *iov, *expected_iov;
	struct spdk_bdev_io *bio_to_abort;
	int i;

	g_bdev_io = bdev_io;

	if (g_compare_read_buf && bdev_io->type == SPDK_BDEV_IO_TYPE_READ) {
		uint32_t len = bdev_io->u.bdev.iovs[0].iov_len;

		CU_ASSERT(bdev_io->u.bdev.iovcnt == 1);
		CU_ASSERT(g_compare_read_buf_len == len);
		memcpy(bdev_io->u.bdev.iovs[0].iov_base, g_compare_read_buf, len);
	}

	if (g_compare_write_buf && bdev_io->type == SPDK_BDEV_IO_TYPE_WRITE) {
		uint32_t len = bdev_io->u.bdev.iovs[0].iov_len;

		CU_ASSERT(bdev_io->u.bdev.iovcnt == 1);
		CU_ASSERT(g_compare_write_buf_len == len);
		memcpy(g_compare_write_buf, bdev_io->u.bdev.iovs[0].iov_base, len);
	}

	if (g_compare_read_buf && bdev_io->type == SPDK_BDEV_IO_TYPE_COMPARE) {
		uint32_t len = bdev_io->u.bdev.iovs[0].iov_len;

		CU_ASSERT(bdev_io->u.bdev.iovcnt == 1);
		CU_ASSERT(g_compare_read_buf_len == len);
		if (memcmp(bdev_io->u.bdev.iovs[0].iov_base, g_compare_read_buf, len)) {
			g_io_exp_status = SPDK_BDEV_IO_STATUS_MISCOMPARE;
		}
	}

	if (bdev_io->type == SPDK_BDEV_IO_TYPE_ABORT) {
		if (g_io_exp_status == SPDK_BDEV_IO_STATUS_SUCCESS) {
			TAILQ_FOREACH(bio_to_abort, &ch->outstanding_io, module_link) {
				if (bio_to_abort == bdev_io->u.abort.bio_to_abort) {
					TAILQ_REMOVE(&ch->outstanding_io, bio_to_abort, module_link);
					ch->outstanding_io_count--;
					spdk_bdev_io_complete(bio_to_abort, SPDK_BDEV_IO_STATUS_FAILED);
					break;
				}
			}
		}
	}
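
	/* Every submitted I/O is parked on the channel's outstanding queue (the
	 * tests complete it later via stub_complete_io()) and, if the test set up
	 * an expectation, checked field-by-field against the next ut_expected_io
	 * entry.
	 */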
	TAILQ_INSERT_TAIL(&ch->outstanding_io, bdev_io, module_link);
	ch->outstanding_io_count++;

	expected_io = TAILQ_FIRST(&ch->expected_io);
	if (expected_io == NULL) {
		return;
	}
	TAILQ_REMOVE(&ch->expected_io, expected_io, link);

	if (expected_io->type != SPDK_BDEV_IO_TYPE_INVALID) {
		CU_ASSERT(bdev_io->type == expected_io->type);
	}

	if (expected_io->md_buf != NULL) {
		CU_ASSERT(expected_io->md_buf == bdev_io->u.bdev.md_buf);
	}

	if (expected_io->length == 0) {
		free(expected_io);
		return;
	}

	CU_ASSERT(expected_io->offset == bdev_io->u.bdev.offset_blocks);
	CU_ASSERT(expected_io->length == bdev_io->u.bdev.num_blocks);

	if (expected_io->iovcnt == 0) {
		free(expected_io);
		/* UNMAP, WRITE_ZEROES and FLUSH don't have iovs, so we can just return now. */
		return;
	}

	CU_ASSERT(expected_io->iovcnt == bdev_io->u.bdev.iovcnt);
	for (i = 0; i < expected_io->iovcnt; i++) {
		iov = &bdev_io->u.bdev.iovs[i];
		expected_iov = &expected_io->iov[i];
		CU_ASSERT(iov->iov_len == expected_iov->iov_len);
		CU_ASSERT(iov->iov_base == expected_iov->iov_base);
	}

	free(expected_io);
}

static void
stub_submit_request_get_buf_cb(struct spdk_io_channel *_ch,
			       struct spdk_bdev_io *bdev_io, bool success)
{
	CU_ASSERT(success == true);

	stub_submit_request(_ch, bdev_io);
}

static void
stub_submit_request_get_buf(struct spdk_io_channel *_ch, struct spdk_bdev_io *bdev_io)
{
	spdk_bdev_io_get_buf(bdev_io, stub_submit_request_get_buf_cb,
			     bdev_io->u.bdev.num_blocks * bdev_io->bdev->blocklen);
}

static uint32_t
stub_complete_io(uint32_t num_to_complete)
{
	struct bdev_ut_channel *ch = g_bdev_ut_channel;
	struct spdk_bdev_io *bdev_io;
	static enum spdk_bdev_io_status io_status;
	uint32_t num_completed = 0;

	while (num_completed < num_to_complete) {
		if (TAILQ_EMPTY(&ch->outstanding_io)) {
			break;
		}
		bdev_io = TAILQ_FIRST(&ch->outstanding_io);
		TAILQ_REMOVE(&ch->outstanding_io, bdev_io, module_link);
		ch->outstanding_io_count--;
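		/* Complete with whatever status the test expects: g_io_exp_status
		 * defaults to SUCCESS and is overridden by tests that exercise
		 * error paths.
		 */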
		io_status = g_io_exp_status == SPDK_BDEV_IO_STATUS_SUCCESS ? SPDK_BDEV_IO_STATUS_SUCCESS :
			    g_io_exp_status;
		spdk_bdev_io_complete(bdev_io, io_status);
		num_completed++;
	}

	return num_completed;
}

static struct spdk_io_channel *
bdev_ut_get_io_channel(void *ctx)
{
	return spdk_get_io_channel(&g_bdev_ut_io_device);
}

static bool g_io_types_supported[SPDK_BDEV_NUM_IO_TYPES] = {
	[SPDK_BDEV_IO_TYPE_READ]		= true,
	[SPDK_BDEV_IO_TYPE_WRITE]		= true,
	[SPDK_BDEV_IO_TYPE_COMPARE]		= true,
	[SPDK_BDEV_IO_TYPE_UNMAP]		= true,
	[SPDK_BDEV_IO_TYPE_FLUSH]		= true,
	[SPDK_BDEV_IO_TYPE_RESET]		= true,
	[SPDK_BDEV_IO_TYPE_NVME_ADMIN]		= true,
	[SPDK_BDEV_IO_TYPE_NVME_IO]		= true,
	[SPDK_BDEV_IO_TYPE_NVME_IO_MD]		= true,
	[SPDK_BDEV_IO_TYPE_WRITE_ZEROES]	= true,
	[SPDK_BDEV_IO_TYPE_ZCOPY]		= true,
	[SPDK_BDEV_IO_TYPE_ABORT]		= true,
};

static void
ut_enable_io_type(enum spdk_bdev_io_type io_type, bool enable)
{
	g_io_types_supported[io_type] = enable;
}

static bool
stub_io_type_supported(void *_bdev, enum spdk_bdev_io_type io_type)
{
	return g_io_types_supported[io_type];
}

static struct spdk_bdev_fn_table fn_table = {
	.destruct = stub_destruct,
	.submit_request = stub_submit_request,
	.get_io_channel = bdev_ut_get_io_channel,
	.io_type_supported = stub_io_type_supported,
};

static int
bdev_ut_create_ch(void *io_device, void *ctx_buf)
{
	struct bdev_ut_channel *ch = ctx_buf;

	CU_ASSERT(g_bdev_ut_channel == NULL);
	g_bdev_ut_channel = ch;

	TAILQ_INIT(&ch->outstanding_io);
	ch->outstanding_io_count = 0;
	TAILQ_INIT(&ch->expected_io);
	return 0;
}

static void
bdev_ut_destroy_ch(void *io_device, void *ctx_buf)
{
	CU_ASSERT(g_bdev_ut_channel != NULL);
	g_bdev_ut_channel = NULL;
}

struct spdk_bdev_module bdev_ut_if;

static int
bdev_ut_module_init(void)
{
	spdk_io_device_register(&g_bdev_ut_io_device, bdev_ut_create_ch, bdev_ut_destroy_ch,
				sizeof(struct bdev_ut_channel), NULL);
	spdk_bdev_module_init_done(&bdev_ut_if);
	return 0;
}

static void
bdev_ut_module_fini(void)
{
	spdk_io_device_unregister(&g_bdev_ut_io_device, NULL);
}

struct spdk_bdev_module bdev_ut_if = {
	.name = "bdev_ut",
	.module_init = bdev_ut_module_init,
	.module_fini = bdev_ut_module_fini,
	.async_init = true,
};

static void vbdev_ut_examine(struct spdk_bdev *bdev);

static int
vbdev_ut_module_init(void)
{
	return 0;
}

static void
vbdev_ut_module_fini(void)
{
}

struct spdk_bdev_module vbdev_ut_if = {
	.name = "vbdev_ut",
	.module_init = vbdev_ut_module_init,
	.module_fini = vbdev_ut_module_fini,
	.examine_config = vbdev_ut_examine,
};

SPDK_BDEV_MODULE_REGISTER(bdev_ut, &bdev_ut_if)
SPDK_BDEV_MODULE_REGISTER(vbdev_ut, &vbdev_ut_if)

static void
vbdev_ut_examine(struct spdk_bdev *bdev)
{
	spdk_bdev_module_examine_done(&vbdev_ut_if);
}

static struct spdk_bdev *
allocate_bdev(char *name)
{
	struct spdk_bdev *bdev;
	int rc;

	bdev = calloc(1, sizeof(*bdev));
	SPDK_CU_ASSERT_FATAL(bdev != NULL);

	bdev->name = name;
	bdev->fn_table = &fn_table;
	bdev->module = &bdev_ut_if;
	bdev->blockcnt = 1024;
	bdev->blocklen = 512;

	rc = spdk_bdev_register(bdev);
	CU_ASSERT(rc == 0);

	return bdev;
}
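
/* Like allocate_bdev(), but the bdev is owned by vbdev_ut_if and blockcnt/
 * blocklen are left at zero: the claim/open tests that use it never submit
 * I/O to the virtual bdevs.
 */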
static struct spdk_bdev *
allocate_vbdev(char *name)
{
	struct spdk_bdev *bdev;
	int rc;

	bdev = calloc(1, sizeof(*bdev));
	SPDK_CU_ASSERT_FATAL(bdev != NULL);

	bdev->name = name;
	bdev->fn_table = &fn_table;
	bdev->module = &vbdev_ut_if;

	rc = spdk_bdev_register(bdev);
	CU_ASSERT(rc == 0);

	return bdev;
}

static void
free_bdev(struct spdk_bdev *bdev)
{
	spdk_bdev_unregister(bdev, NULL, NULL);
	poll_threads();
	memset(bdev, 0xFF, sizeof(*bdev));
	free(bdev);
}

static void
free_vbdev(struct spdk_bdev *bdev)
{
	spdk_bdev_unregister(bdev, NULL, NULL);
	poll_threads();
	memset(bdev, 0xFF, sizeof(*bdev));
	free(bdev);
}

static void
get_device_stat_cb(struct spdk_bdev *bdev, struct spdk_bdev_io_stat *stat, void *cb_arg, int rc)
{
	const char *bdev_name;

	CU_ASSERT(bdev != NULL);
	CU_ASSERT(rc == 0);
	bdev_name = spdk_bdev_get_name(bdev);
	CU_ASSERT_STRING_EQUAL(bdev_name, "bdev0");

	free(stat);
	free_bdev(bdev);

	*(bool *)cb_arg = true;
}

static void
bdev_unregister_cb(void *cb_arg, int rc)
{
	g_unregister_arg = cb_arg;
	g_unregister_rc = rc;
}

static void
bdev_open_cb1(enum spdk_bdev_event_type type, struct spdk_bdev *bdev, void *event_ctx)
{
	struct spdk_bdev_desc *desc = *(struct spdk_bdev_desc **)event_ctx;

	g_event_type1 = type;
	if (SPDK_BDEV_EVENT_REMOVE == type) {
		spdk_bdev_close(desc);
	}
}

static void
bdev_open_cb2(enum spdk_bdev_event_type type, struct spdk_bdev *bdev, void *event_ctx)
{
	struct spdk_bdev_desc *desc = *(struct spdk_bdev_desc **)event_ctx;

	g_event_type2 = type;
	if (SPDK_BDEV_EVENT_REMOVE == type) {
		spdk_bdev_close(desc);
	}
}

static void
get_device_stat_test(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_io_stat *stat;
	bool done;

	bdev = allocate_bdev("bdev0");
	stat = calloc(1, sizeof(struct spdk_bdev_io_stat));
	if (stat == NULL) {
		free_bdev(bdev);
		return;
	}

	done = false;
	spdk_bdev_get_device_stat(bdev, stat, get_device_stat_cb, &done);
	while (!done) {
		poll_threads();
	}
}

static void
open_write_test(void)
{
	struct spdk_bdev *bdev[9];
	struct spdk_bdev_desc *desc[9] = {};
	int rc;

	/*
	 * Create a tree of bdevs to test various open w/ write cases.
	 *
	 * bdev0 through bdev3 are physical block devices, such as NVMe
	 * namespaces or Ceph block devices.
	 *
	 * bdev4 is a virtual bdev with multiple base bdevs.  This models
	 * caching or RAID use cases.
	 *
	 * bdev5 through bdev7 are all virtual bdevs with the same base
	 * bdev (except bdev7).  This models partitioning or logical volume
	 * use cases.
	 *
	 * bdev7 is a virtual bdev with multiple base bdevs.  One of base bdevs
	 * (bdev2) is shared with other virtual bdevs: bdev5 and bdev6.  This
	 * models caching, RAID, partitioning or logical volumes use cases.
	 *
	 * bdev8 is a virtual bdev with multiple base bdevs, but these
	 * base bdevs are themselves virtual bdevs.
	 *
	 *                bdev8
	 *                  |
	 *            +----------+
	 *            |          |
	 *          bdev4      bdev5   bdev6   bdev7
	 *            |          |       |       |
	 *        +---+---+      +---+   +   +---+---+
	 *        |       |          \   |  /        |
	 *      bdev0   bdev1         bdev2        bdev3
	 */

	bdev[0] = allocate_bdev("bdev0");
	rc = spdk_bdev_module_claim_bdev(bdev[0], NULL, &bdev_ut_if);
	CU_ASSERT(rc == 0);

	bdev[1] = allocate_bdev("bdev1");
	rc = spdk_bdev_module_claim_bdev(bdev[1], NULL, &bdev_ut_if);
	CU_ASSERT(rc == 0);

	bdev[2] = allocate_bdev("bdev2");
	rc = spdk_bdev_module_claim_bdev(bdev[2], NULL, &bdev_ut_if);
	CU_ASSERT(rc == 0);

	bdev[3] = allocate_bdev("bdev3");
	rc = spdk_bdev_module_claim_bdev(bdev[3], NULL, &bdev_ut_if);
	CU_ASSERT(rc == 0);

	bdev[4] = allocate_vbdev("bdev4");
	rc = spdk_bdev_module_claim_bdev(bdev[4], NULL, &bdev_ut_if);
	CU_ASSERT(rc == 0);

	bdev[5] = allocate_vbdev("bdev5");
	rc = spdk_bdev_module_claim_bdev(bdev[5], NULL, &bdev_ut_if);
	CU_ASSERT(rc == 0);

	bdev[6] = allocate_vbdev("bdev6");

	bdev[7] = allocate_vbdev("bdev7");

	bdev[8] = allocate_vbdev("bdev8");

	/* Open bdev0 read-only.  This should succeed. */
	rc = spdk_bdev_open(bdev[0], false, NULL, NULL, &desc[0]);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(desc[0] != NULL);
	spdk_bdev_close(desc[0]);

	/*
	 * Open bdev1 read/write.  This should fail since bdev1 has been claimed
	 * by a vbdev module.
	 */
	rc = spdk_bdev_open(bdev[1], true, NULL, NULL, &desc[1]);
	CU_ASSERT(rc == -EPERM);

	/*
	 * Open bdev4 read/write.  This should fail since bdev4 has been claimed
	 * by a vbdev module.
	 */
	rc = spdk_bdev_open(bdev[4], true, NULL, NULL, &desc[4]);
	CU_ASSERT(rc == -EPERM);

	/* Open bdev4 read-only.  This should succeed. */
	rc = spdk_bdev_open(bdev[4], false, NULL, NULL, &desc[4]);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(desc[4] != NULL);
	spdk_bdev_close(desc[4]);

	/*
	 * Open bdev8 read/write.  This should succeed since it is a leaf
	 * bdev.
	 */
	rc = spdk_bdev_open(bdev[8], true, NULL, NULL, &desc[8]);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(desc[8] != NULL);
	spdk_bdev_close(desc[8]);

	/*
	 * Open bdev5 read/write.  This should fail since bdev5 has been claimed
	 * by a vbdev module.
	 */
	rc = spdk_bdev_open(bdev[5], true, NULL, NULL, &desc[5]);
	CU_ASSERT(rc == -EPERM);
	/* Open bdev5 read-only.  This should succeed. */
	rc = spdk_bdev_open(bdev[5], false, NULL, NULL, &desc[5]);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(desc[5] != NULL);
	spdk_bdev_close(desc[5]);

	free_vbdev(bdev[8]);

	free_vbdev(bdev[5]);
	free_vbdev(bdev[6]);
	free_vbdev(bdev[7]);

	free_vbdev(bdev[4]);

	free_bdev(bdev[0]);
	free_bdev(bdev[1]);
	free_bdev(bdev[2]);
	free_bdev(bdev[3]);
}

static void
bytes_to_blocks_test(void)
{
	struct spdk_bdev bdev;
	uint64_t offset_blocks, num_blocks;

	memset(&bdev, 0, sizeof(bdev));

	bdev.blocklen = 512;

	/* All parameters valid */
	offset_blocks = 0;
	num_blocks = 0;
	CU_ASSERT(bdev_bytes_to_blocks(&bdev, 512, &offset_blocks, 1024, &num_blocks) == 0);
	CU_ASSERT(offset_blocks == 1);
	CU_ASSERT(num_blocks == 2);

	/* Offset not a block multiple */
	CU_ASSERT(bdev_bytes_to_blocks(&bdev, 3, &offset_blocks, 512, &num_blocks) != 0);

	/* Length not a block multiple */
	CU_ASSERT(bdev_bytes_to_blocks(&bdev, 512, &offset_blocks, 3, &num_blocks) != 0);

	/* In case blocklen is not a power of two */
	bdev.blocklen = 100;
	CU_ASSERT(bdev_bytes_to_blocks(&bdev, 100, &offset_blocks, 200, &num_blocks) == 0);
	CU_ASSERT(offset_blocks == 1);
	CU_ASSERT(num_blocks == 2);

	/* Offset not a block multiple */
	CU_ASSERT(bdev_bytes_to_blocks(&bdev, 3, &offset_blocks, 100, &num_blocks) != 0);

	/* Length not a block multiple */
	CU_ASSERT(bdev_bytes_to_blocks(&bdev, 100, &offset_blocks, 3, &num_blocks) != 0);
}

static void
num_blocks_test(void)
{
	struct spdk_bdev bdev;
	struct spdk_bdev_desc *desc = NULL;
	struct spdk_bdev_desc *desc_ext = NULL;
	int rc;

	memset(&bdev, 0, sizeof(bdev));
	bdev.name = "num_blocks";
	bdev.fn_table = &fn_table;
	bdev.module = &bdev_ut_if;
	spdk_bdev_register(&bdev);
	spdk_bdev_notify_blockcnt_change(&bdev, 50);

	/* Growing block number */
	CU_ASSERT(spdk_bdev_notify_blockcnt_change(&bdev, 70) == 0);
	/* Shrinking block number */
	CU_ASSERT(spdk_bdev_notify_blockcnt_change(&bdev, 30) == 0);

	/* In case bdev opened */
	rc = spdk_bdev_open(&bdev, false, NULL, NULL, &desc);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(desc != NULL);

	/* Growing block number */
	CU_ASSERT(spdk_bdev_notify_blockcnt_change(&bdev, 80) == 0);
	/* Shrinking block number */
	CU_ASSERT(spdk_bdev_notify_blockcnt_change(&bdev, 20) != 0);

	/* In case bdev opened with ext API */
	rc = spdk_bdev_open_ext("num_blocks", false, bdev_open_cb1, &desc_ext, &desc_ext);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(desc_ext != NULL);

	g_event_type1 = 0xFF;
	/* Growing block number */
	CU_ASSERT(spdk_bdev_notify_blockcnt_change(&bdev, 90) == 0);

	poll_threads();
	CU_ASSERT_EQUAL(g_event_type1, SPDK_BDEV_EVENT_RESIZE);

	g_event_type1 = 0xFF;
	/* Growing block number and closing */
	CU_ASSERT(spdk_bdev_notify_blockcnt_change(&bdev, 100) == 0);

	spdk_bdev_close(desc);
	spdk_bdev_close(desc_ext);
	spdk_bdev_unregister(&bdev, NULL, NULL);

	poll_threads();

	/* Callback is not called for closed device */
	CU_ASSERT_EQUAL(g_event_type1, 0xFF);
}

static void
io_valid_test(void)
{
	struct spdk_bdev bdev;

	memset(&bdev, 0, sizeof(bdev));

	bdev.blocklen = 512;
	spdk_bdev_notify_blockcnt_change(&bdev, 100);

	/* All parameters valid */
	CU_ASSERT(bdev_io_valid_blocks(&bdev, 1, 2) == true);

	/* Last valid block */
	CU_ASSERT(bdev_io_valid_blocks(&bdev, 99, 1) == true);

	/* Offset past end of bdev */
	CU_ASSERT(bdev_io_valid_blocks(&bdev, 100, 1) == false);

	/* Offset + length past end of bdev */
	CU_ASSERT(bdev_io_valid_blocks(&bdev, 99, 2) == false);

	/* Offset near end of uint64_t range (2^64 - 1) */
	CU_ASSERT(bdev_io_valid_blocks(&bdev, 18446744073709551615ULL, 1) == false);
}

static void
alias_add_del_test(void)
{
	struct spdk_bdev *bdev[3];
	int rc;

	/* Creating and registering bdevs */
	bdev[0] = allocate_bdev("bdev0");
	SPDK_CU_ASSERT_FATAL(bdev[0] != 0);

	bdev[1] = allocate_bdev("bdev1");
	SPDK_CU_ASSERT_FATAL(bdev[1] != 0);

	bdev[2] = allocate_bdev("bdev2");
	SPDK_CU_ASSERT_FATAL(bdev[2] != 0);

	poll_threads();

	/*
	 * Trying to add an alias identical to the bdev's name.
	 * Since the alias matches the name, it cannot be added to the aliases list.
	 */
	rc = spdk_bdev_alias_add(bdev[0], bdev[0]->name);
	CU_ASSERT(rc == -EEXIST);

	/*
	 * Trying to add an empty alias,
	 * this one should fail
	 */
	rc = spdk_bdev_alias_add(bdev[0], NULL);
	CU_ASSERT(rc == -EINVAL);

	/* Trying to add the same alias to two different registered bdevs */

	/* Alias is used first time, so this one should pass */
	rc = spdk_bdev_alias_add(bdev[0], "proper alias 0");
	CU_ASSERT(rc == 0);

	/* Alias was added to another bdev, so this one should fail */
	rc = spdk_bdev_alias_add(bdev[1], "proper alias 0");
	CU_ASSERT(rc == -EEXIST);

	/* Alias is used first time, so this one should pass */
	rc = spdk_bdev_alias_add(bdev[1], "proper alias 1");
	CU_ASSERT(rc == 0);

	/* Trying to remove an alias from registered bdevs */

	/* Alias is not on a bdev aliases list, so this one should fail */
	rc = spdk_bdev_alias_del(bdev[0], "not existing");
	CU_ASSERT(rc == -ENOENT);

	/* Alias is present on a bdev aliases list, so this one should pass */
	rc = spdk_bdev_alias_del(bdev[0], "proper alias 0");
	CU_ASSERT(rc == 0);

	/* Alias is present on a bdev aliases list, so this one should pass */
	rc = spdk_bdev_alias_del(bdev[1], "proper alias 1");
	CU_ASSERT(rc == 0);

	/* Trying to remove the name instead of an alias: this should fail, since the name cannot be changed or removed */
	rc = spdk_bdev_alias_del(bdev[0], bdev[0]->name);
	CU_ASSERT(rc != 0);

	/* Trying to del all aliases from an empty alias list */
	spdk_bdev_alias_del_all(bdev[2]);
	SPDK_CU_ASSERT_FATAL(TAILQ_EMPTY(&bdev[2]->aliases));

	/* Trying to del all aliases from a non-empty alias list */
	rc = spdk_bdev_alias_add(bdev[2], "alias0");
	CU_ASSERT(rc == 0);
	rc = spdk_bdev_alias_add(bdev[2], "alias1");
	CU_ASSERT(rc == 0);
	spdk_bdev_alias_del_all(bdev[2]);
	CU_ASSERT(TAILQ_EMPTY(&bdev[2]->aliases));

	/* Unregister and free bdevs */
	spdk_bdev_unregister(bdev[0], NULL, NULL);
	spdk_bdev_unregister(bdev[1], NULL, NULL);
	spdk_bdev_unregister(bdev[2], NULL, NULL);

	poll_threads();

	free(bdev[0]);
	free(bdev[1]);
	free(bdev[2]);
}

static void
io_done(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
{
	g_io_done = true;
	g_io_status = bdev_io->internal.status;
	spdk_bdev_free_io(bdev_io);
}
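
/* Minimal init/fini callbacks for spdk_bdev_initialize() and spdk_bdev_finish();
 * the tests only check that initialization reports success.
 */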
static void
bdev_init_cb(void *arg, int rc)
{
	CU_ASSERT(rc == 0);
}

static void
bdev_fini_cb(void *arg)
{
}

struct bdev_ut_io_wait_entry {
	struct spdk_bdev_io_wait_entry	entry;
	struct spdk_io_channel		*io_ch;
	struct spdk_bdev_desc		*desc;
	bool				submitted;
};

static void
io_wait_cb(void *arg)
{
	struct bdev_ut_io_wait_entry *entry = arg;
	int rc;

	rc = spdk_bdev_read_blocks(entry->desc, entry->io_ch, NULL, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	entry->submitted = true;
}

static void
bdev_io_types_test(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc = NULL;
	struct spdk_io_channel *io_ch;
	struct spdk_bdev_opts bdev_opts = {
		.bdev_io_pool_size = 4,
		.bdev_io_cache_size = 2,
	};
	int rc;

	rc = spdk_bdev_set_opts(&bdev_opts);
	CU_ASSERT(rc == 0);
	spdk_bdev_initialize(bdev_init_cb, NULL);
	poll_threads();

	bdev = allocate_bdev("bdev0");

	rc = spdk_bdev_open(bdev, true, NULL, NULL, &desc);
	CU_ASSERT(rc == 0);
	poll_threads();
	SPDK_CU_ASSERT_FATAL(desc != NULL);
	io_ch = spdk_bdev_get_io_channel(desc);
	CU_ASSERT(io_ch != NULL);

	/* WRITE and WRITE ZEROES are not supported */
	ut_enable_io_type(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, false);
	ut_enable_io_type(SPDK_BDEV_IO_TYPE_WRITE, false);
	rc = spdk_bdev_write_zeroes_blocks(desc, io_ch, 0, 128, io_done, NULL);
	CU_ASSERT(rc == -ENOTSUP);
	ut_enable_io_type(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, true);
	ut_enable_io_type(SPDK_BDEV_IO_TYPE_WRITE, true);

	spdk_put_io_channel(io_ch);
	spdk_bdev_close(desc);
	free_bdev(bdev);
	spdk_bdev_finish(bdev_fini_cb, NULL);
	poll_threads();
}

static void
bdev_io_wait_test(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc = NULL;
	struct spdk_io_channel *io_ch;
	struct spdk_bdev_opts bdev_opts = {
		.bdev_io_pool_size = 4,
		.bdev_io_cache_size = 2,
	};
	struct bdev_ut_io_wait_entry io_wait_entry;
	struct bdev_ut_io_wait_entry io_wait_entry2;
	int rc;

	rc = spdk_bdev_set_opts(&bdev_opts);
	CU_ASSERT(rc == 0);
	spdk_bdev_initialize(bdev_init_cb, NULL);
	poll_threads();

	bdev = allocate_bdev("bdev0");

	rc = spdk_bdev_open(bdev, true, NULL, NULL, &desc);
	CU_ASSERT(rc == 0);
	poll_threads();
	SPDK_CU_ASSERT_FATAL(desc != NULL);
	io_ch = spdk_bdev_get_io_channel(desc);
	CU_ASSERT(io_ch != NULL);

	rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 4);

	rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, NULL);
	CU_ASSERT(rc == -ENOMEM);

	io_wait_entry.entry.bdev = bdev;
	io_wait_entry.entry.cb_fn = io_wait_cb;
	io_wait_entry.entry.cb_arg = &io_wait_entry;
	io_wait_entry.io_ch = io_ch;
	io_wait_entry.desc = desc;
	io_wait_entry.submitted = false;
	/* Cannot use the same io_wait_entry for two different calls. */
	memcpy(&io_wait_entry2, &io_wait_entry, sizeof(io_wait_entry));
	io_wait_entry2.entry.cb_arg = &io_wait_entry2;
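
	/* The pool has only 4 spdk_bdev_io objects (bdev_io_pool_size above), so
	 * the fifth submission failed with -ENOMEM.  Queued waiters are invoked,
	 * one per freed spdk_bdev_io, as the outstanding I/O complete, and
	 * io_wait_cb() resubmits the read from the callback.
	 */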
	/* Queue two I/O waits. */
	rc = spdk_bdev_queue_io_wait(bdev, io_ch, &io_wait_entry.entry);
	CU_ASSERT(rc == 0);
	CU_ASSERT(io_wait_entry.submitted == false);
	rc = spdk_bdev_queue_io_wait(bdev, io_ch, &io_wait_entry2.entry);
	CU_ASSERT(rc == 0);
	CU_ASSERT(io_wait_entry2.submitted == false);

	stub_complete_io(1);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 4);
	CU_ASSERT(io_wait_entry.submitted == true);
	CU_ASSERT(io_wait_entry2.submitted == false);

	stub_complete_io(1);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 4);
	CU_ASSERT(io_wait_entry2.submitted == true);

	stub_complete_io(4);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);

	spdk_put_io_channel(io_ch);
	spdk_bdev_close(desc);
	free_bdev(bdev);
	spdk_bdev_finish(bdev_fini_cb, NULL);
	poll_threads();
}

static void
bdev_io_spans_boundary_test(void)
{
	struct spdk_bdev bdev;
	struct spdk_bdev_io bdev_io;

	memset(&bdev, 0, sizeof(bdev));

	bdev.optimal_io_boundary = 0;
	bdev_io.bdev = &bdev;

	/* bdev has no optimal_io_boundary set - so this should return false. */
	CU_ASSERT(bdev_io_should_split(&bdev_io) == false);

	bdev.optimal_io_boundary = 32;
	bdev_io.type = SPDK_BDEV_IO_TYPE_RESET;

	/* RESETs are not based on LBAs - so this should return false. */
	CU_ASSERT(bdev_io_should_split(&bdev_io) == false);

	bdev_io.type = SPDK_BDEV_IO_TYPE_READ;
	bdev_io.u.bdev.offset_blocks = 0;
	bdev_io.u.bdev.num_blocks = 32;

	/* This I/O runs right up to, but does not cross, the boundary - so this should return false. */
	CU_ASSERT(bdev_io_should_split(&bdev_io) == false);

	bdev_io.u.bdev.num_blocks = 33;

	/* This I/O spans a boundary. */
	CU_ASSERT(bdev_io_should_split(&bdev_io) == true);
}

static void
bdev_io_split_test(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc = NULL;
	struct spdk_io_channel *io_ch;
	struct spdk_bdev_opts bdev_opts = {
		.bdev_io_pool_size = 512,
		.bdev_io_cache_size = 64,
	};
	struct iovec iov[BDEV_IO_NUM_CHILD_IOV * 2];
	struct ut_expected_io *expected_io;
	uint64_t i;
	int rc;

	rc = spdk_bdev_set_opts(&bdev_opts);
	CU_ASSERT(rc == 0);
	spdk_bdev_initialize(bdev_init_cb, NULL);

	bdev = allocate_bdev("bdev0");

	rc = spdk_bdev_open(bdev, true, NULL, NULL, &desc);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(desc != NULL);
	io_ch = spdk_bdev_get_io_channel(desc);
	CU_ASSERT(io_ch != NULL);

	bdev->optimal_io_boundary = 16;
	bdev->split_on_optimal_io_boundary = false;

	g_io_done = false;
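
	/* Splitting happens only when split_on_optimal_io_boundary is set; the
	 * boundary is expressed in blocks (16 blocks = 8K with the 512-byte
	 * blocks used here), and child I/O are cut at each multiple of it.
	 */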
	/* First test that the I/O does not get split if split_on_optimal_io_boundary == false. */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 14, 8, 1);
	ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 8 * 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 8, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);

	bdev->split_on_optimal_io_boundary = true;

	/* Now test that a single-vector command is split correctly.
	 * Offset 14, length 8, payload 0xF000
	 *  Child - Offset 14, length 2, payload 0xF000
	 *  Child - Offset 16, length 6, payload 0xF000 + 2 * 512
	 *
	 * Set up the expected values before calling spdk_bdev_read_blocks
	 */
	g_io_done = false;
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 14, 2, 1);
	ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 2 * 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 16, 6, 1);
	ut_expected_io_set_iov(expected_io, 0, (void *)(0xF000 + 2 * 512), 6 * 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	/* spdk_bdev_read_blocks will submit the first child immediately. */
	rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 8, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2);
	stub_complete_io(2);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);

	/* Now set up a more complex, multi-vector command that needs to be split,
	 * including splitting iovecs.
	 */
	iov[0].iov_base = (void *)0x10000;
	iov[0].iov_len = 512;
	iov[1].iov_base = (void *)0x20000;
	iov[1].iov_len = 20 * 512;
	iov[2].iov_base = (void *)0x30000;
	iov[2].iov_len = 11 * 512;

	g_io_done = false;
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 14, 2, 2);
	ut_expected_io_set_iov(expected_io, 0, (void *)0x10000, 512);
	ut_expected_io_set_iov(expected_io, 1, (void *)0x20000, 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 16, 16, 1);
	ut_expected_io_set_iov(expected_io, 0, (void *)(0x20000 + 512), 16 * 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 32, 14, 2);
	ut_expected_io_set_iov(expected_io, 0, (void *)(0x20000 + 17 * 512), 3 * 512);
	ut_expected_io_set_iov(expected_io, 1, (void *)0x30000, 11 * 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	rc = spdk_bdev_writev_blocks(desc, io_ch, iov, 3, 14, 32, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 3);
	stub_complete_io(3);
	CU_ASSERT(g_io_done == true);
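
	/* To check the arithmetic: the write covers blocks 14..45 (offset 14,
	 * length 1 + 20 + 11 = 32 blocks), so the 16-block boundaries at 16 and
	 * 32 produce children of 2, 16 and 14 blocks, and iov[1] is split across
	 * all three children.
	 */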

	/* Test multi vector command that needs to be split by strip and then needs to be
	 * split further due to the capacity of child iovs.
	 */
	for (i = 0; i < BDEV_IO_NUM_CHILD_IOV * 2; i++) {
		iov[i].iov_base = (void *)((i + 1) * 0x10000);
		iov[i].iov_len = 512;
	}

	bdev->optimal_io_boundary = BDEV_IO_NUM_CHILD_IOV;
	g_io_done = false;
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0, BDEV_IO_NUM_CHILD_IOV,
					   BDEV_IO_NUM_CHILD_IOV);
	for (i = 0; i < BDEV_IO_NUM_CHILD_IOV; i++) {
		ut_expected_io_set_iov(expected_io, i, (void *)((i + 1) * 0x10000), 512);
	}
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, BDEV_IO_NUM_CHILD_IOV,
					   BDEV_IO_NUM_CHILD_IOV, BDEV_IO_NUM_CHILD_IOV);
	for (i = 0; i < BDEV_IO_NUM_CHILD_IOV; i++) {
		ut_expected_io_set_iov(expected_io, i,
				       (void *)((i + 1 + BDEV_IO_NUM_CHILD_IOV) * 0x10000), 512);
	}
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	rc = spdk_bdev_readv_blocks(desc, io_ch, iov, BDEV_IO_NUM_CHILD_IOV * 2, 0,
				    BDEV_IO_NUM_CHILD_IOV * 2, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);

	/* Test multi vector command that needs to be split by strip and then needs to be
	 * split further due to the capacity of child iovs.  In this case, the length of
	 * the rest of the iovec array with an I/O boundary is a multiple of the block size.
	 */

	/* Fill the iovec array for exactly one boundary.  The iovec cnt for this boundary
	 * is BDEV_IO_NUM_CHILD_IOV + 1, which exceeds the capacity of child iovs.
	 */
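	/* With 30 iovs of 512 bytes and two of 256, the first 32-block boundary
	 * (16384 bytes) would need 33 iovs (30 * 512 + 2 * 256 + 512), one more
	 * than a child can carry.  The first child therefore stops after 31
	 * blocks (32 iovs), and the remaining 512-byte iovs become two
	 * single-block children.
	 */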
	for (i = 0; i < BDEV_IO_NUM_CHILD_IOV - 2; i++) {
		iov[i].iov_base = (void *)((i + 1) * 0x10000);
		iov[i].iov_len = 512;
	}
	for (i = BDEV_IO_NUM_CHILD_IOV - 2; i < BDEV_IO_NUM_CHILD_IOV; i++) {
		iov[i].iov_base = (void *)((i + 1) * 0x10000);
		iov[i].iov_len = 256;
	}
	iov[BDEV_IO_NUM_CHILD_IOV].iov_base = (void *)((BDEV_IO_NUM_CHILD_IOV + 1) * 0x10000);
	iov[BDEV_IO_NUM_CHILD_IOV].iov_len = 512;

	/* Add an extra iovec to trigger split */
	iov[BDEV_IO_NUM_CHILD_IOV + 1].iov_base = (void *)((BDEV_IO_NUM_CHILD_IOV + 2) * 0x10000);
	iov[BDEV_IO_NUM_CHILD_IOV + 1].iov_len = 512;

	bdev->optimal_io_boundary = BDEV_IO_NUM_CHILD_IOV;
	g_io_done = false;
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0,
					   BDEV_IO_NUM_CHILD_IOV - 1, BDEV_IO_NUM_CHILD_IOV);
	for (i = 0; i < BDEV_IO_NUM_CHILD_IOV - 2; i++) {
		ut_expected_io_set_iov(expected_io, i,
				       (void *)((i + 1) * 0x10000), 512);
	}
	for (i = BDEV_IO_NUM_CHILD_IOV - 2; i < BDEV_IO_NUM_CHILD_IOV; i++) {
		ut_expected_io_set_iov(expected_io, i,
				       (void *)((i + 1) * 0x10000), 256);
	}
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, BDEV_IO_NUM_CHILD_IOV - 1,
					   1, 1);
	ut_expected_io_set_iov(expected_io, 0,
			       (void *)((BDEV_IO_NUM_CHILD_IOV + 1) * 0x10000), 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, BDEV_IO_NUM_CHILD_IOV,
					   1, 1);
	ut_expected_io_set_iov(expected_io, 0,
			       (void *)((BDEV_IO_NUM_CHILD_IOV + 2) * 0x10000), 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	rc = spdk_bdev_readv_blocks(desc, io_ch, iov, BDEV_IO_NUM_CHILD_IOV + 2, 0,
				    BDEV_IO_NUM_CHILD_IOV + 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2);
	stub_complete_io(2);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);

	/* Test multi vector command that needs to be split by strip and then needs to be
	 * split further due to the capacity of child iovs.  In this case the child request
	 * offset must be rewound to the last aligned offset so that the request completes
	 * successfully.
	 */
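	/* Here iov[31] is 256 bytes: a child filled to its 32-iov capacity would
	 * end mid-block (31 * 512 + 256 bytes), so the split rewinds to the
	 * 31-block mark and the two 256-byte iovs (iov[31] and iov[32]) are
	 * carried by the second child instead.
	 */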
	for (i = 0; i < BDEV_IO_NUM_CHILD_IOV - 1; i++) {
		iov[i].iov_base = (void *)((i + 1) * 0x10000);
		iov[i].iov_len = 512;
	}
	iov[BDEV_IO_NUM_CHILD_IOV - 1].iov_base = (void *)(BDEV_IO_NUM_CHILD_IOV * 0x10000);
	iov[BDEV_IO_NUM_CHILD_IOV - 1].iov_len = 256;

	iov[BDEV_IO_NUM_CHILD_IOV].iov_base = (void *)((BDEV_IO_NUM_CHILD_IOV + 1) * 0x10000);
	iov[BDEV_IO_NUM_CHILD_IOV].iov_len = 256;

	iov[BDEV_IO_NUM_CHILD_IOV + 1].iov_base = (void *)((BDEV_IO_NUM_CHILD_IOV + 2) * 0x10000);
	iov[BDEV_IO_NUM_CHILD_IOV + 1].iov_len = 512;

	bdev->optimal_io_boundary = BDEV_IO_NUM_CHILD_IOV;
	g_io_done = false;
	g_io_status = 0;
	/* The first expected io should run from offset 0 to BDEV_IO_NUM_CHILD_IOV - 1 */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0,
					   BDEV_IO_NUM_CHILD_IOV - 1, BDEV_IO_NUM_CHILD_IOV - 1);
	for (i = 0; i < BDEV_IO_NUM_CHILD_IOV - 1; i++) {
		ut_expected_io_set_iov(expected_io, i,
				       (void *)((i + 1) * 0x10000), 512);
	}
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
	/* The second expected io should run from offset BDEV_IO_NUM_CHILD_IOV - 1 to BDEV_IO_NUM_CHILD_IOV */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, BDEV_IO_NUM_CHILD_IOV - 1,
					   1, 2);
	ut_expected_io_set_iov(expected_io, 0,
			       (void *)(BDEV_IO_NUM_CHILD_IOV * 0x10000), 256);
	ut_expected_io_set_iov(expected_io, 1,
			       (void *)((BDEV_IO_NUM_CHILD_IOV + 1) * 0x10000), 256);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
	/* The third expected io should run from offset BDEV_IO_NUM_CHILD_IOV to BDEV_IO_NUM_CHILD_IOV + 1 */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, BDEV_IO_NUM_CHILD_IOV,
					   1, 1);
	ut_expected_io_set_iov(expected_io, 0,
			       (void *)((BDEV_IO_NUM_CHILD_IOV + 2) * 0x10000), 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	rc = spdk_bdev_readv_blocks(desc, io_ch, iov, BDEV_IO_NUM_CHILD_IOV * 2, 0,
				    BDEV_IO_NUM_CHILD_IOV + 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2);
	stub_complete_io(2);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);

	/* Test multi vector command that needs to be split due to the IO boundary and
	 * the capacity of child iovs.  Especially test the case when the command is
	 * split due to the capacity of child iovs and the tail address is not aligned
	 * with the block size, so it is rewound to the aligned address.
	 *
	 * The iovecs used in the read request are complex but are based on the data
	 * collected in a real issue.  We change the base addresses but keep the lengths
	 * so as not to lose the credibility of the test.
	 */
	bdev->optimal_io_boundary = 128;
	g_io_done = false;
	g_io_status = 0;

	for (i = 0; i < 31; i++) {
		iov[i].iov_base = (void *)(0xFEED0000000 + (i << 20));
		iov[i].iov_len = 1024;
	}
	iov[31].iov_base = (void *)0xFEED1F00000;
	iov[31].iov_len = 32768;
	iov[32].iov_base = (void *)0xFEED2000000;
	iov[32].iov_len = 160;
	iov[33].iov_base = (void *)0xFEED2100000;
	iov[33].iov_len = 4096;
	iov[34].iov_base = (void *)0xFEED2200000;
	iov[34].iov_len = 4096;
	iov[35].iov_base = (void *)0xFEED2300000;
	iov[35].iov_len = 4096;
	iov[36].iov_base = (void *)0xFEED2400000;
	iov[36].iov_len = 4096;
	iov[37].iov_base = (void *)0xFEED2500000;
	iov[37].iov_len = 4096;
	iov[38].iov_base = (void *)0xFEED2600000;
	iov[38].iov_len = 4096;
	iov[39].iov_base = (void *)0xFEED2700000;
	iov[39].iov_len = 4096;
	iov[40].iov_base = (void *)0xFEED2800000;
	iov[40].iov_len = 4096;
	iov[41].iov_base = (void *)0xFEED2900000;
	iov[41].iov_len = 4096;
	iov[42].iov_base = (void *)0xFEED2A00000;
	iov[42].iov_len = 4096;
	iov[43].iov_base = (void *)0xFEED2B00000;
	iov[43].iov_len = 12288;
	iov[44].iov_base = (void *)0xFEED2C00000;
	iov[44].iov_len = 8192;
	iov[45].iov_base = (void *)0xFEED2F00000;
	iov[45].iov_len = 4096;
	iov[46].iov_base = (void *)0xFEED3000000;
	iov[46].iov_len = 4096;
	iov[47].iov_base = (void *)0xFEED3100000;
	iov[47].iov_len = 4096;
	iov[48].iov_base = (void *)0xFEED3200000;
	iov[48].iov_len = 24576;
	iov[49].iov_base = (void *)0xFEED3300000;
	iov[49].iov_len = 16384;
	iov[50].iov_base = (void *)0xFEED3400000;
	iov[50].iov_len = 12288;
	iov[51].iov_base = (void *)0xFEED3500000;
	iov[51].iov_len = 4096;
	iov[52].iov_base = (void *)0xFEED3600000;
	iov[52].iov_len = 4096;
	iov[53].iov_base = (void *)0xFEED3700000;
	iov[53].iov_len = 4096;
	iov[54].iov_base = (void *)0xFEED3800000;
	iov[54].iov_len = 28672;
	iov[55].iov_base = (void *)0xFEED3900000;
	iov[55].iov_len = 20480;
	iov[56].iov_base = (void *)0xFEED3A00000;
	iov[56].iov_len = 4096;
	iov[57].iov_base = (void *)0xFEED3B00000;
	iov[57].iov_len = 12288;
	iov[58].iov_base = (void *)0xFEED3C00000;
	iov[58].iov_len = 4096;
	iov[59].iov_base = (void *)0xFEED3D00000;
	iov[59].iov_len = 4096;
	iov[60].iov_base = (void *)0xFEED3E00000;
	iov[60].iov_len = 352;

	/* The 1st child IO must be from iov[0] to iov[31], split by the capacity
	 * of child iovs.
	 */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0, 126, 32);
	for (i = 0; i < 32; i++) {
		ut_expected_io_set_iov(expected_io, i, iov[i].iov_base, iov[i].iov_len);
	}
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	/* The 2nd child IO must be from iov[32] to the first 864 bytes of iov[33],
	 * split by the IO boundary requirement.
	 */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 126, 2, 2);
	ut_expected_io_set_iov(expected_io, 0, iov[32].iov_base, iov[32].iov_len);
	ut_expected_io_set_iov(expected_io, 1, iov[33].iov_base, 864);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
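
	/* Checking the math: the first child carries 31 * 1024 + 32768 = 64512
	 * bytes, exactly 126 blocks, and the second child's 160 + 864 = 1024
	 * bytes bring the running total to the 128-block boundary.
	 */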

	/* The 3rd child IO must be from the remaining 3232 bytes of iov[33] to
	 * the first 864 bytes of iov[46], split by the IO boundary requirement.
	 */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 128, 128, 14);
	ut_expected_io_set_iov(expected_io, 0, (void *)((uintptr_t)iov[33].iov_base + 864),
			       iov[33].iov_len - 864);
	ut_expected_io_set_iov(expected_io, 1, iov[34].iov_base, iov[34].iov_len);
	ut_expected_io_set_iov(expected_io, 2, iov[35].iov_base, iov[35].iov_len);
	ut_expected_io_set_iov(expected_io, 3, iov[36].iov_base, iov[36].iov_len);
	ut_expected_io_set_iov(expected_io, 4, iov[37].iov_base, iov[37].iov_len);
	ut_expected_io_set_iov(expected_io, 5, iov[38].iov_base, iov[38].iov_len);
	ut_expected_io_set_iov(expected_io, 6, iov[39].iov_base, iov[39].iov_len);
	ut_expected_io_set_iov(expected_io, 7, iov[40].iov_base, iov[40].iov_len);
	ut_expected_io_set_iov(expected_io, 8, iov[41].iov_base, iov[41].iov_len);
	ut_expected_io_set_iov(expected_io, 9, iov[42].iov_base, iov[42].iov_len);
	ut_expected_io_set_iov(expected_io, 10, iov[43].iov_base, iov[43].iov_len);
	ut_expected_io_set_iov(expected_io, 11, iov[44].iov_base, iov[44].iov_len);
	ut_expected_io_set_iov(expected_io, 12, iov[45].iov_base, iov[45].iov_len);
	ut_expected_io_set_iov(expected_io, 13, iov[46].iov_base, 864);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	/* The 4th child IO must be from the remaining 3232 bytes of iov[46] to the
	 * first 864 bytes of iov[52], split by the IO boundary requirement.
	 */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 256, 128, 7);
	ut_expected_io_set_iov(expected_io, 0, (void *)((uintptr_t)iov[46].iov_base + 864),
			       iov[46].iov_len - 864);
	ut_expected_io_set_iov(expected_io, 1, iov[47].iov_base, iov[47].iov_len);
	ut_expected_io_set_iov(expected_io, 2, iov[48].iov_base, iov[48].iov_len);
	ut_expected_io_set_iov(expected_io, 3, iov[49].iov_base, iov[49].iov_len);
	ut_expected_io_set_iov(expected_io, 4, iov[50].iov_base, iov[50].iov_len);
	ut_expected_io_set_iov(expected_io, 5, iov[51].iov_base, iov[51].iov_len);
	ut_expected_io_set_iov(expected_io, 6, iov[52].iov_base, 864);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	/* The 5th child IO must be from the remaining 3232 bytes of iov[52] to
	 * the first 4960 bytes of iov[57], split by the IO boundary requirement.
	 */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 384, 128, 6);
	ut_expected_io_set_iov(expected_io, 0, (void *)((uintptr_t)iov[52].iov_base + 864),
			       iov[52].iov_len - 864);
	ut_expected_io_set_iov(expected_io, 1, iov[53].iov_base, iov[53].iov_len);
	ut_expected_io_set_iov(expected_io, 2, iov[54].iov_base, iov[54].iov_len);
	ut_expected_io_set_iov(expected_io, 3, iov[55].iov_base, iov[55].iov_len);
	ut_expected_io_set_iov(expected_io, 4, iov[56].iov_base, iov[56].iov_len);
	ut_expected_io_set_iov(expected_io, 5, iov[57].iov_base, 4960);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	/* The 6th child IO must be from the remaining 7328 bytes of iov[57]
	 * to the first 3936 bytes of iov[59], split by the capacity of child iovs.
	 */
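	/* Children 2 through 6 share what is left of the parent's child_iov array:
	 * 2 + 14 + 7 + 6 entries leave only 3 of the 32 slots for the 6th child.
	 * Three iovs would end mid-block (7328 + 4096 + 4096 = 15520 bytes), so
	 * the tail is rewound to 15360 bytes (30 blocks), cutting iov[59] at 3936.
	 */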
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 512, 30, 3);
	ut_expected_io_set_iov(expected_io, 0, (void *)((uintptr_t)iov[57].iov_base + 4960),
			       iov[57].iov_len - 4960);
	ut_expected_io_set_iov(expected_io, 1, iov[58].iov_base, iov[58].iov_len);
	ut_expected_io_set_iov(expected_io, 2, iov[59].iov_base, 3936);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	/* The 7th child IO is from the remaining 160 bytes of iov[59] and iov[60]. */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 542, 1, 2);
	ut_expected_io_set_iov(expected_io, 0, (void *)((uintptr_t)iov[59].iov_base + 3936),
			       iov[59].iov_len - 3936);
	ut_expected_io_set_iov(expected_io, 1, iov[60].iov_base, iov[60].iov_len);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	rc = spdk_bdev_readv_blocks(desc, io_ch, iov, 61, 0, 543, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 5);
	stub_complete_io(5);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
	CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS);

	/* Test a WRITE_ZEROES that would span an I/O boundary.  WRITE_ZEROES should not be
	 * split, so test that.
	 */
	bdev->optimal_io_boundary = 15;
	g_io_done = false;
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, 9, 36, 0);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	rc = spdk_bdev_write_zeroes_blocks(desc, io_ch, 9, 36, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == true);

	/* Test an UNMAP.  This should also not be split. */
	bdev->optimal_io_boundary = 16;
	g_io_done = false;
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_UNMAP, 15, 2, 0);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	rc = spdk_bdev_unmap_blocks(desc, io_ch, 15, 2, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == true);
	/* Test a FLUSH.  This should also not be split. */
	bdev->optimal_io_boundary = 16;
	g_io_done = false;
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_FLUSH, 15, 2, 0);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	rc = spdk_bdev_flush_blocks(desc, io_ch, 15, 2, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == true);

	CU_ASSERT(TAILQ_EMPTY(&g_bdev_ut_channel->expected_io));

	/* Children requests return an error status */
	bdev->optimal_io_boundary = 16;
	iov[0].iov_base = (void *)0x10000;
	iov[0].iov_len = 512 * 64;
	g_io_exp_status = SPDK_BDEV_IO_STATUS_FAILED;
	g_io_done = false;
	g_io_status = SPDK_BDEV_IO_STATUS_SUCCESS;

	rc = spdk_bdev_readv_blocks(desc, io_ch, iov, 1, 1, 64, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 5);
	stub_complete_io(4);
	CU_ASSERT(g_io_done == false);
	CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED);

	/* For this test we will create the following conditions to hit the code path where
	 * we are trying to send an IO following a split that has no iovs because we had to
	 * trim them for alignment reasons.
	 *
	 * - 16K boundary, our IO will start at offset 0 with a length of 0x4200
	 * - Our IOVs are 0x212 in size so that we run into the 16K boundary at child IOV
	 *   position 30 and overshoot by 0x2e.
	 * - That means we'll send the IO and loop back to pick up the remaining bytes at
	 *   child IOV index 31.  When we do, we find that we have to shorten index 31 by 0x2e
	 *   which eliminates that vector, so we just send the first split IO with 30 vectors
	 *   and let the completion pick up the last 2 vectors.
	 */
	bdev->optimal_io_boundary = 32;
	bdev->split_on_optimal_io_boundary = true;
	g_io_done = false;

	/* Init all parent IOVs to 0x212 */
	for (i = 0; i < BDEV_IO_NUM_CHILD_IOV + 2; i++) {
		iov[i].iov_base = (void *)((i + 1) * 0x10000);
		iov[i].iov_len = 0x212;
	}

	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0, BDEV_IO_NUM_CHILD_IOV,
					   BDEV_IO_NUM_CHILD_IOV - 1);
	/* expect 0-29 to be 1:1 with the parent iov */
	for (i = 0; i < BDEV_IO_NUM_CHILD_IOV - 2; i++) {
		ut_expected_io_set_iov(expected_io, i, iov[i].iov_base, iov[i].iov_len);
	}

	/* expect index 30 to be shortened to 0x1e4 (0x212 - 0x2e) because of the alignment
	 * where 0x2e is the amount we overshot the 16K boundary
	 */
	ut_expected_io_set_iov(expected_io, BDEV_IO_NUM_CHILD_IOV - 2,
			       (void *)(iov[BDEV_IO_NUM_CHILD_IOV - 2].iov_base), 0x1e4);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	/* 2nd child IO will have 2 remaining vectors, one to pick up from the one that was
	 * shortened that takes it to the next boundary and then a final one to get us to
	 * 0x4200 bytes for the IO.
	 */
1588 */ 1589 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, BDEV_IO_NUM_CHILD_IOV, 1590 BDEV_IO_NUM_CHILD_IOV, 2); 1591 /* position 30 picked up the remaining bytes to the next boundary */ 1592 ut_expected_io_set_iov(expected_io, 0, 1593 (void *)(iov[BDEV_IO_NUM_CHILD_IOV - 2].iov_base + 0x1e4), 0x2e); 1594 1595 /* position 31 picked the the rest of the trasnfer to get us to 0x4200 */ 1596 ut_expected_io_set_iov(expected_io, 1, 1597 (void *)(iov[BDEV_IO_NUM_CHILD_IOV - 1].iov_base), 0x1d2); 1598 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1599 1600 rc = spdk_bdev_readv_blocks(desc, io_ch, iov, BDEV_IO_NUM_CHILD_IOV + 1, 0, 1601 BDEV_IO_NUM_CHILD_IOV + 1, io_done, NULL); 1602 CU_ASSERT(rc == 0); 1603 CU_ASSERT(g_io_done == false); 1604 1605 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 1606 stub_complete_io(1); 1607 CU_ASSERT(g_io_done == false); 1608 1609 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 1610 stub_complete_io(1); 1611 CU_ASSERT(g_io_done == true); 1612 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 1613 1614 spdk_put_io_channel(io_ch); 1615 spdk_bdev_close(desc); 1616 free_bdev(bdev); 1617 spdk_bdev_finish(bdev_fini_cb, NULL); 1618 poll_threads(); 1619 } 1620 1621 static void 1622 bdev_io_split_with_io_wait(void) 1623 { 1624 struct spdk_bdev *bdev; 1625 struct spdk_bdev_desc *desc = NULL; 1626 struct spdk_io_channel *io_ch; 1627 struct spdk_bdev_channel *channel; 1628 struct spdk_bdev_mgmt_channel *mgmt_ch; 1629 struct spdk_bdev_opts bdev_opts = { 1630 .bdev_io_pool_size = 2, 1631 .bdev_io_cache_size = 1, 1632 }; 1633 struct iovec iov[3]; 1634 struct ut_expected_io *expected_io; 1635 int rc; 1636 1637 rc = spdk_bdev_set_opts(&bdev_opts); 1638 CU_ASSERT(rc == 0); 1639 spdk_bdev_initialize(bdev_init_cb, NULL); 1640 1641 bdev = allocate_bdev("bdev0"); 1642 1643 rc = spdk_bdev_open(bdev, true, NULL, NULL, &desc); 1644 CU_ASSERT(rc == 0); 1645 CU_ASSERT(desc != NULL); 1646 io_ch = spdk_bdev_get_io_channel(desc); 1647 CU_ASSERT(io_ch != NULL); 1648 channel = spdk_io_channel_get_ctx(io_ch); 1649 mgmt_ch = channel->shared_resource->mgmt_ch; 1650 1651 bdev->optimal_io_boundary = 16; 1652 bdev->split_on_optimal_io_boundary = true; 1653 1654 rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, NULL); 1655 CU_ASSERT(rc == 0); 1656 1657 /* Now test that a single-vector command is split correctly. 1658 * Offset 14, length 8, payload 0xF000 1659 * Child - Offset 14, length 2, payload 0xF000 1660 * Child - Offset 16, length 6, payload 0xF000 + 2 * 512 1661 * 1662 * Set up the expected values before calling spdk_bdev_read_blocks 1663 */ 1664 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 14, 2, 1); 1665 ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 2 * 512); 1666 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1667 1668 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 16, 6, 1); 1669 ut_expected_io_set_iov(expected_io, 0, (void *)(0xF000 + 2 * 512), 6 * 512); 1670 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1671 1672 /* The following children will be submitted sequentially due to the capacity of 1673 * spdk_bdev_io. 
 */

	/* The first child I/O will be queued to wait until an spdk_bdev_io becomes available */
	rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 8, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(!TAILQ_EMPTY(&mgmt_ch->io_wait_queue));
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);

	/* Completing the first read I/O will submit the first child */
	stub_complete_io(1);
	CU_ASSERT(TAILQ_EMPTY(&mgmt_ch->io_wait_queue));
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);

	/* Completing the first child will submit the second child */
	stub_complete_io(1);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);

	/* Complete the second child I/O. This should result in our callback getting
	 * invoked since the parent I/O is now complete.
	 */
	stub_complete_io(1);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);

	/* Now set up a more complex, multi-vector command that needs to be split,
	 * including splitting iovecs.
	 */
	iov[0].iov_base = (void *)0x10000;
	iov[0].iov_len = 512;
	iov[1].iov_base = (void *)0x20000;
	iov[1].iov_len = 20 * 512;
	iov[2].iov_base = (void *)0x30000;
	iov[2].iov_len = 11 * 512;

	g_io_done = false;
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 14, 2, 2);
	ut_expected_io_set_iov(expected_io, 0, (void *)0x10000, 512);
	ut_expected_io_set_iov(expected_io, 1, (void *)0x20000, 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 16, 16, 1);
	ut_expected_io_set_iov(expected_io, 0, (void *)(0x20000 + 512), 16 * 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 32, 14, 2);
	ut_expected_io_set_iov(expected_io, 0, (void *)(0x20000 + 17 * 512), 3 * 512);
	ut_expected_io_set_iov(expected_io, 1, (void *)0x30000, 11 * 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	rc = spdk_bdev_writev_blocks(desc, io_ch, iov, 3, 14, 32, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	/* The following children will be submitted sequentially due to the capacity of
	 * spdk_bdev_io.
	 */

	/* Completing the first child will submit the second child */
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == false);

	/* Completing the second child will submit the third child */
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == false);

	/* Completing the third child will result in our callback getting invoked
	 * since the parent I/O is now complete.
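	 *
	 * (Accounting check for the split above: the three children cover
	 * 2 + 16 + 14 = 32 blocks, matching the 32-block parent write, and the iov
	 * bytes line up the same way:
	 * (512 + 512) + (16 * 512) + (3 * 512 + 11 * 512) = 32 * 512.)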
1742 */ 1743 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 1744 stub_complete_io(1); 1745 CU_ASSERT(g_io_done == true); 1746 1747 CU_ASSERT(TAILQ_EMPTY(&g_bdev_ut_channel->expected_io)); 1748 1749 spdk_put_io_channel(io_ch); 1750 spdk_bdev_close(desc); 1751 free_bdev(bdev); 1752 spdk_bdev_finish(bdev_fini_cb, NULL); 1753 poll_threads(); 1754 } 1755 1756 static void 1757 bdev_io_alignment(void) 1758 { 1759 struct spdk_bdev *bdev; 1760 struct spdk_bdev_desc *desc = NULL; 1761 struct spdk_io_channel *io_ch; 1762 struct spdk_bdev_opts bdev_opts = { 1763 .bdev_io_pool_size = 20, 1764 .bdev_io_cache_size = 2, 1765 }; 1766 int rc; 1767 void *buf; 1768 struct iovec iovs[2]; 1769 int iovcnt; 1770 uint64_t alignment; 1771 1772 rc = spdk_bdev_set_opts(&bdev_opts); 1773 CU_ASSERT(rc == 0); 1774 spdk_bdev_initialize(bdev_init_cb, NULL); 1775 1776 fn_table.submit_request = stub_submit_request_get_buf; 1777 bdev = allocate_bdev("bdev0"); 1778 1779 rc = spdk_bdev_open(bdev, true, NULL, NULL, &desc); 1780 CU_ASSERT(rc == 0); 1781 CU_ASSERT(desc != NULL); 1782 io_ch = spdk_bdev_get_io_channel(desc); 1783 CU_ASSERT(io_ch != NULL); 1784 1785 /* Create aligned buffer */ 1786 rc = posix_memalign(&buf, 4096, 8192); 1787 SPDK_CU_ASSERT_FATAL(rc == 0); 1788 1789 /* Pass aligned single buffer with no alignment required */ 1790 alignment = 1; 1791 bdev->required_alignment = spdk_u32log2(alignment); 1792 1793 rc = spdk_bdev_write_blocks(desc, io_ch, buf, 0, 1, io_done, NULL); 1794 CU_ASSERT(rc == 0); 1795 stub_complete_io(1); 1796 CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt, 1797 alignment)); 1798 1799 rc = spdk_bdev_read_blocks(desc, io_ch, buf, 0, 1, io_done, NULL); 1800 CU_ASSERT(rc == 0); 1801 stub_complete_io(1); 1802 CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt, 1803 alignment)); 1804 1805 /* Pass unaligned single buffer with no alignment required */ 1806 alignment = 1; 1807 bdev->required_alignment = spdk_u32log2(alignment); 1808 1809 rc = spdk_bdev_write_blocks(desc, io_ch, buf + 4, 0, 1, io_done, NULL); 1810 CU_ASSERT(rc == 0); 1811 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0); 1812 CU_ASSERT(g_bdev_io->u.bdev.iovs[0].iov_base == buf + 4); 1813 stub_complete_io(1); 1814 1815 rc = spdk_bdev_read_blocks(desc, io_ch, buf + 4, 0, 1, io_done, NULL); 1816 CU_ASSERT(rc == 0); 1817 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0); 1818 CU_ASSERT(g_bdev_io->u.bdev.iovs[0].iov_base == buf + 4); 1819 stub_complete_io(1); 1820 1821 /* Pass unaligned single buffer with 512 alignment required */ 1822 alignment = 512; 1823 bdev->required_alignment = spdk_u32log2(alignment); 1824 1825 rc = spdk_bdev_write_blocks(desc, io_ch, buf + 4, 0, 1, io_done, NULL); 1826 CU_ASSERT(rc == 0); 1827 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 1); 1828 CU_ASSERT(g_bdev_io->u.bdev.iovs == &g_bdev_io->internal.bounce_iov); 1829 CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt, 1830 alignment)); 1831 stub_complete_io(1); 1832 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0); 1833 1834 rc = spdk_bdev_read_blocks(desc, io_ch, buf + 4, 0, 1, io_done, NULL); 1835 CU_ASSERT(rc == 0); 1836 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 1); 1837 CU_ASSERT(g_bdev_io->u.bdev.iovs == &g_bdev_io->internal.bounce_iov); 1838 CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt, 1839 alignment)); 1840 stub_complete_io(1); 1841 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0); 1842 1843 /* Pass unaligned single buffer with 4096 
alignment required */ 1844 alignment = 4096; 1845 bdev->required_alignment = spdk_u32log2(alignment); 1846 1847 rc = spdk_bdev_write_blocks(desc, io_ch, buf + 8, 0, 1, io_done, NULL); 1848 CU_ASSERT(rc == 0); 1849 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 1); 1850 CU_ASSERT(g_bdev_io->u.bdev.iovs == &g_bdev_io->internal.bounce_iov); 1851 CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt, 1852 alignment)); 1853 stub_complete_io(1); 1854 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0); 1855 1856 rc = spdk_bdev_read_blocks(desc, io_ch, buf + 8, 0, 1, io_done, NULL); 1857 CU_ASSERT(rc == 0); 1858 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 1); 1859 CU_ASSERT(g_bdev_io->u.bdev.iovs == &g_bdev_io->internal.bounce_iov); 1860 CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt, 1861 alignment)); 1862 stub_complete_io(1); 1863 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0); 1864 1865 /* Pass aligned iovs with no alignment required */ 1866 alignment = 1; 1867 bdev->required_alignment = spdk_u32log2(alignment); 1868 1869 iovcnt = 1; 1870 iovs[0].iov_base = buf; 1871 iovs[0].iov_len = 512; 1872 1873 rc = spdk_bdev_writev(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL); 1874 CU_ASSERT(rc == 0); 1875 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0); 1876 stub_complete_io(1); 1877 CU_ASSERT(g_bdev_io->u.bdev.iovs[0].iov_base == iovs[0].iov_base); 1878 1879 rc = spdk_bdev_readv(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL); 1880 CU_ASSERT(rc == 0); 1881 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0); 1882 stub_complete_io(1); 1883 CU_ASSERT(g_bdev_io->u.bdev.iovs[0].iov_base == iovs[0].iov_base); 1884 1885 /* Pass unaligned iovs with no alignment required */ 1886 alignment = 1; 1887 bdev->required_alignment = spdk_u32log2(alignment); 1888 1889 iovcnt = 2; 1890 iovs[0].iov_base = buf + 16; 1891 iovs[0].iov_len = 256; 1892 iovs[1].iov_base = buf + 16 + 256 + 32; 1893 iovs[1].iov_len = 256; 1894 1895 rc = spdk_bdev_writev(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL); 1896 CU_ASSERT(rc == 0); 1897 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0); 1898 stub_complete_io(1); 1899 CU_ASSERT(g_bdev_io->u.bdev.iovs[0].iov_base == iovs[0].iov_base); 1900 1901 rc = spdk_bdev_readv(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL); 1902 CU_ASSERT(rc == 0); 1903 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0); 1904 stub_complete_io(1); 1905 CU_ASSERT(g_bdev_io->u.bdev.iovs[0].iov_base == iovs[0].iov_base); 1906 1907 /* Pass unaligned iov with 2048 alignment required */ 1908 alignment = 2048; 1909 bdev->required_alignment = spdk_u32log2(alignment); 1910 1911 iovcnt = 2; 1912 iovs[0].iov_base = buf + 16; 1913 iovs[0].iov_len = 256; 1914 iovs[1].iov_base = buf + 16 + 256 + 32; 1915 iovs[1].iov_len = 256; 1916 1917 rc = spdk_bdev_writev(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL); 1918 CU_ASSERT(rc == 0); 1919 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == iovcnt); 1920 CU_ASSERT(g_bdev_io->u.bdev.iovs == &g_bdev_io->internal.bounce_iov); 1921 CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt, 1922 alignment)); 1923 stub_complete_io(1); 1924 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0); 1925 1926 rc = spdk_bdev_readv(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL); 1927 CU_ASSERT(rc == 0); 1928 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == iovcnt); 1929 CU_ASSERT(g_bdev_io->u.bdev.iovs == &g_bdev_io->internal.bounce_iov); 1930 CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt, 1931 alignment)); 
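	/* (A note on the mechanism exercised here: the unaligned user iovs were
	 * swapped for the internal bounce_iov, as asserted above; completing the
	 * read is expected to copy the bounce buffer back into the original iovs
	 * and restore them, which is why orig_iovcnt drops back to 0 below.)
	 */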
1932 stub_complete_io(1); 1933 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0); 1934 1935 /* Pass iov without allocated buffer without alignment required */ 1936 alignment = 1; 1937 bdev->required_alignment = spdk_u32log2(alignment); 1938 1939 iovcnt = 1; 1940 iovs[0].iov_base = NULL; 1941 iovs[0].iov_len = 0; 1942 1943 rc = spdk_bdev_readv(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL); 1944 CU_ASSERT(rc == 0); 1945 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0); 1946 CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt, 1947 alignment)); 1948 stub_complete_io(1); 1949 1950 /* Pass iov without allocated buffer with 1024 alignment required */ 1951 alignment = 1024; 1952 bdev->required_alignment = spdk_u32log2(alignment); 1953 1954 iovcnt = 1; 1955 iovs[0].iov_base = NULL; 1956 iovs[0].iov_len = 0; 1957 1958 rc = spdk_bdev_readv(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL); 1959 CU_ASSERT(rc == 0); 1960 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0); 1961 CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt, 1962 alignment)); 1963 stub_complete_io(1); 1964 1965 spdk_put_io_channel(io_ch); 1966 spdk_bdev_close(desc); 1967 free_bdev(bdev); 1968 fn_table.submit_request = stub_submit_request; 1969 spdk_bdev_finish(bdev_fini_cb, NULL); 1970 poll_threads(); 1971 1972 free(buf); 1973 } 1974 1975 static void 1976 bdev_io_alignment_with_boundary(void) 1977 { 1978 struct spdk_bdev *bdev; 1979 struct spdk_bdev_desc *desc = NULL; 1980 struct spdk_io_channel *io_ch; 1981 struct spdk_bdev_opts bdev_opts = { 1982 .bdev_io_pool_size = 20, 1983 .bdev_io_cache_size = 2, 1984 }; 1985 int rc; 1986 void *buf; 1987 struct iovec iovs[2]; 1988 int iovcnt; 1989 uint64_t alignment; 1990 1991 rc = spdk_bdev_set_opts(&bdev_opts); 1992 CU_ASSERT(rc == 0); 1993 spdk_bdev_initialize(bdev_init_cb, NULL); 1994 1995 fn_table.submit_request = stub_submit_request_get_buf; 1996 bdev = allocate_bdev("bdev0"); 1997 1998 rc = spdk_bdev_open(bdev, true, NULL, NULL, &desc); 1999 CU_ASSERT(rc == 0); 2000 CU_ASSERT(desc != NULL); 2001 io_ch = spdk_bdev_get_io_channel(desc); 2002 CU_ASSERT(io_ch != NULL); 2003 2004 /* Create aligned buffer */ 2005 rc = posix_memalign(&buf, 4096, 131072); 2006 SPDK_CU_ASSERT_FATAL(rc == 0); 2007 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 2008 2009 /* 512 * 3 with 2 IO boundary, allocate small data buffer from bdev layer */ 2010 alignment = 512; 2011 bdev->required_alignment = spdk_u32log2(alignment); 2012 bdev->optimal_io_boundary = 2; 2013 bdev->split_on_optimal_io_boundary = true; 2014 2015 iovcnt = 1; 2016 iovs[0].iov_base = NULL; 2017 iovs[0].iov_len = 512 * 3; 2018 2019 rc = spdk_bdev_readv_blocks(desc, io_ch, iovs, iovcnt, 1, 3, io_done, NULL); 2020 CU_ASSERT(rc == 0); 2021 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 2022 stub_complete_io(2); 2023 2024 /* 8KiB with 16 IO boundary, allocate large data buffer from bdev layer */ 2025 alignment = 512; 2026 bdev->required_alignment = spdk_u32log2(alignment); 2027 bdev->optimal_io_boundary = 16; 2028 bdev->split_on_optimal_io_boundary = true; 2029 2030 iovcnt = 1; 2031 iovs[0].iov_base = NULL; 2032 iovs[0].iov_len = 512 * 16; 2033 2034 rc = spdk_bdev_readv_blocks(desc, io_ch, iovs, iovcnt, 1, 16, io_done, NULL); 2035 CU_ASSERT(rc == 0); 2036 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 2037 stub_complete_io(2); 2038 2039 /* 512 * 160 with 128 IO boundary, 63.5KiB + 16.5KiB for the two children requests */ 2040 alignment = 512; 2041 bdev->required_alignment = 
spdk_u32log2(alignment); 2042 bdev->optimal_io_boundary = 128; 2043 bdev->split_on_optimal_io_boundary = true; 2044 2045 iovcnt = 1; 2046 iovs[0].iov_base = buf + 16; 2047 iovs[0].iov_len = 512 * 160; 2048 rc = spdk_bdev_readv_blocks(desc, io_ch, iovs, iovcnt, 1, 160, io_done, NULL); 2049 CU_ASSERT(rc == 0); 2050 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 2051 stub_complete_io(2); 2052 2053 /* 512 * 3 with 2 IO boundary */ 2054 alignment = 512; 2055 bdev->required_alignment = spdk_u32log2(alignment); 2056 bdev->optimal_io_boundary = 2; 2057 bdev->split_on_optimal_io_boundary = true; 2058 2059 iovcnt = 2; 2060 iovs[0].iov_base = buf + 16; 2061 iovs[0].iov_len = 512; 2062 iovs[1].iov_base = buf + 16 + 512 + 32; 2063 iovs[1].iov_len = 1024; 2064 2065 rc = spdk_bdev_writev_blocks(desc, io_ch, iovs, iovcnt, 1, 3, io_done, NULL); 2066 CU_ASSERT(rc == 0); 2067 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 2068 stub_complete_io(2); 2069 2070 rc = spdk_bdev_readv_blocks(desc, io_ch, iovs, iovcnt, 1, 3, io_done, NULL); 2071 CU_ASSERT(rc == 0); 2072 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 2073 stub_complete_io(2); 2074 2075 /* 512 * 64 with 32 IO boundary */ 2076 bdev->optimal_io_boundary = 32; 2077 iovcnt = 2; 2078 iovs[0].iov_base = buf + 16; 2079 iovs[0].iov_len = 16384; 2080 iovs[1].iov_base = buf + 16 + 16384 + 32; 2081 iovs[1].iov_len = 16384; 2082 2083 rc = spdk_bdev_writev_blocks(desc, io_ch, iovs, iovcnt, 1, 64, io_done, NULL); 2084 CU_ASSERT(rc == 0); 2085 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 3); 2086 stub_complete_io(3); 2087 2088 rc = spdk_bdev_readv_blocks(desc, io_ch, iovs, iovcnt, 1, 64, io_done, NULL); 2089 CU_ASSERT(rc == 0); 2090 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 3); 2091 stub_complete_io(3); 2092 2093 /* 512 * 160 with 32 IO boundary */ 2094 iovcnt = 1; 2095 iovs[0].iov_base = buf + 16; 2096 iovs[0].iov_len = 16384 + 65536; 2097 2098 rc = spdk_bdev_writev_blocks(desc, io_ch, iovs, iovcnt, 1, 160, io_done, NULL); 2099 CU_ASSERT(rc == 0); 2100 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 6); 2101 stub_complete_io(6); 2102 2103 spdk_put_io_channel(io_ch); 2104 spdk_bdev_close(desc); 2105 free_bdev(bdev); 2106 fn_table.submit_request = stub_submit_request; 2107 spdk_bdev_finish(bdev_fini_cb, NULL); 2108 poll_threads(); 2109 2110 free(buf); 2111 } 2112 2113 static void 2114 histogram_status_cb(void *cb_arg, int status) 2115 { 2116 g_status = status; 2117 } 2118 2119 static void 2120 histogram_data_cb(void *cb_arg, int status, struct spdk_histogram_data *histogram) 2121 { 2122 g_status = status; 2123 g_histogram = histogram; 2124 } 2125 2126 static void 2127 histogram_io_count(void *ctx, uint64_t start, uint64_t end, uint64_t count, 2128 uint64_t total, uint64_t so_far) 2129 { 2130 g_count += count; 2131 } 2132 2133 static void 2134 bdev_histograms(void) 2135 { 2136 struct spdk_bdev *bdev; 2137 struct spdk_bdev_desc *desc = NULL; 2138 struct spdk_io_channel *ch; 2139 struct spdk_histogram_data *histogram; 2140 uint8_t buf[4096]; 2141 int rc; 2142 2143 spdk_bdev_initialize(bdev_init_cb, NULL); 2144 2145 bdev = allocate_bdev("bdev"); 2146 2147 rc = spdk_bdev_open(bdev, true, NULL, NULL, &desc); 2148 CU_ASSERT(rc == 0); 2149 CU_ASSERT(desc != NULL); 2150 2151 ch = spdk_bdev_get_io_channel(desc); 2152 CU_ASSERT(ch != NULL); 2153 2154 /* Enable histogram */ 2155 g_status = -1; 2156 spdk_bdev_histogram_enable(bdev, histogram_status_cb, NULL, true); 2157 poll_threads(); 2158 CU_ASSERT(g_status == 0); 2159 
CU_ASSERT(bdev->internal.histogram_enabled == true); 2160 2161 /* Allocate histogram */ 2162 histogram = spdk_histogram_data_alloc(); 2163 SPDK_CU_ASSERT_FATAL(histogram != NULL); 2164 2165 /* Check if histogram is zeroed */ 2166 spdk_bdev_histogram_get(bdev, histogram, histogram_data_cb, NULL); 2167 poll_threads(); 2168 CU_ASSERT(g_status == 0); 2169 SPDK_CU_ASSERT_FATAL(g_histogram != NULL); 2170 2171 g_count = 0; 2172 spdk_histogram_data_iterate(g_histogram, histogram_io_count, NULL); 2173 2174 CU_ASSERT(g_count == 0); 2175 2176 rc = spdk_bdev_write_blocks(desc, ch, buf, 0, 1, io_done, NULL); 2177 CU_ASSERT(rc == 0); 2178 2179 spdk_delay_us(10); 2180 stub_complete_io(1); 2181 poll_threads(); 2182 2183 rc = spdk_bdev_read_blocks(desc, ch, buf, 0, 1, io_done, NULL); 2184 CU_ASSERT(rc == 0); 2185 2186 spdk_delay_us(10); 2187 stub_complete_io(1); 2188 poll_threads(); 2189 2190 /* Check if histogram gathered data from all I/O channels */ 2191 g_histogram = NULL; 2192 spdk_bdev_histogram_get(bdev, histogram, histogram_data_cb, NULL); 2193 poll_threads(); 2194 CU_ASSERT(g_status == 0); 2195 CU_ASSERT(bdev->internal.histogram_enabled == true); 2196 SPDK_CU_ASSERT_FATAL(g_histogram != NULL); 2197 2198 g_count = 0; 2199 spdk_histogram_data_iterate(g_histogram, histogram_io_count, NULL); 2200 CU_ASSERT(g_count == 2); 2201 2202 /* Disable histogram */ 2203 spdk_bdev_histogram_enable(bdev, histogram_status_cb, NULL, false); 2204 poll_threads(); 2205 CU_ASSERT(g_status == 0); 2206 CU_ASSERT(bdev->internal.histogram_enabled == false); 2207 2208 /* Try to run histogram commands on disabled bdev */ 2209 spdk_bdev_histogram_get(bdev, histogram, histogram_data_cb, NULL); 2210 poll_threads(); 2211 CU_ASSERT(g_status == -EFAULT); 2212 2213 spdk_histogram_data_free(histogram); 2214 spdk_put_io_channel(ch); 2215 spdk_bdev_close(desc); 2216 free_bdev(bdev); 2217 spdk_bdev_finish(bdev_fini_cb, NULL); 2218 poll_threads(); 2219 } 2220 2221 static void 2222 _bdev_compare(bool emulated) 2223 { 2224 struct spdk_bdev *bdev; 2225 struct spdk_bdev_desc *desc = NULL; 2226 struct spdk_io_channel *ioch; 2227 struct ut_expected_io *expected_io; 2228 uint64_t offset, num_blocks; 2229 uint32_t num_completed; 2230 char aa_buf[512]; 2231 char bb_buf[512]; 2232 struct iovec compare_iov; 2233 uint8_t io_type; 2234 int rc; 2235 2236 if (emulated) { 2237 io_type = SPDK_BDEV_IO_TYPE_READ; 2238 } else { 2239 io_type = SPDK_BDEV_IO_TYPE_COMPARE; 2240 } 2241 2242 memset(aa_buf, 0xaa, sizeof(aa_buf)); 2243 memset(bb_buf, 0xbb, sizeof(bb_buf)); 2244 2245 g_io_types_supported[SPDK_BDEV_IO_TYPE_COMPARE] = !emulated; 2246 2247 spdk_bdev_initialize(bdev_init_cb, NULL); 2248 fn_table.submit_request = stub_submit_request_get_buf; 2249 bdev = allocate_bdev("bdev"); 2250 2251 rc = spdk_bdev_open(bdev, true, NULL, NULL, &desc); 2252 CU_ASSERT_EQUAL(rc, 0); 2253 SPDK_CU_ASSERT_FATAL(desc != NULL); 2254 ioch = spdk_bdev_get_io_channel(desc); 2255 SPDK_CU_ASSERT_FATAL(ioch != NULL); 2256 2257 fn_table.submit_request = stub_submit_request_get_buf; 2258 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 2259 2260 offset = 50; 2261 num_blocks = 1; 2262 compare_iov.iov_base = aa_buf; 2263 compare_iov.iov_len = sizeof(aa_buf); 2264 2265 expected_io = ut_alloc_expected_io(io_type, offset, num_blocks, 0); 2266 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2267 2268 g_io_done = false; 2269 g_compare_read_buf = aa_buf; 2270 g_compare_read_buf_len = sizeof(aa_buf); 2271 rc = spdk_bdev_comparev_blocks(desc, ioch, &compare_iov, 1, offset, 
num_blocks, io_done, NULL); 2272 CU_ASSERT_EQUAL(rc, 0); 2273 num_completed = stub_complete_io(1); 2274 CU_ASSERT_EQUAL(num_completed, 1); 2275 CU_ASSERT(g_io_done == true); 2276 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS); 2277 2278 expected_io = ut_alloc_expected_io(io_type, offset, num_blocks, 0); 2279 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2280 2281 g_io_done = false; 2282 g_compare_read_buf = bb_buf; 2283 g_compare_read_buf_len = sizeof(bb_buf); 2284 rc = spdk_bdev_comparev_blocks(desc, ioch, &compare_iov, 1, offset, num_blocks, io_done, NULL); 2285 CU_ASSERT_EQUAL(rc, 0); 2286 num_completed = stub_complete_io(1); 2287 CU_ASSERT_EQUAL(num_completed, 1); 2288 CU_ASSERT(g_io_done == true); 2289 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_MISCOMPARE); 2290 2291 spdk_put_io_channel(ioch); 2292 spdk_bdev_close(desc); 2293 free_bdev(bdev); 2294 fn_table.submit_request = stub_submit_request; 2295 spdk_bdev_finish(bdev_fini_cb, NULL); 2296 poll_threads(); 2297 2298 g_io_types_supported[SPDK_BDEV_IO_TYPE_COMPARE] = true; 2299 2300 g_compare_read_buf = NULL; 2301 } 2302 2303 static void 2304 bdev_compare(void) 2305 { 2306 _bdev_compare(true); 2307 _bdev_compare(false); 2308 } 2309 2310 static void 2311 bdev_compare_and_write(void) 2312 { 2313 struct spdk_bdev *bdev; 2314 struct spdk_bdev_desc *desc = NULL; 2315 struct spdk_io_channel *ioch; 2316 struct ut_expected_io *expected_io; 2317 uint64_t offset, num_blocks; 2318 uint32_t num_completed; 2319 char aa_buf[512]; 2320 char bb_buf[512]; 2321 char cc_buf[512]; 2322 char write_buf[512]; 2323 struct iovec compare_iov; 2324 struct iovec write_iov; 2325 int rc; 2326 2327 memset(aa_buf, 0xaa, sizeof(aa_buf)); 2328 memset(bb_buf, 0xbb, sizeof(bb_buf)); 2329 memset(cc_buf, 0xcc, sizeof(cc_buf)); 2330 2331 g_io_types_supported[SPDK_BDEV_IO_TYPE_COMPARE] = false; 2332 2333 spdk_bdev_initialize(bdev_init_cb, NULL); 2334 fn_table.submit_request = stub_submit_request_get_buf; 2335 bdev = allocate_bdev("bdev"); 2336 2337 rc = spdk_bdev_open(bdev, true, NULL, NULL, &desc); 2338 CU_ASSERT_EQUAL(rc, 0); 2339 SPDK_CU_ASSERT_FATAL(desc != NULL); 2340 ioch = spdk_bdev_get_io_channel(desc); 2341 SPDK_CU_ASSERT_FATAL(ioch != NULL); 2342 2343 fn_table.submit_request = stub_submit_request_get_buf; 2344 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 2345 2346 offset = 50; 2347 num_blocks = 1; 2348 compare_iov.iov_base = aa_buf; 2349 compare_iov.iov_len = sizeof(aa_buf); 2350 write_iov.iov_base = bb_buf; 2351 write_iov.iov_len = sizeof(bb_buf); 2352 2353 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, offset, num_blocks, 0); 2354 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2355 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, offset, num_blocks, 0); 2356 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2357 2358 g_io_done = false; 2359 g_compare_read_buf = aa_buf; 2360 g_compare_read_buf_len = sizeof(aa_buf); 2361 memset(write_buf, 0, sizeof(write_buf)); 2362 g_compare_write_buf = write_buf; 2363 g_compare_write_buf_len = sizeof(write_buf); 2364 rc = spdk_bdev_comparev_and_writev_blocks(desc, ioch, &compare_iov, 1, &write_iov, 1, 2365 offset, num_blocks, io_done, NULL); 2366 /* Trigger range locking */ 2367 poll_threads(); 2368 CU_ASSERT_EQUAL(rc, 0); 2369 num_completed = stub_complete_io(1); 2370 CU_ASSERT_EQUAL(num_completed, 1); 2371 CU_ASSERT(g_io_done == false); 2372 num_completed = stub_complete_io(1); 2373 /* Trigger range unlocking */ 2374 
poll_threads(); 2375 CU_ASSERT_EQUAL(num_completed, 1); 2376 CU_ASSERT(g_io_done == true); 2377 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS); 2378 CU_ASSERT(memcmp(write_buf, bb_buf, sizeof(write_buf)) == 0); 2379 2380 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, offset, num_blocks, 0); 2381 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2382 2383 g_io_done = false; 2384 g_compare_read_buf = cc_buf; 2385 g_compare_read_buf_len = sizeof(cc_buf); 2386 memset(write_buf, 0, sizeof(write_buf)); 2387 g_compare_write_buf = write_buf; 2388 g_compare_write_buf_len = sizeof(write_buf); 2389 rc = spdk_bdev_comparev_and_writev_blocks(desc, ioch, &compare_iov, 1, &write_iov, 1, 2390 offset, num_blocks, io_done, NULL); 2391 /* Trigger range locking */ 2392 poll_threads(); 2393 CU_ASSERT_EQUAL(rc, 0); 2394 num_completed = stub_complete_io(1); 2395 /* Trigger range unlocking earlier because we expect error here */ 2396 poll_threads(); 2397 CU_ASSERT_EQUAL(num_completed, 1); 2398 CU_ASSERT(g_io_done == true); 2399 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_MISCOMPARE); 2400 num_completed = stub_complete_io(1); 2401 CU_ASSERT_EQUAL(num_completed, 0); 2402 2403 spdk_put_io_channel(ioch); 2404 spdk_bdev_close(desc); 2405 free_bdev(bdev); 2406 fn_table.submit_request = stub_submit_request; 2407 spdk_bdev_finish(bdev_fini_cb, NULL); 2408 poll_threads(); 2409 2410 g_io_types_supported[SPDK_BDEV_IO_TYPE_COMPARE] = true; 2411 2412 g_compare_read_buf = NULL; 2413 g_compare_write_buf = NULL; 2414 } 2415 2416 static void 2417 bdev_write_zeroes(void) 2418 { 2419 struct spdk_bdev *bdev; 2420 struct spdk_bdev_desc *desc = NULL; 2421 struct spdk_io_channel *ioch; 2422 struct ut_expected_io *expected_io; 2423 uint64_t offset, num_io_blocks, num_blocks; 2424 uint32_t num_completed, num_requests; 2425 int rc; 2426 2427 spdk_bdev_initialize(bdev_init_cb, NULL); 2428 bdev = allocate_bdev("bdev"); 2429 2430 rc = spdk_bdev_open(bdev, true, NULL, NULL, &desc); 2431 CU_ASSERT_EQUAL(rc, 0); 2432 SPDK_CU_ASSERT_FATAL(desc != NULL); 2433 ioch = spdk_bdev_get_io_channel(desc); 2434 SPDK_CU_ASSERT_FATAL(ioch != NULL); 2435 2436 fn_table.submit_request = stub_submit_request; 2437 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 2438 2439 /* First test that if the bdev supports write_zeroes, the request won't be split */ 2440 bdev->md_len = 0; 2441 bdev->blocklen = 4096; 2442 num_blocks = (ZERO_BUFFER_SIZE / bdev->blocklen) * 2; 2443 2444 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, 0, num_blocks, 0); 2445 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2446 rc = spdk_bdev_write_zeroes_blocks(desc, ioch, 0, num_blocks, io_done, NULL); 2447 CU_ASSERT_EQUAL(rc, 0); 2448 num_completed = stub_complete_io(1); 2449 CU_ASSERT_EQUAL(num_completed, 1); 2450 2451 /* Check that if write zeroes is not supported it'll be replaced by regular writes */ 2452 ut_enable_io_type(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, false); 2453 num_io_blocks = ZERO_BUFFER_SIZE / bdev->blocklen; 2454 num_requests = 2; 2455 num_blocks = (ZERO_BUFFER_SIZE / bdev->blocklen) * num_requests; 2456 2457 for (offset = 0; offset < num_requests; ++offset) { 2458 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 2459 offset * num_io_blocks, num_io_blocks, 0); 2460 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2461 } 2462 2463 rc = spdk_bdev_write_zeroes_blocks(desc, ioch, 0, num_blocks, io_done, NULL); 2464 CU_ASSERT_EQUAL(rc, 0); 2465 num_completed = 
stub_complete_io(num_requests);
	CU_ASSERT_EQUAL(num_completed, num_requests);

	/* Check that the splitting is correct if bdev has interleaved metadata */
	bdev->md_interleave = true;
	bdev->md_len = 64;
	bdev->blocklen = 4096 + 64;
	num_blocks = (ZERO_BUFFER_SIZE / bdev->blocklen) * 2;

	num_requests = offset = 0;
	while (offset < num_blocks) {
		num_io_blocks = spdk_min(ZERO_BUFFER_SIZE / bdev->blocklen, num_blocks - offset);
		expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE,
						   offset, num_io_blocks, 0);
		TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
		offset += num_io_blocks;
		num_requests++;
	}

	rc = spdk_bdev_write_zeroes_blocks(desc, ioch, 0, num_blocks, io_done, NULL);
	CU_ASSERT_EQUAL(rc, 0);
	num_completed = stub_complete_io(num_requests);
	CU_ASSERT_EQUAL(num_completed, num_requests);
	num_completed = stub_complete_io(num_requests);
	CU_ASSERT_EQUAL(num_completed, 0);

	/* Check the same for a separate metadata buffer */
	bdev->md_interleave = false;
	bdev->md_len = 64;
	bdev->blocklen = 4096;

	num_requests = offset = 0;
	while (offset < num_blocks) {
		num_io_blocks = spdk_min(ZERO_BUFFER_SIZE / (bdev->blocklen + bdev->md_len), num_blocks);
		expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE,
						   offset, num_io_blocks, 0);
		expected_io->md_buf = (char *)g_bdev_mgr.zero_buffer + num_io_blocks * bdev->blocklen;
		TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
		offset += num_io_blocks;
		num_requests++;
	}

	rc = spdk_bdev_write_zeroes_blocks(desc, ioch, 0, num_blocks, io_done, NULL);
	CU_ASSERT_EQUAL(rc, 0);
	num_completed = stub_complete_io(num_requests);
	CU_ASSERT_EQUAL(num_completed, num_requests);

	ut_enable_io_type(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, true);
	spdk_put_io_channel(ioch);
	spdk_bdev_close(desc);
	free_bdev(bdev);
	spdk_bdev_finish(bdev_fini_cb, NULL);
	poll_threads();
}

static void
bdev_open_while_hotremove(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc[2] = {};
	int rc;

	bdev = allocate_bdev("bdev");

	rc = spdk_bdev_open(bdev, false, NULL, NULL, &desc[0]);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(desc[0] != NULL);

	spdk_bdev_unregister(bdev, NULL, NULL);

	rc = spdk_bdev_open(bdev, false, NULL, NULL, &desc[1]);
	CU_ASSERT(rc == -ENODEV);
	SPDK_CU_ASSERT_FATAL(desc[1] == NULL);

	spdk_bdev_close(desc[0]);
	free_bdev(bdev);
}

static void
bdev_close_while_hotremove(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc = NULL;
	int rc = 0;

	bdev = allocate_bdev("bdev");

	rc = spdk_bdev_open_ext("bdev", true, bdev_open_cb1, &desc, &desc);
	CU_ASSERT_EQUAL(rc, 0);

	/* Simulate hot-unplug by unregistering bdev */
	g_event_type1 = 0xFF;
	g_unregister_arg = NULL;
	g_unregister_rc = -1;
	spdk_bdev_unregister(bdev, bdev_unregister_cb, (void *)0x12345678);
	/* Close device while remove event is in flight */
	spdk_bdev_close(desc);

	/* Ensure that unregister callback is delayed */
	CU_ASSERT_EQUAL(g_unregister_arg, NULL);
	CU_ASSERT_EQUAL(g_unregister_rc, -1);

	poll_threads();

	/* Event callback shall not be issued because device was closed */
	CU_ASSERT_EQUAL(g_event_type1, 0xFF);
	/* Unregister callback is issued */
	CU_ASSERT_EQUAL(g_unregister_arg, (void *)0x12345678);
	CU_ASSERT_EQUAL(g_unregister_rc, 0);

	free_bdev(bdev);
}

static void
bdev_open_ext(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc1 = NULL;
	struct spdk_bdev_desc *desc2 = NULL;
	int rc = 0;

	bdev = allocate_bdev("bdev");

	rc = spdk_bdev_open_ext("bdev", true, NULL, NULL, &desc1);
	CU_ASSERT_EQUAL(rc, -EINVAL);

	rc = spdk_bdev_open_ext("bdev", true, bdev_open_cb1, &desc1, &desc1);
	CU_ASSERT_EQUAL(rc, 0);

	rc = spdk_bdev_open_ext("bdev", true, bdev_open_cb2, &desc2, &desc2);
	CU_ASSERT_EQUAL(rc, 0);

	g_event_type1 = 0xFF;
	g_event_type2 = 0xFF;

	/* Simulate hot-unplug by unregistering bdev */
	spdk_bdev_unregister(bdev, NULL, NULL);
	poll_threads();

	/* Check if correct events have been triggered in event callback fn */
	CU_ASSERT_EQUAL(g_event_type1, SPDK_BDEV_EVENT_REMOVE);
	CU_ASSERT_EQUAL(g_event_type2, SPDK_BDEV_EVENT_REMOVE);

	free_bdev(bdev);
	poll_threads();
}

struct timeout_io_cb_arg {
	struct iovec iov;
	uint8_t type;
};

static int
bdev_channel_count_submitted_io(struct spdk_bdev_channel *ch)
{
	struct spdk_bdev_io *bdev_io;
	int n = 0;

	if (!ch) {
		return -1;
	}

	TAILQ_FOREACH(bdev_io, &ch->io_submitted, internal.ch_link) {
		n++;
	}

	return n;
}

static void
bdev_channel_io_timeout_cb(void *cb_arg, struct spdk_bdev_io *bdev_io)
{
	struct timeout_io_cb_arg *ctx = cb_arg;

	ctx->type = bdev_io->type;
	ctx->iov.iov_base = bdev_io->iov.iov_base;
	ctx->iov.iov_len = bdev_io->iov.iov_len;
}

static void
bdev_set_io_timeout(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc = NULL;
	struct spdk_io_channel *io_ch = NULL;
	struct spdk_bdev_channel *bdev_ch = NULL;
	struct timeout_io_cb_arg cb_arg;

	spdk_bdev_initialize(bdev_init_cb, NULL);

	bdev = allocate_bdev("bdev");

	CU_ASSERT(spdk_bdev_open(bdev, true, NULL, NULL, &desc) == 0);
	SPDK_CU_ASSERT_FATAL(desc != NULL);
	io_ch = spdk_bdev_get_io_channel(desc);
	CU_ASSERT(io_ch != NULL);

	bdev_ch = spdk_io_channel_get_ctx(io_ch);
	CU_ASSERT(TAILQ_EMPTY(&bdev_ch->io_submitted));

	/* This is part 1.
	 * We will check the bdev_ch->io_submitted list
	 * to make sure that it links only the user-submitted I/Os.
	 */
	CU_ASSERT(spdk_bdev_read(desc, io_ch, (void *)0x1000, 0, 4096, io_done, NULL) == 0);
	CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 1);
	CU_ASSERT(spdk_bdev_write(desc, io_ch, (void *)0x2000, 0, 4096, io_done, NULL) == 0);
	CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 2);
	stub_complete_io(1);
	CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 1);
	stub_complete_io(1);
	CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 0);

	/* Split IO */
	bdev->optimal_io_boundary = 16;
	bdev->split_on_optimal_io_boundary = true;

	/* Now test that a single-vector command is split correctly.
	 * Offset 14, length 8, payload 0xF000
	 *  Child - Offset 14, length 2, payload 0xF000
	 *  Child - Offset 16, length 6, payload 0xF000 + 2 * 512
	 */
	CU_ASSERT(spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 8, io_done, NULL) == 0);
	/* We count only the I/Os submitted by the user.
	 * Even though this I/O is split into two children, we still count just one,
	 * because the user sees only one I/O.
	 */
	CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 1);
	stub_complete_io(1);
	CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 1);
	stub_complete_io(1);
	CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 0);

	/* Also include the reset IO */
	CU_ASSERT(spdk_bdev_reset(desc, io_ch, io_done, NULL) == 0);
	CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 1);
	poll_threads();
	stub_complete_io(1);
	poll_threads();
	CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 0);

	/* This is part 2.
	 * Test the descriptor's timeout poller registration.
	 */

	/* Successfully set the timeout */
	CU_ASSERT(spdk_bdev_set_timeout(desc, 30, bdev_channel_io_timeout_cb, &cb_arg) == 0);
	CU_ASSERT(desc->io_timeout_poller != NULL);
	CU_ASSERT(desc->timeout_in_sec == 30);
	CU_ASSERT(desc->cb_fn == bdev_channel_io_timeout_cb);
	CU_ASSERT(desc->cb_arg == &cb_arg);

	/* Change the timeout limit */
	CU_ASSERT(spdk_bdev_set_timeout(desc, 20, bdev_channel_io_timeout_cb, &cb_arg) == 0);
	CU_ASSERT(desc->io_timeout_poller != NULL);
	CU_ASSERT(desc->timeout_in_sec == 20);
	CU_ASSERT(desc->cb_fn == bdev_channel_io_timeout_cb);
	CU_ASSERT(desc->cb_arg == &cb_arg);

	/* Disable the timeout */
	CU_ASSERT(spdk_bdev_set_timeout(desc, 0, NULL, NULL) == 0);
	CU_ASSERT(desc->io_timeout_poller == NULL);

	/* This is part 3.
	 * We will catch a timed-out I/O and check whether it is
	 * the submitted one.
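	 *
	 * Rough sketch of the mechanism (not asserted directly here): setting a
	 * timeout registers a poller on the descriptor's thread; that poller
	 * periodically walks each channel's io_submitted list, which is ordered by
	 * submission time, and invokes the callback for any I/O older than
	 * timeout_in_sec. The spdk_delay_us() calls below advance the mock clock
	 * to drive that poller.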
	 */
	memset(&cb_arg, 0, sizeof(cb_arg));
	CU_ASSERT(spdk_bdev_set_timeout(desc, 30, bdev_channel_io_timeout_cb, &cb_arg) == 0);
	CU_ASSERT(spdk_bdev_write_blocks(desc, io_ch, (void *)0x1000, 0, 1, io_done, NULL) == 0);

	/* Don't reach the limit */
	spdk_delay_us(15 * spdk_get_ticks_hz());
	poll_threads();
	CU_ASSERT(cb_arg.type == 0);
	CU_ASSERT(cb_arg.iov.iov_base == (void *)0x0);
	CU_ASSERT(cb_arg.iov.iov_len == 0);

	/* 15 + 15 = 30 reaches the limit */
	spdk_delay_us(15 * spdk_get_ticks_hz());
	poll_threads();
	CU_ASSERT(cb_arg.type == SPDK_BDEV_IO_TYPE_WRITE);
	CU_ASSERT(cb_arg.iov.iov_base == (void *)0x1000);
	CU_ASSERT(cb_arg.iov.iov_len == 1 * bdev->blocklen);
	stub_complete_io(1);

	/* Use the same split I/O as above and check the I/O */
	memset(&cb_arg, 0, sizeof(cb_arg));
	CU_ASSERT(spdk_bdev_write_blocks(desc, io_ch, (void *)0xF000, 14, 8, io_done, NULL) == 0);

	/* The first child completes in time */
	spdk_delay_us(15 * spdk_get_ticks_hz());
	poll_threads();
	stub_complete_io(1);
	CU_ASSERT(cb_arg.type == 0);
	CU_ASSERT(cb_arg.iov.iov_base == (void *)0x0);
	CU_ASSERT(cb_arg.iov.iov_len == 0);

	/* The second child reaches the limit */
	spdk_delay_us(15 * spdk_get_ticks_hz());
	poll_threads();
	CU_ASSERT(cb_arg.type == SPDK_BDEV_IO_TYPE_WRITE);
	CU_ASSERT(cb_arg.iov.iov_base == (void *)0xF000);
	CU_ASSERT(cb_arg.iov.iov_len == 8 * bdev->blocklen);
	stub_complete_io(1);

	/* Also include the reset IO */
	memset(&cb_arg, 0, sizeof(cb_arg));
	CU_ASSERT(spdk_bdev_reset(desc, io_ch, io_done, NULL) == 0);
	spdk_delay_us(30 * spdk_get_ticks_hz());
	poll_threads();
	CU_ASSERT(cb_arg.type == SPDK_BDEV_IO_TYPE_RESET);
	stub_complete_io(1);
	poll_threads();

	spdk_put_io_channel(io_ch);
	spdk_bdev_close(desc);
	free_bdev(bdev);
	spdk_bdev_finish(bdev_fini_cb, NULL);
	poll_threads();
}

static void
lba_range_overlap(void)
{
	struct lba_range r1, r2;

	r1.offset = 100;
	r1.length = 50;

	r2.offset = 0;
	r2.length = 1;
	CU_ASSERT(!bdev_lba_range_overlapped(&r1, &r2));

	r2.offset = 0;
	r2.length = 100;
	CU_ASSERT(!bdev_lba_range_overlapped(&r1, &r2));

	r2.offset = 0;
	r2.length = 110;
	CU_ASSERT(bdev_lba_range_overlapped(&r1, &r2));

	r2.offset = 100;
	r2.length = 10;
	CU_ASSERT(bdev_lba_range_overlapped(&r1, &r2));

	r2.offset = 110;
	r2.length = 20;
	CU_ASSERT(bdev_lba_range_overlapped(&r1, &r2));

	r2.offset = 140;
	r2.length = 150;
	CU_ASSERT(bdev_lba_range_overlapped(&r1, &r2));

	r2.offset = 130;
	r2.length = 200;
	CU_ASSERT(bdev_lba_range_overlapped(&r1, &r2));

	r2.offset = 150;
	r2.length = 100;
	CU_ASSERT(!bdev_lba_range_overlapped(&r1, &r2));

	r2.offset = 110;
	r2.length = 0;
	CU_ASSERT(!bdev_lba_range_overlapped(&r1, &r2));
}

static bool g_lock_lba_range_done;
static bool g_unlock_lba_range_done;

static void
lock_lba_range_done(void *ctx, int status)
{
	g_lock_lba_range_done = true;
}

static void
unlock_lba_range_done(void *ctx, int status)
{
	g_unlock_lba_range_done = true;
}

static void
lock_lba_range_check_ranges(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc = NULL;
struct spdk_io_channel *io_ch; 2855 struct spdk_bdev_channel *channel; 2856 struct lba_range *range; 2857 int ctx1; 2858 int rc; 2859 2860 spdk_bdev_initialize(bdev_init_cb, NULL); 2861 2862 bdev = allocate_bdev("bdev0"); 2863 2864 rc = spdk_bdev_open(bdev, true, NULL, NULL, &desc); 2865 CU_ASSERT(rc == 0); 2866 CU_ASSERT(desc != NULL); 2867 io_ch = spdk_bdev_get_io_channel(desc); 2868 CU_ASSERT(io_ch != NULL); 2869 channel = spdk_io_channel_get_ctx(io_ch); 2870 2871 g_lock_lba_range_done = false; 2872 rc = bdev_lock_lba_range(desc, io_ch, 20, 10, lock_lba_range_done, &ctx1); 2873 CU_ASSERT(rc == 0); 2874 poll_threads(); 2875 2876 CU_ASSERT(g_lock_lba_range_done == true); 2877 range = TAILQ_FIRST(&channel->locked_ranges); 2878 SPDK_CU_ASSERT_FATAL(range != NULL); 2879 CU_ASSERT(range->offset == 20); 2880 CU_ASSERT(range->length == 10); 2881 CU_ASSERT(range->owner_ch == channel); 2882 2883 /* Unlocks must exactly match a lock. */ 2884 g_unlock_lba_range_done = false; 2885 rc = bdev_unlock_lba_range(desc, io_ch, 20, 1, unlock_lba_range_done, &ctx1); 2886 CU_ASSERT(rc == -EINVAL); 2887 CU_ASSERT(g_unlock_lba_range_done == false); 2888 2889 rc = bdev_unlock_lba_range(desc, io_ch, 20, 10, unlock_lba_range_done, &ctx1); 2890 CU_ASSERT(rc == 0); 2891 spdk_delay_us(100); 2892 poll_threads(); 2893 2894 CU_ASSERT(g_unlock_lba_range_done == true); 2895 CU_ASSERT(TAILQ_EMPTY(&channel->locked_ranges)); 2896 2897 spdk_put_io_channel(io_ch); 2898 spdk_bdev_close(desc); 2899 free_bdev(bdev); 2900 spdk_bdev_finish(bdev_fini_cb, NULL); 2901 poll_threads(); 2902 } 2903 2904 static void 2905 lock_lba_range_with_io_outstanding(void) 2906 { 2907 struct spdk_bdev *bdev; 2908 struct spdk_bdev_desc *desc = NULL; 2909 struct spdk_io_channel *io_ch; 2910 struct spdk_bdev_channel *channel; 2911 struct lba_range *range; 2912 char buf[4096]; 2913 int ctx1; 2914 int rc; 2915 2916 spdk_bdev_initialize(bdev_init_cb, NULL); 2917 2918 bdev = allocate_bdev("bdev0"); 2919 2920 rc = spdk_bdev_open(bdev, true, NULL, NULL, &desc); 2921 CU_ASSERT(rc == 0); 2922 CU_ASSERT(desc != NULL); 2923 io_ch = spdk_bdev_get_io_channel(desc); 2924 CU_ASSERT(io_ch != NULL); 2925 channel = spdk_io_channel_get_ctx(io_ch); 2926 2927 g_io_done = false; 2928 rc = spdk_bdev_read_blocks(desc, io_ch, buf, 20, 1, io_done, &ctx1); 2929 CU_ASSERT(rc == 0); 2930 2931 g_lock_lba_range_done = false; 2932 rc = bdev_lock_lba_range(desc, io_ch, 20, 10, lock_lba_range_done, &ctx1); 2933 CU_ASSERT(rc == 0); 2934 poll_threads(); 2935 2936 /* The lock should immediately become valid, since there are no outstanding 2937 * write I/O. 2938 */ 2939 CU_ASSERT(g_io_done == false); 2940 CU_ASSERT(g_lock_lba_range_done == true); 2941 range = TAILQ_FIRST(&channel->locked_ranges); 2942 SPDK_CU_ASSERT_FATAL(range != NULL); 2943 CU_ASSERT(range->offset == 20); 2944 CU_ASSERT(range->length == 10); 2945 CU_ASSERT(range->owner_ch == channel); 2946 CU_ASSERT(range->locked_ctx == &ctx1); 2947 2948 rc = bdev_unlock_lba_range(desc, io_ch, 20, 10, lock_lba_range_done, &ctx1); 2949 CU_ASSERT(rc == 0); 2950 stub_complete_io(1); 2951 spdk_delay_us(100); 2952 poll_threads(); 2953 2954 CU_ASSERT(TAILQ_EMPTY(&channel->locked_ranges)); 2955 2956 /* Now try again, but with a write I/O. 
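	 *
	 * The distinction matters because LBA range locks only need to quiesce
	 * writes: the lock exists to give its holder exclusive write access, so
	 * outstanding reads do not hold it up, while an outstanding write in the
	 * range must complete before the lock callback can fire. (Reasoning
	 * inferred from the two cases this test exercises.)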
*/ 2957 g_io_done = false; 2958 rc = spdk_bdev_write_blocks(desc, io_ch, buf, 20, 1, io_done, &ctx1); 2959 CU_ASSERT(rc == 0); 2960 2961 g_lock_lba_range_done = false; 2962 rc = bdev_lock_lba_range(desc, io_ch, 20, 10, lock_lba_range_done, &ctx1); 2963 CU_ASSERT(rc == 0); 2964 poll_threads(); 2965 2966 /* The lock should not be fully valid yet, since a write I/O is outstanding. 2967 * But note that the range should be on the channel's locked_list, to make sure no 2968 * new write I/O are started. 2969 */ 2970 CU_ASSERT(g_io_done == false); 2971 CU_ASSERT(g_lock_lba_range_done == false); 2972 range = TAILQ_FIRST(&channel->locked_ranges); 2973 SPDK_CU_ASSERT_FATAL(range != NULL); 2974 CU_ASSERT(range->offset == 20); 2975 CU_ASSERT(range->length == 10); 2976 2977 /* Complete the write I/O. This should make the lock valid (checked by confirming 2978 * our callback was invoked). 2979 */ 2980 stub_complete_io(1); 2981 spdk_delay_us(100); 2982 poll_threads(); 2983 CU_ASSERT(g_io_done == true); 2984 CU_ASSERT(g_lock_lba_range_done == true); 2985 2986 rc = bdev_unlock_lba_range(desc, io_ch, 20, 10, unlock_lba_range_done, &ctx1); 2987 CU_ASSERT(rc == 0); 2988 poll_threads(); 2989 2990 CU_ASSERT(TAILQ_EMPTY(&channel->locked_ranges)); 2991 2992 spdk_put_io_channel(io_ch); 2993 spdk_bdev_close(desc); 2994 free_bdev(bdev); 2995 spdk_bdev_finish(bdev_fini_cb, NULL); 2996 poll_threads(); 2997 } 2998 2999 static void 3000 lock_lba_range_overlapped(void) 3001 { 3002 struct spdk_bdev *bdev; 3003 struct spdk_bdev_desc *desc = NULL; 3004 struct spdk_io_channel *io_ch; 3005 struct spdk_bdev_channel *channel; 3006 struct lba_range *range; 3007 int ctx1; 3008 int rc; 3009 3010 spdk_bdev_initialize(bdev_init_cb, NULL); 3011 3012 bdev = allocate_bdev("bdev0"); 3013 3014 rc = spdk_bdev_open(bdev, true, NULL, NULL, &desc); 3015 CU_ASSERT(rc == 0); 3016 CU_ASSERT(desc != NULL); 3017 io_ch = spdk_bdev_get_io_channel(desc); 3018 CU_ASSERT(io_ch != NULL); 3019 channel = spdk_io_channel_get_ctx(io_ch); 3020 3021 /* Lock range 20-29. */ 3022 g_lock_lba_range_done = false; 3023 rc = bdev_lock_lba_range(desc, io_ch, 20, 10, lock_lba_range_done, &ctx1); 3024 CU_ASSERT(rc == 0); 3025 poll_threads(); 3026 3027 CU_ASSERT(g_lock_lba_range_done == true); 3028 range = TAILQ_FIRST(&channel->locked_ranges); 3029 SPDK_CU_ASSERT_FATAL(range != NULL); 3030 CU_ASSERT(range->offset == 20); 3031 CU_ASSERT(range->length == 10); 3032 3033 /* Try to lock range 25-39. It should not lock immediately, since it overlaps with 3034 * 20-29. 3035 */ 3036 g_lock_lba_range_done = false; 3037 rc = bdev_lock_lba_range(desc, io_ch, 25, 15, lock_lba_range_done, &ctx1); 3038 CU_ASSERT(rc == 0); 3039 poll_threads(); 3040 3041 CU_ASSERT(g_lock_lba_range_done == false); 3042 range = TAILQ_FIRST(&bdev->internal.pending_locked_ranges); 3043 SPDK_CU_ASSERT_FATAL(range != NULL); 3044 CU_ASSERT(range->offset == 25); 3045 CU_ASSERT(range->length == 15); 3046 3047 /* Unlock 20-29. This should result in range 25-39 now getting locked since it 3048 * no longer overlaps with an active lock. 
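	 *
	 * (Mechanism, as exercised throughout this test: pending lock requests sit
	 * on bdev->internal.pending_locked_ranges; each unlock re-checks that list
	 * and promotes any range that no longer overlaps an active lock onto
	 * locked_ranges, invoking its lock callback at that point.)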
3049 */ 3050 g_unlock_lba_range_done = false; 3051 rc = bdev_unlock_lba_range(desc, io_ch, 20, 10, unlock_lba_range_done, &ctx1); 3052 CU_ASSERT(rc == 0); 3053 poll_threads(); 3054 3055 CU_ASSERT(g_unlock_lba_range_done == true); 3056 CU_ASSERT(TAILQ_EMPTY(&bdev->internal.pending_locked_ranges)); 3057 range = TAILQ_FIRST(&channel->locked_ranges); 3058 SPDK_CU_ASSERT_FATAL(range != NULL); 3059 CU_ASSERT(range->offset == 25); 3060 CU_ASSERT(range->length == 15); 3061 3062 /* Lock 40-59. This should immediately lock since it does not overlap with the 3063 * currently active 25-39 lock. 3064 */ 3065 g_lock_lba_range_done = false; 3066 rc = bdev_lock_lba_range(desc, io_ch, 40, 20, lock_lba_range_done, &ctx1); 3067 CU_ASSERT(rc == 0); 3068 poll_threads(); 3069 3070 CU_ASSERT(g_lock_lba_range_done == true); 3071 range = TAILQ_FIRST(&bdev->internal.locked_ranges); 3072 SPDK_CU_ASSERT_FATAL(range != NULL); 3073 range = TAILQ_NEXT(range, tailq); 3074 SPDK_CU_ASSERT_FATAL(range != NULL); 3075 CU_ASSERT(range->offset == 40); 3076 CU_ASSERT(range->length == 20); 3077 3078 /* Try to lock 35-44. Note that this overlaps with both 25-39 and 40-59. */ 3079 g_lock_lba_range_done = false; 3080 rc = bdev_lock_lba_range(desc, io_ch, 35, 10, lock_lba_range_done, &ctx1); 3081 CU_ASSERT(rc == 0); 3082 poll_threads(); 3083 3084 CU_ASSERT(g_lock_lba_range_done == false); 3085 range = TAILQ_FIRST(&bdev->internal.pending_locked_ranges); 3086 SPDK_CU_ASSERT_FATAL(range != NULL); 3087 CU_ASSERT(range->offset == 35); 3088 CU_ASSERT(range->length == 10); 3089 3090 /* Unlock 25-39. Make sure that 35-44 is still in the pending list, since 3091 * the 40-59 lock is still active. 3092 */ 3093 g_unlock_lba_range_done = false; 3094 rc = bdev_unlock_lba_range(desc, io_ch, 25, 15, unlock_lba_range_done, &ctx1); 3095 CU_ASSERT(rc == 0); 3096 poll_threads(); 3097 3098 CU_ASSERT(g_unlock_lba_range_done == true); 3099 CU_ASSERT(g_lock_lba_range_done == false); 3100 range = TAILQ_FIRST(&bdev->internal.pending_locked_ranges); 3101 SPDK_CU_ASSERT_FATAL(range != NULL); 3102 CU_ASSERT(range->offset == 35); 3103 CU_ASSERT(range->length == 10); 3104 3105 /* Unlock 40-59. This should result in 35-44 now getting locked, since there are 3106 * no longer any active overlapping locks. 3107 */ 3108 g_unlock_lba_range_done = false; 3109 rc = bdev_unlock_lba_range(desc, io_ch, 40, 20, unlock_lba_range_done, &ctx1); 3110 CU_ASSERT(rc == 0); 3111 poll_threads(); 3112 3113 CU_ASSERT(g_unlock_lba_range_done == true); 3114 CU_ASSERT(g_lock_lba_range_done == true); 3115 CU_ASSERT(TAILQ_EMPTY(&bdev->internal.pending_locked_ranges)); 3116 range = TAILQ_FIRST(&bdev->internal.locked_ranges); 3117 SPDK_CU_ASSERT_FATAL(range != NULL); 3118 CU_ASSERT(range->offset == 35); 3119 CU_ASSERT(range->length == 10); 3120 3121 /* Finally, unlock 35-44. 
*/ 3122 g_unlock_lba_range_done = false; 3123 rc = bdev_unlock_lba_range(desc, io_ch, 35, 10, unlock_lba_range_done, &ctx1); 3124 CU_ASSERT(rc == 0); 3125 poll_threads(); 3126 3127 CU_ASSERT(g_unlock_lba_range_done == true); 3128 CU_ASSERT(TAILQ_EMPTY(&bdev->internal.locked_ranges)); 3129 3130 spdk_put_io_channel(io_ch); 3131 spdk_bdev_close(desc); 3132 free_bdev(bdev); 3133 spdk_bdev_finish(bdev_fini_cb, NULL); 3134 poll_threads(); 3135 } 3136 3137 static void 3138 abort_done(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg) 3139 { 3140 g_abort_done = true; 3141 g_abort_status = bdev_io->internal.status; 3142 spdk_bdev_free_io(bdev_io); 3143 } 3144 3145 static void 3146 bdev_io_abort(void) 3147 { 3148 struct spdk_bdev *bdev; 3149 struct spdk_bdev_desc *desc = NULL; 3150 struct spdk_io_channel *io_ch; 3151 struct spdk_bdev_opts bdev_opts = { 3152 .bdev_io_pool_size = 4, 3153 .bdev_io_cache_size = 2, 3154 }; 3155 uint64_t io_ctx1 = 0, io_ctx2 = 0; 3156 int rc; 3157 3158 rc = spdk_bdev_set_opts(&bdev_opts); 3159 CU_ASSERT(rc == 0); 3160 spdk_bdev_initialize(bdev_init_cb, NULL); 3161 3162 bdev = allocate_bdev("bdev0"); 3163 3164 rc = spdk_bdev_open(bdev, true, NULL, NULL, &desc); 3165 CU_ASSERT(rc == 0); 3166 CU_ASSERT(desc != NULL); 3167 io_ch = spdk_bdev_get_io_channel(desc); 3168 CU_ASSERT(io_ch != NULL); 3169 3170 g_abort_done = false; 3171 3172 ut_enable_io_type(SPDK_BDEV_IO_TYPE_ABORT, false); 3173 3174 rc = spdk_bdev_abort(desc, io_ch, &io_ctx1, abort_done, NULL); 3175 CU_ASSERT(rc == -ENOTSUP); 3176 3177 ut_enable_io_type(SPDK_BDEV_IO_TYPE_ABORT, true); 3178 3179 rc = spdk_bdev_abort(desc, io_ch, &io_ctx2, abort_done, NULL); 3180 CU_ASSERT(rc == 0); 3181 CU_ASSERT(g_abort_done == true); 3182 CU_ASSERT(g_abort_status == SPDK_BDEV_IO_STATUS_FAILED); 3183 3184 /* Test the case that the target I/O was successfully aborted. */ 3185 g_io_done = false; 3186 3187 rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, &io_ctx1); 3188 CU_ASSERT(rc == 0); 3189 CU_ASSERT(g_io_done == false); 3190 3191 g_abort_done = false; 3192 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 3193 3194 rc = spdk_bdev_abort(desc, io_ch, &io_ctx1, abort_done, NULL); 3195 CU_ASSERT(rc == 0); 3196 CU_ASSERT(g_io_done == true); 3197 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED); 3198 stub_complete_io(1); 3199 CU_ASSERT(g_abort_done == true); 3200 CU_ASSERT(g_abort_status == SPDK_BDEV_IO_STATUS_SUCCESS); 3201 3202 /* Test the case that the target I/O was not aborted because it completed 3203 * in the middle of execution of the abort. 
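	 *
	 * (Reading of the asserts below: the target I/O completes successfully
	 * before the driver-level abort resolves, so the stub then fails the abort
	 * I/O itself; the user's abort callback is nevertheless reported as
	 * SUCCESS, since the target I/O is no longer outstanding by the time the
	 * abort completes.)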
3204 */ 3205 g_io_done = false; 3206 3207 rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, &io_ctx1); 3208 CU_ASSERT(rc == 0); 3209 CU_ASSERT(g_io_done == false); 3210 3211 g_abort_done = false; 3212 g_io_exp_status = SPDK_BDEV_IO_STATUS_FAILED; 3213 3214 rc = spdk_bdev_abort(desc, io_ch, &io_ctx1, abort_done, NULL); 3215 CU_ASSERT(rc == 0); 3216 CU_ASSERT(g_io_done == false); 3217 3218 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 3219 stub_complete_io(1); 3220 CU_ASSERT(g_io_done == true); 3221 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS); 3222 3223 g_io_exp_status = SPDK_BDEV_IO_STATUS_FAILED; 3224 stub_complete_io(1); 3225 CU_ASSERT(g_abort_done == true); 3226 CU_ASSERT(g_abort_status == SPDK_BDEV_IO_STATUS_SUCCESS); 3227 3228 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 3229 3230 spdk_put_io_channel(io_ch); 3231 spdk_bdev_close(desc); 3232 free_bdev(bdev); 3233 spdk_bdev_finish(bdev_fini_cb, NULL); 3234 poll_threads(); 3235 } 3236 3237 int 3238 main(int argc, char **argv) 3239 { 3240 CU_pSuite suite = NULL; 3241 unsigned int num_failures; 3242 3243 CU_set_error_action(CUEA_ABORT); 3244 CU_initialize_registry(); 3245 3246 suite = CU_add_suite("bdev", null_init, null_clean); 3247 3248 CU_ADD_TEST(suite, bytes_to_blocks_test); 3249 CU_ADD_TEST(suite, num_blocks_test); 3250 CU_ADD_TEST(suite, io_valid_test); 3251 CU_ADD_TEST(suite, open_write_test); 3252 CU_ADD_TEST(suite, alias_add_del_test); 3253 CU_ADD_TEST(suite, get_device_stat_test); 3254 CU_ADD_TEST(suite, bdev_io_types_test); 3255 CU_ADD_TEST(suite, bdev_io_wait_test); 3256 CU_ADD_TEST(suite, bdev_io_spans_boundary_test); 3257 CU_ADD_TEST(suite, bdev_io_split_test); 3258 CU_ADD_TEST(suite, bdev_io_split_with_io_wait); 3259 CU_ADD_TEST(suite, bdev_io_alignment_with_boundary); 3260 CU_ADD_TEST(suite, bdev_io_alignment); 3261 CU_ADD_TEST(suite, bdev_histograms); 3262 CU_ADD_TEST(suite, bdev_write_zeroes); 3263 CU_ADD_TEST(suite, bdev_compare_and_write); 3264 CU_ADD_TEST(suite, bdev_compare); 3265 CU_ADD_TEST(suite, bdev_open_while_hotremove); 3266 CU_ADD_TEST(suite, bdev_close_while_hotremove); 3267 CU_ADD_TEST(suite, bdev_open_ext); 3268 CU_ADD_TEST(suite, bdev_set_io_timeout); 3269 CU_ADD_TEST(suite, lba_range_overlap); 3270 CU_ADD_TEST(suite, lock_lba_range_check_ranges); 3271 CU_ADD_TEST(suite, lock_lba_range_with_io_outstanding); 3272 CU_ADD_TEST(suite, lock_lba_range_overlapped); 3273 CU_ADD_TEST(suite, bdev_io_abort); 3274 3275 allocate_threads(1); 3276 set_thread(0); 3277 3278 CU_basic_set_mode(CU_BRM_VERBOSE); 3279 CU_basic_run_tests(); 3280 num_failures = CU_get_number_of_failures(); 3281 CU_cleanup_registry(); 3282 3283 free_threads(); 3284 3285 return num_failures; 3286 } 3287