1 /*- 2 * BSD LICENSE 3 * 4 * Copyright (c) Intel Corporation. All rights reserved. 5 * Copyright (c) 2019 Mellanox Technologies LTD. All rights reserved. 6 * Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. 7 * 8 * Redistribution and use in source and binary forms, with or without 9 * modification, are permitted provided that the following conditions 10 * are met: 11 * 12 * * Redistributions of source code must retain the above copyright 13 * notice, this list of conditions and the following disclaimer. 14 * * Redistributions in binary form must reproduce the above copyright 15 * notice, this list of conditions and the following disclaimer in 16 * the documentation and/or other materials provided with the 17 * distribution. 18 * * Neither the name of Intel Corporation nor the names of its 19 * contributors may be used to endorse or promote products derived 20 * from this software without specific prior written permission. 21 * 22 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 23 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 24 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 25 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 26 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 27 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 28 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 29 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 30 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 31 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 32 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 33 */ 34 35 #include "spdk_cunit.h" 36 37 #include "common/lib/ut_multithread.c" 38 #include "unit/lib/json_mock.c" 39 40 #include "spdk/config.h" 41 /* HACK: disable VTune integration so the unit test doesn't need VTune headers and libs to build */ 42 #undef SPDK_CONFIG_VTUNE 43 44 #include "bdev/bdev.c" 45 46 DEFINE_STUB(spdk_notify_send, uint64_t, (const char *type, const char *ctx), 0); 47 DEFINE_STUB(spdk_notify_type_register, struct spdk_notify_type *, (const char *type), NULL); 48 49 int g_status; 50 int g_count; 51 enum spdk_bdev_event_type g_event_type1; 52 enum spdk_bdev_event_type g_event_type2; 53 struct spdk_histogram_data *g_histogram; 54 void *g_unregister_arg; 55 int g_unregister_rc; 56 57 void 58 spdk_scsi_nvme_translate(const struct spdk_bdev_io *bdev_io, 59 int *sc, int *sk, int *asc, int *ascq) 60 { 61 } 62 63 static int 64 null_init(void) 65 { 66 return 0; 67 } 68 69 static int 70 null_clean(void) 71 { 72 return 0; 73 } 74 75 static int 76 stub_destruct(void *ctx) 77 { 78 return 0; 79 } 80 81 struct ut_expected_io { 82 uint8_t type; 83 uint64_t offset; 84 uint64_t length; 85 int iovcnt; 86 struct iovec iov[BDEV_IO_NUM_CHILD_IOV]; 87 void *md_buf; 88 struct spdk_bdev_ext_io_opts *ext_io_opts; 89 TAILQ_ENTRY(ut_expected_io) link; 90 }; 91 92 struct bdev_ut_channel { 93 TAILQ_HEAD(, spdk_bdev_io) outstanding_io; 94 uint32_t outstanding_io_count; 95 TAILQ_HEAD(, ut_expected_io) expected_io; 96 }; 97 98 static bool g_io_done; 99 static struct spdk_bdev_io *g_bdev_io; 100 static enum spdk_bdev_io_status g_io_status; 101 static enum spdk_bdev_io_status g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 102 static uint32_t g_bdev_ut_io_device; 103 static struct bdev_ut_channel *g_bdev_ut_channel; 104 static void *g_compare_read_buf; 105 static uint32_t g_compare_read_buf_len; 106 static void *g_compare_write_buf; 107 static uint32_t g_compare_write_buf_len; 108 static bool g_abort_done; 109 static enum spdk_bdev_io_status g_abort_status; 110 static void *g_zcopy_read_buf; 111 static uint32_t g_zcopy_read_buf_len; 112 static void *g_zcopy_write_buf; 113 static uint32_t g_zcopy_write_buf_len; 114 static struct spdk_bdev_io *g_zcopy_bdev_io; 115 116 static struct ut_expected_io * 117 ut_alloc_expected_io(uint8_t type, uint64_t offset, uint64_t length, int iovcnt) 118 { 119 struct ut_expected_io *expected_io; 120 121 expected_io = calloc(1, sizeof(*expected_io)); 122 SPDK_CU_ASSERT_FATAL(expected_io != NULL); 123 124 expected_io->type = type; 125 expected_io->offset = offset; 126 expected_io->length = length; 127 expected_io->iovcnt = iovcnt; 128 129 return expected_io; 130 } 131 132 static void 133 ut_expected_io_set_iov(struct ut_expected_io *expected_io, int pos, void *base, size_t len) 134 { 135 expected_io->iov[pos].iov_base = base; 136 expected_io->iov[pos].iov_len = len; 137 } 138 139 static void 140 stub_submit_request(struct spdk_io_channel *_ch, struct spdk_bdev_io *bdev_io) 141 { 142 struct bdev_ut_channel *ch = spdk_io_channel_get_ctx(_ch); 143 struct ut_expected_io *expected_io; 144 struct iovec *iov, *expected_iov; 145 struct spdk_bdev_io *bio_to_abort; 146 int i; 147 148 g_bdev_io = bdev_io; 149 150 if (g_compare_read_buf && bdev_io->type == SPDK_BDEV_IO_TYPE_READ) { 151 uint32_t len = bdev_io->u.bdev.iovs[0].iov_len; 152 153 CU_ASSERT(bdev_io->u.bdev.iovcnt == 1); 154 CU_ASSERT(g_compare_read_buf_len == len); 155 memcpy(bdev_io->u.bdev.iovs[0].iov_base, g_compare_read_buf, len); 156 } 157 158 if (g_compare_write_buf && bdev_io->type == SPDK_BDEV_IO_TYPE_WRITE) { 159 uint32_t len = bdev_io->u.bdev.iovs[0].iov_len; 160 161 CU_ASSERT(bdev_io->u.bdev.iovcnt == 1); 162 CU_ASSERT(g_compare_write_buf_len == len); 163 memcpy(g_compare_write_buf, bdev_io->u.bdev.iovs[0].iov_base, len); 164 } 165 166 if (g_compare_read_buf && bdev_io->type == SPDK_BDEV_IO_TYPE_COMPARE) { 167 uint32_t len = bdev_io->u.bdev.iovs[0].iov_len; 168 169 CU_ASSERT(bdev_io->u.bdev.iovcnt == 1); 170 CU_ASSERT(g_compare_read_buf_len == len); 171 if (memcmp(bdev_io->u.bdev.iovs[0].iov_base, g_compare_read_buf, len)) { 172 g_io_exp_status = SPDK_BDEV_IO_STATUS_MISCOMPARE; 173 } 174 } 175 176 if (bdev_io->type == SPDK_BDEV_IO_TYPE_ABORT) { 177 if (g_io_exp_status == SPDK_BDEV_IO_STATUS_SUCCESS) { 178 TAILQ_FOREACH(bio_to_abort, &ch->outstanding_io, module_link) { 179 if (bio_to_abort == bdev_io->u.abort.bio_to_abort) { 180 TAILQ_REMOVE(&ch->outstanding_io, bio_to_abort, module_link); 181 ch->outstanding_io_count--; 182 spdk_bdev_io_complete(bio_to_abort, SPDK_BDEV_IO_STATUS_FAILED); 183 break; 184 } 185 } 186 } 187 } 188 189 if (bdev_io->type == SPDK_BDEV_IO_TYPE_ZCOPY) { 190 if (bdev_io->u.bdev.zcopy.start) { 191 g_zcopy_bdev_io = bdev_io; 192 if (bdev_io->u.bdev.zcopy.populate) { 193 /* Start of a read */ 194 CU_ASSERT(g_zcopy_read_buf != NULL); 195 CU_ASSERT(g_zcopy_read_buf_len > 0); 196 bdev_io->u.bdev.iovs[0].iov_base = g_zcopy_read_buf; 197 bdev_io->u.bdev.iovs[0].iov_len = g_zcopy_read_buf_len; 198 bdev_io->u.bdev.iovcnt = 1; 199 } else { 200 /* Start of a write */ 201 CU_ASSERT(g_zcopy_write_buf != NULL); 202 CU_ASSERT(g_zcopy_write_buf_len > 0); 203 bdev_io->u.bdev.iovs[0].iov_base = g_zcopy_write_buf; 204 bdev_io->u.bdev.iovs[0].iov_len = g_zcopy_write_buf_len; 205 bdev_io->u.bdev.iovcnt = 1; 206 } 207 } else { 208 if (bdev_io->u.bdev.zcopy.commit) { 209 /* End of write */ 210 CU_ASSERT(bdev_io->u.bdev.iovs[0].iov_base == g_zcopy_write_buf); 211 CU_ASSERT(bdev_io->u.bdev.iovs[0].iov_len == g_zcopy_write_buf_len); 212 CU_ASSERT(bdev_io->u.bdev.iovcnt == 1); 213 g_zcopy_write_buf = NULL; 214 g_zcopy_write_buf_len = 0; 215 } else { 216 /* End of read */ 217 CU_ASSERT(bdev_io->u.bdev.iovs[0].iov_base == g_zcopy_read_buf); 218 CU_ASSERT(bdev_io->u.bdev.iovs[0].iov_len == g_zcopy_read_buf_len); 219 CU_ASSERT(bdev_io->u.bdev.iovcnt == 1); 220 g_zcopy_read_buf = NULL; 221 g_zcopy_read_buf_len = 0; 222 } 223 } 224 } 225 226 TAILQ_INSERT_TAIL(&ch->outstanding_io, bdev_io, module_link); 227 ch->outstanding_io_count++; 228 229 expected_io = TAILQ_FIRST(&ch->expected_io); 230 if (expected_io == NULL) { 231 return; 232 } 233 TAILQ_REMOVE(&ch->expected_io, expected_io, link); 234 235 if (expected_io->type != SPDK_BDEV_IO_TYPE_INVALID) { 236 CU_ASSERT(bdev_io->type == expected_io->type); 237 } 238 239 if (expected_io->md_buf != NULL) { 240 CU_ASSERT(expected_io->md_buf == bdev_io->u.bdev.md_buf); 241 } 242 243 if (expected_io->length == 0) { 244 free(expected_io); 245 return; 246 } 247 248 CU_ASSERT(expected_io->offset == bdev_io->u.bdev.offset_blocks); 249 CU_ASSERT(expected_io->length = bdev_io->u.bdev.num_blocks); 250 251 if (expected_io->iovcnt == 0) { 252 free(expected_io); 253 /* UNMAP, WRITE_ZEROES and FLUSH don't have iovs, so we can just return now. */ 254 return; 255 } 256 257 CU_ASSERT(expected_io->iovcnt == bdev_io->u.bdev.iovcnt); 258 for (i = 0; i < expected_io->iovcnt; i++) { 259 expected_iov = &expected_io->iov[i]; 260 if (bdev_io->internal.orig_iovcnt == 0) { 261 iov = &bdev_io->u.bdev.iovs[i]; 262 } else { 263 iov = bdev_io->internal.orig_iovs; 264 } 265 CU_ASSERT(iov->iov_len == expected_iov->iov_len); 266 CU_ASSERT(iov->iov_base == expected_iov->iov_base); 267 } 268 269 if (expected_io->ext_io_opts) { 270 CU_ASSERT(expected_io->ext_io_opts == bdev_io->internal.ext_opts) 271 } 272 273 free(expected_io); 274 } 275 276 static void 277 stub_submit_request_get_buf_cb(struct spdk_io_channel *_ch, 278 struct spdk_bdev_io *bdev_io, bool success) 279 { 280 CU_ASSERT(success == true); 281 282 stub_submit_request(_ch, bdev_io); 283 } 284 285 static void 286 stub_submit_request_get_buf(struct spdk_io_channel *_ch, struct spdk_bdev_io *bdev_io) 287 { 288 spdk_bdev_io_get_buf(bdev_io, stub_submit_request_get_buf_cb, 289 bdev_io->u.bdev.num_blocks * bdev_io->bdev->blocklen); 290 } 291 292 static uint32_t 293 stub_complete_io(uint32_t num_to_complete) 294 { 295 struct bdev_ut_channel *ch = g_bdev_ut_channel; 296 struct spdk_bdev_io *bdev_io; 297 static enum spdk_bdev_io_status io_status; 298 uint32_t num_completed = 0; 299 300 while (num_completed < num_to_complete) { 301 if (TAILQ_EMPTY(&ch->outstanding_io)) { 302 break; 303 } 304 bdev_io = TAILQ_FIRST(&ch->outstanding_io); 305 TAILQ_REMOVE(&ch->outstanding_io, bdev_io, module_link); 306 ch->outstanding_io_count--; 307 io_status = g_io_exp_status == SPDK_BDEV_IO_STATUS_SUCCESS ? SPDK_BDEV_IO_STATUS_SUCCESS : 308 g_io_exp_status; 309 spdk_bdev_io_complete(bdev_io, io_status); 310 num_completed++; 311 } 312 313 return num_completed; 314 } 315 316 static struct spdk_io_channel * 317 bdev_ut_get_io_channel(void *ctx) 318 { 319 return spdk_get_io_channel(&g_bdev_ut_io_device); 320 } 321 322 static bool g_io_types_supported[SPDK_BDEV_NUM_IO_TYPES] = { 323 [SPDK_BDEV_IO_TYPE_READ] = true, 324 [SPDK_BDEV_IO_TYPE_WRITE] = true, 325 [SPDK_BDEV_IO_TYPE_COMPARE] = true, 326 [SPDK_BDEV_IO_TYPE_UNMAP] = true, 327 [SPDK_BDEV_IO_TYPE_FLUSH] = true, 328 [SPDK_BDEV_IO_TYPE_RESET] = true, 329 [SPDK_BDEV_IO_TYPE_NVME_ADMIN] = true, 330 [SPDK_BDEV_IO_TYPE_NVME_IO] = true, 331 [SPDK_BDEV_IO_TYPE_NVME_IO_MD] = true, 332 [SPDK_BDEV_IO_TYPE_WRITE_ZEROES] = true, 333 [SPDK_BDEV_IO_TYPE_ZCOPY] = true, 334 [SPDK_BDEV_IO_TYPE_ABORT] = true, 335 }; 336 337 static void 338 ut_enable_io_type(enum spdk_bdev_io_type io_type, bool enable) 339 { 340 g_io_types_supported[io_type] = enable; 341 } 342 343 static bool 344 stub_io_type_supported(void *_bdev, enum spdk_bdev_io_type io_type) 345 { 346 return g_io_types_supported[io_type]; 347 } 348 349 static struct spdk_bdev_fn_table fn_table = { 350 .destruct = stub_destruct, 351 .submit_request = stub_submit_request, 352 .get_io_channel = bdev_ut_get_io_channel, 353 .io_type_supported = stub_io_type_supported, 354 }; 355 356 static int 357 bdev_ut_create_ch(void *io_device, void *ctx_buf) 358 { 359 struct bdev_ut_channel *ch = ctx_buf; 360 361 CU_ASSERT(g_bdev_ut_channel == NULL); 362 g_bdev_ut_channel = ch; 363 364 TAILQ_INIT(&ch->outstanding_io); 365 ch->outstanding_io_count = 0; 366 TAILQ_INIT(&ch->expected_io); 367 return 0; 368 } 369 370 static void 371 bdev_ut_destroy_ch(void *io_device, void *ctx_buf) 372 { 373 CU_ASSERT(g_bdev_ut_channel != NULL); 374 g_bdev_ut_channel = NULL; 375 } 376 377 struct spdk_bdev_module bdev_ut_if; 378 379 static int 380 bdev_ut_module_init(void) 381 { 382 spdk_io_device_register(&g_bdev_ut_io_device, bdev_ut_create_ch, bdev_ut_destroy_ch, 383 sizeof(struct bdev_ut_channel), NULL); 384 spdk_bdev_module_init_done(&bdev_ut_if); 385 return 0; 386 } 387 388 static void 389 bdev_ut_module_fini(void) 390 { 391 spdk_io_device_unregister(&g_bdev_ut_io_device, NULL); 392 } 393 394 struct spdk_bdev_module bdev_ut_if = { 395 .name = "bdev_ut", 396 .module_init = bdev_ut_module_init, 397 .module_fini = bdev_ut_module_fini, 398 .async_init = true, 399 }; 400 401 static void vbdev_ut_examine(struct spdk_bdev *bdev); 402 403 static int 404 vbdev_ut_module_init(void) 405 { 406 return 0; 407 } 408 409 static void 410 vbdev_ut_module_fini(void) 411 { 412 } 413 414 struct spdk_bdev_module vbdev_ut_if = { 415 .name = "vbdev_ut", 416 .module_init = vbdev_ut_module_init, 417 .module_fini = vbdev_ut_module_fini, 418 .examine_config = vbdev_ut_examine, 419 }; 420 421 SPDK_BDEV_MODULE_REGISTER(bdev_ut, &bdev_ut_if) 422 SPDK_BDEV_MODULE_REGISTER(vbdev_ut, &vbdev_ut_if) 423 424 static void 425 vbdev_ut_examine(struct spdk_bdev *bdev) 426 { 427 spdk_bdev_module_examine_done(&vbdev_ut_if); 428 } 429 430 static struct spdk_bdev * 431 allocate_bdev(char *name) 432 { 433 struct spdk_bdev *bdev; 434 int rc; 435 436 bdev = calloc(1, sizeof(*bdev)); 437 SPDK_CU_ASSERT_FATAL(bdev != NULL); 438 439 bdev->name = name; 440 bdev->fn_table = &fn_table; 441 bdev->module = &bdev_ut_if; 442 bdev->blockcnt = 1024; 443 bdev->blocklen = 512; 444 445 rc = spdk_bdev_register(bdev); 446 CU_ASSERT(rc == 0); 447 448 return bdev; 449 } 450 451 static struct spdk_bdev * 452 allocate_vbdev(char *name) 453 { 454 struct spdk_bdev *bdev; 455 int rc; 456 457 bdev = calloc(1, sizeof(*bdev)); 458 SPDK_CU_ASSERT_FATAL(bdev != NULL); 459 460 bdev->name = name; 461 bdev->fn_table = &fn_table; 462 bdev->module = &vbdev_ut_if; 463 464 rc = spdk_bdev_register(bdev); 465 CU_ASSERT(rc == 0); 466 467 return bdev; 468 } 469 470 static void 471 free_bdev(struct spdk_bdev *bdev) 472 { 473 spdk_bdev_unregister(bdev, NULL, NULL); 474 poll_threads(); 475 memset(bdev, 0xFF, sizeof(*bdev)); 476 free(bdev); 477 } 478 479 static void 480 free_vbdev(struct spdk_bdev *bdev) 481 { 482 spdk_bdev_unregister(bdev, NULL, NULL); 483 poll_threads(); 484 memset(bdev, 0xFF, sizeof(*bdev)); 485 free(bdev); 486 } 487 488 static void 489 get_device_stat_cb(struct spdk_bdev *bdev, struct spdk_bdev_io_stat *stat, void *cb_arg, int rc) 490 { 491 const char *bdev_name; 492 493 CU_ASSERT(bdev != NULL); 494 CU_ASSERT(rc == 0); 495 bdev_name = spdk_bdev_get_name(bdev); 496 CU_ASSERT_STRING_EQUAL(bdev_name, "bdev0"); 497 498 free(stat); 499 500 *(bool *)cb_arg = true; 501 } 502 503 static void 504 bdev_unregister_cb(void *cb_arg, int rc) 505 { 506 g_unregister_arg = cb_arg; 507 g_unregister_rc = rc; 508 } 509 510 static void 511 bdev_ut_event_cb(enum spdk_bdev_event_type type, struct spdk_bdev *bdev, void *event_ctx) 512 { 513 } 514 515 static void 516 bdev_open_cb1(enum spdk_bdev_event_type type, struct spdk_bdev *bdev, void *event_ctx) 517 { 518 struct spdk_bdev_desc *desc = *(struct spdk_bdev_desc **)event_ctx; 519 520 g_event_type1 = type; 521 if (SPDK_BDEV_EVENT_REMOVE == type) { 522 spdk_bdev_close(desc); 523 } 524 } 525 526 static void 527 bdev_open_cb2(enum spdk_bdev_event_type type, struct spdk_bdev *bdev, void *event_ctx) 528 { 529 struct spdk_bdev_desc *desc = *(struct spdk_bdev_desc **)event_ctx; 530 531 g_event_type2 = type; 532 if (SPDK_BDEV_EVENT_REMOVE == type) { 533 spdk_bdev_close(desc); 534 } 535 } 536 537 static void 538 get_device_stat_test(void) 539 { 540 struct spdk_bdev *bdev; 541 struct spdk_bdev_io_stat *stat; 542 bool done; 543 544 bdev = allocate_bdev("bdev0"); 545 stat = calloc(1, sizeof(struct spdk_bdev_io_stat)); 546 if (stat == NULL) { 547 free_bdev(bdev); 548 return; 549 } 550 551 done = false; 552 spdk_bdev_get_device_stat(bdev, stat, get_device_stat_cb, &done); 553 while (!done) { poll_threads(); } 554 555 free_bdev(bdev); 556 } 557 558 static void 559 open_write_test(void) 560 { 561 struct spdk_bdev *bdev[9]; 562 struct spdk_bdev_desc *desc[9] = {}; 563 int rc; 564 565 /* 566 * Create a tree of bdevs to test various open w/ write cases. 567 * 568 * bdev0 through bdev3 are physical block devices, such as NVMe 569 * namespaces or Ceph block devices. 570 * 571 * bdev4 is a virtual bdev with multiple base bdevs. This models 572 * caching or RAID use cases. 573 * 574 * bdev5 through bdev7 are all virtual bdevs with the same base 575 * bdev (except bdev7). This models partitioning or logical volume 576 * use cases. 577 * 578 * bdev7 is a virtual bdev with multiple base bdevs. One of base bdevs 579 * (bdev2) is shared with other virtual bdevs: bdev5 and bdev6. This 580 * models caching, RAID, partitioning or logical volumes use cases. 581 * 582 * bdev8 is a virtual bdev with multiple base bdevs, but these 583 * base bdevs are themselves virtual bdevs. 584 * 585 * bdev8 586 * | 587 * +----------+ 588 * | | 589 * bdev4 bdev5 bdev6 bdev7 590 * | | | | 591 * +---+---+ +---+ + +---+---+ 592 * | | \ | / \ 593 * bdev0 bdev1 bdev2 bdev3 594 */ 595 596 bdev[0] = allocate_bdev("bdev0"); 597 rc = spdk_bdev_module_claim_bdev(bdev[0], NULL, &bdev_ut_if); 598 CU_ASSERT(rc == 0); 599 600 bdev[1] = allocate_bdev("bdev1"); 601 rc = spdk_bdev_module_claim_bdev(bdev[1], NULL, &bdev_ut_if); 602 CU_ASSERT(rc == 0); 603 604 bdev[2] = allocate_bdev("bdev2"); 605 rc = spdk_bdev_module_claim_bdev(bdev[2], NULL, &bdev_ut_if); 606 CU_ASSERT(rc == 0); 607 608 bdev[3] = allocate_bdev("bdev3"); 609 rc = spdk_bdev_module_claim_bdev(bdev[3], NULL, &bdev_ut_if); 610 CU_ASSERT(rc == 0); 611 612 bdev[4] = allocate_vbdev("bdev4"); 613 rc = spdk_bdev_module_claim_bdev(bdev[4], NULL, &bdev_ut_if); 614 CU_ASSERT(rc == 0); 615 616 bdev[5] = allocate_vbdev("bdev5"); 617 rc = spdk_bdev_module_claim_bdev(bdev[5], NULL, &bdev_ut_if); 618 CU_ASSERT(rc == 0); 619 620 bdev[6] = allocate_vbdev("bdev6"); 621 622 bdev[7] = allocate_vbdev("bdev7"); 623 624 bdev[8] = allocate_vbdev("bdev8"); 625 626 /* Open bdev0 read-only. This should succeed. */ 627 rc = spdk_bdev_open_ext("bdev0", false, bdev_ut_event_cb, NULL, &desc[0]); 628 CU_ASSERT(rc == 0); 629 SPDK_CU_ASSERT_FATAL(desc[0] != NULL); 630 CU_ASSERT(bdev[0] == spdk_bdev_desc_get_bdev(desc[0])); 631 spdk_bdev_close(desc[0]); 632 633 /* 634 * Open bdev1 read/write. This should fail since bdev1 has been claimed 635 * by a vbdev module. 636 */ 637 rc = spdk_bdev_open_ext("bdev1", true, bdev_ut_event_cb, NULL, &desc[1]); 638 CU_ASSERT(rc == -EPERM); 639 640 /* 641 * Open bdev4 read/write. This should fail since bdev3 has been claimed 642 * by a vbdev module. 643 */ 644 rc = spdk_bdev_open_ext("bdev4", true, bdev_ut_event_cb, NULL, &desc[4]); 645 CU_ASSERT(rc == -EPERM); 646 647 /* Open bdev4 read-only. This should succeed. */ 648 rc = spdk_bdev_open_ext("bdev4", false, bdev_ut_event_cb, NULL, &desc[4]); 649 CU_ASSERT(rc == 0); 650 SPDK_CU_ASSERT_FATAL(desc[4] != NULL); 651 CU_ASSERT(bdev[4] == spdk_bdev_desc_get_bdev(desc[4])); 652 spdk_bdev_close(desc[4]); 653 654 /* 655 * Open bdev8 read/write. This should succeed since it is a leaf 656 * bdev. 657 */ 658 rc = spdk_bdev_open_ext("bdev8", true, bdev_ut_event_cb, NULL, &desc[8]); 659 CU_ASSERT(rc == 0); 660 SPDK_CU_ASSERT_FATAL(desc[8] != NULL); 661 CU_ASSERT(bdev[8] == spdk_bdev_desc_get_bdev(desc[8])); 662 spdk_bdev_close(desc[8]); 663 664 /* 665 * Open bdev5 read/write. This should fail since bdev4 has been claimed 666 * by a vbdev module. 667 */ 668 rc = spdk_bdev_open_ext("bdev5", true, bdev_ut_event_cb, NULL, &desc[5]); 669 CU_ASSERT(rc == -EPERM); 670 671 /* Open bdev4 read-only. This should succeed. */ 672 rc = spdk_bdev_open_ext("bdev5", false, bdev_ut_event_cb, NULL, &desc[5]); 673 CU_ASSERT(rc == 0); 674 SPDK_CU_ASSERT_FATAL(desc[5] != NULL); 675 CU_ASSERT(bdev[5] == spdk_bdev_desc_get_bdev(desc[5])); 676 spdk_bdev_close(desc[5]); 677 678 free_vbdev(bdev[8]); 679 680 free_vbdev(bdev[5]); 681 free_vbdev(bdev[6]); 682 free_vbdev(bdev[7]); 683 684 free_vbdev(bdev[4]); 685 686 free_bdev(bdev[0]); 687 free_bdev(bdev[1]); 688 free_bdev(bdev[2]); 689 free_bdev(bdev[3]); 690 } 691 692 static void 693 claim_test(void) 694 { 695 struct spdk_bdev *bdev; 696 struct spdk_bdev_desc *desc, *open_desc; 697 int rc; 698 uint32_t count; 699 700 /* 701 * A vbdev that uses a read-only bdev may need it to remain read-only. 702 * To do so, it opens the bdev read-only, then claims it without 703 * passing a spdk_bdev_desc. 704 */ 705 bdev = allocate_bdev("bdev0"); 706 rc = spdk_bdev_open_ext("bdev0", false, bdev_ut_event_cb, NULL, &desc); 707 CU_ASSERT(rc == 0); 708 CU_ASSERT(desc->write == false); 709 710 rc = spdk_bdev_module_claim_bdev(bdev, NULL, &bdev_ut_if); 711 CU_ASSERT(rc == 0); 712 CU_ASSERT(bdev->internal.claim_module == &bdev_ut_if); 713 714 /* There should be only one open descriptor and it should still be ro */ 715 count = 0; 716 TAILQ_FOREACH(open_desc, &bdev->internal.open_descs, link) { 717 CU_ASSERT(open_desc == desc); 718 CU_ASSERT(!open_desc->write); 719 count++; 720 } 721 CU_ASSERT(count == 1); 722 723 /* A read-only bdev is upgraded to read-write if desc is passed. */ 724 spdk_bdev_module_release_bdev(bdev); 725 rc = spdk_bdev_module_claim_bdev(bdev, desc, &bdev_ut_if); 726 CU_ASSERT(rc == 0); 727 CU_ASSERT(bdev->internal.claim_module == &bdev_ut_if); 728 729 /* There should be only one open descriptor and it should be rw */ 730 count = 0; 731 TAILQ_FOREACH(open_desc, &bdev->internal.open_descs, link) { 732 CU_ASSERT(open_desc == desc); 733 CU_ASSERT(open_desc->write); 734 count++; 735 } 736 CU_ASSERT(count == 1); 737 738 spdk_bdev_close(desc); 739 free_bdev(bdev); 740 } 741 742 static void 743 bytes_to_blocks_test(void) 744 { 745 struct spdk_bdev bdev; 746 uint64_t offset_blocks, num_blocks; 747 748 memset(&bdev, 0, sizeof(bdev)); 749 750 bdev.blocklen = 512; 751 752 /* All parameters valid */ 753 offset_blocks = 0; 754 num_blocks = 0; 755 CU_ASSERT(bdev_bytes_to_blocks(&bdev, 512, &offset_blocks, 1024, &num_blocks) == 0); 756 CU_ASSERT(offset_blocks == 1); 757 CU_ASSERT(num_blocks == 2); 758 759 /* Offset not a block multiple */ 760 CU_ASSERT(bdev_bytes_to_blocks(&bdev, 3, &offset_blocks, 512, &num_blocks) != 0); 761 762 /* Length not a block multiple */ 763 CU_ASSERT(bdev_bytes_to_blocks(&bdev, 512, &offset_blocks, 3, &num_blocks) != 0); 764 765 /* In case blocklen not the power of two */ 766 bdev.blocklen = 100; 767 CU_ASSERT(bdev_bytes_to_blocks(&bdev, 100, &offset_blocks, 200, &num_blocks) == 0); 768 CU_ASSERT(offset_blocks == 1); 769 CU_ASSERT(num_blocks == 2); 770 771 /* Offset not a block multiple */ 772 CU_ASSERT(bdev_bytes_to_blocks(&bdev, 3, &offset_blocks, 100, &num_blocks) != 0); 773 774 /* Length not a block multiple */ 775 CU_ASSERT(bdev_bytes_to_blocks(&bdev, 100, &offset_blocks, 3, &num_blocks) != 0); 776 } 777 778 static void 779 num_blocks_test(void) 780 { 781 struct spdk_bdev bdev; 782 struct spdk_bdev_desc *desc = NULL; 783 int rc; 784 785 memset(&bdev, 0, sizeof(bdev)); 786 bdev.name = "num_blocks"; 787 bdev.fn_table = &fn_table; 788 bdev.module = &bdev_ut_if; 789 spdk_bdev_register(&bdev); 790 spdk_bdev_notify_blockcnt_change(&bdev, 50); 791 792 /* Growing block number */ 793 CU_ASSERT(spdk_bdev_notify_blockcnt_change(&bdev, 70) == 0); 794 /* Shrinking block number */ 795 CU_ASSERT(spdk_bdev_notify_blockcnt_change(&bdev, 30) == 0); 796 797 rc = spdk_bdev_open_ext("num_blocks", false, bdev_open_cb1, &desc, &desc); 798 CU_ASSERT(rc == 0); 799 SPDK_CU_ASSERT_FATAL(desc != NULL); 800 CU_ASSERT(&bdev == spdk_bdev_desc_get_bdev(desc)); 801 802 /* Growing block number */ 803 CU_ASSERT(spdk_bdev_notify_blockcnt_change(&bdev, 80) == 0); 804 /* Shrinking block number */ 805 CU_ASSERT(spdk_bdev_notify_blockcnt_change(&bdev, 20) != 0); 806 807 g_event_type1 = 0xFF; 808 /* Growing block number */ 809 CU_ASSERT(spdk_bdev_notify_blockcnt_change(&bdev, 90) == 0); 810 811 poll_threads(); 812 CU_ASSERT_EQUAL(g_event_type1, SPDK_BDEV_EVENT_RESIZE); 813 814 g_event_type1 = 0xFF; 815 /* Growing block number and closing */ 816 CU_ASSERT(spdk_bdev_notify_blockcnt_change(&bdev, 100) == 0); 817 818 spdk_bdev_close(desc); 819 spdk_bdev_unregister(&bdev, NULL, NULL); 820 821 poll_threads(); 822 823 /* Callback is not called for closed device */ 824 CU_ASSERT_EQUAL(g_event_type1, 0xFF); 825 } 826 827 static void 828 io_valid_test(void) 829 { 830 struct spdk_bdev bdev; 831 832 memset(&bdev, 0, sizeof(bdev)); 833 834 bdev.blocklen = 512; 835 CU_ASSERT(pthread_mutex_init(&bdev.internal.mutex, NULL) == 0); 836 837 spdk_bdev_notify_blockcnt_change(&bdev, 100); 838 839 /* All parameters valid */ 840 CU_ASSERT(bdev_io_valid_blocks(&bdev, 1, 2) == true); 841 842 /* Last valid block */ 843 CU_ASSERT(bdev_io_valid_blocks(&bdev, 99, 1) == true); 844 845 /* Offset past end of bdev */ 846 CU_ASSERT(bdev_io_valid_blocks(&bdev, 100, 1) == false); 847 848 /* Offset + length past end of bdev */ 849 CU_ASSERT(bdev_io_valid_blocks(&bdev, 99, 2) == false); 850 851 /* Offset near end of uint64_t range (2^64 - 1) */ 852 CU_ASSERT(bdev_io_valid_blocks(&bdev, 18446744073709551615ULL, 1) == false); 853 854 CU_ASSERT(pthread_mutex_destroy(&bdev.internal.mutex) == 0); 855 } 856 857 static void 858 alias_add_del_test(void) 859 { 860 struct spdk_bdev *bdev[3]; 861 int rc; 862 863 /* Creating and registering bdevs */ 864 bdev[0] = allocate_bdev("bdev0"); 865 SPDK_CU_ASSERT_FATAL(bdev[0] != 0); 866 867 bdev[1] = allocate_bdev("bdev1"); 868 SPDK_CU_ASSERT_FATAL(bdev[1] != 0); 869 870 bdev[2] = allocate_bdev("bdev2"); 871 SPDK_CU_ASSERT_FATAL(bdev[2] != 0); 872 873 poll_threads(); 874 875 /* 876 * Trying adding an alias identical to name. 877 * Alias is identical to name, so it can not be added to aliases list 878 */ 879 rc = spdk_bdev_alias_add(bdev[0], bdev[0]->name); 880 CU_ASSERT(rc == -EEXIST); 881 882 /* 883 * Trying to add empty alias, 884 * this one should fail 885 */ 886 rc = spdk_bdev_alias_add(bdev[0], NULL); 887 CU_ASSERT(rc == -EINVAL); 888 889 /* Trying adding same alias to two different registered bdevs */ 890 891 /* Alias is used first time, so this one should pass */ 892 rc = spdk_bdev_alias_add(bdev[0], "proper alias 0"); 893 CU_ASSERT(rc == 0); 894 895 /* Alias was added to another bdev, so this one should fail */ 896 rc = spdk_bdev_alias_add(bdev[1], "proper alias 0"); 897 CU_ASSERT(rc == -EEXIST); 898 899 /* Alias is used first time, so this one should pass */ 900 rc = spdk_bdev_alias_add(bdev[1], "proper alias 1"); 901 CU_ASSERT(rc == 0); 902 903 /* Trying removing an alias from registered bdevs */ 904 905 /* Alias is not on a bdev aliases list, so this one should fail */ 906 rc = spdk_bdev_alias_del(bdev[0], "not existing"); 907 CU_ASSERT(rc == -ENOENT); 908 909 /* Alias is present on a bdev aliases list, so this one should pass */ 910 rc = spdk_bdev_alias_del(bdev[0], "proper alias 0"); 911 CU_ASSERT(rc == 0); 912 913 /* Alias is present on a bdev aliases list, so this one should pass */ 914 rc = spdk_bdev_alias_del(bdev[1], "proper alias 1"); 915 CU_ASSERT(rc == 0); 916 917 /* Trying to remove name instead of alias, so this one should fail, name cannot be changed or removed */ 918 rc = spdk_bdev_alias_del(bdev[0], bdev[0]->name); 919 CU_ASSERT(rc != 0); 920 921 /* Trying to del all alias from empty alias list */ 922 spdk_bdev_alias_del_all(bdev[2]); 923 SPDK_CU_ASSERT_FATAL(TAILQ_EMPTY(&bdev[2]->aliases)); 924 925 /* Trying to del all alias from non-empty alias list */ 926 rc = spdk_bdev_alias_add(bdev[2], "alias0"); 927 CU_ASSERT(rc == 0); 928 rc = spdk_bdev_alias_add(bdev[2], "alias1"); 929 CU_ASSERT(rc == 0); 930 spdk_bdev_alias_del_all(bdev[2]); 931 CU_ASSERT(TAILQ_EMPTY(&bdev[2]->aliases)); 932 933 /* Unregister and free bdevs */ 934 spdk_bdev_unregister(bdev[0], NULL, NULL); 935 spdk_bdev_unregister(bdev[1], NULL, NULL); 936 spdk_bdev_unregister(bdev[2], NULL, NULL); 937 938 poll_threads(); 939 940 free(bdev[0]); 941 free(bdev[1]); 942 free(bdev[2]); 943 } 944 945 static void 946 io_done(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg) 947 { 948 g_io_done = true; 949 g_io_status = bdev_io->internal.status; 950 if ((bdev_io->type == SPDK_BDEV_IO_TYPE_ZCOPY) && 951 (bdev_io->u.bdev.zcopy.start)) { 952 g_zcopy_bdev_io = bdev_io; 953 } else { 954 spdk_bdev_free_io(bdev_io); 955 g_zcopy_bdev_io = NULL; 956 } 957 } 958 959 static void 960 bdev_init_cb(void *arg, int rc) 961 { 962 CU_ASSERT(rc == 0); 963 } 964 965 static void 966 bdev_fini_cb(void *arg) 967 { 968 } 969 970 struct bdev_ut_io_wait_entry { 971 struct spdk_bdev_io_wait_entry entry; 972 struct spdk_io_channel *io_ch; 973 struct spdk_bdev_desc *desc; 974 bool submitted; 975 }; 976 977 static void 978 io_wait_cb(void *arg) 979 { 980 struct bdev_ut_io_wait_entry *entry = arg; 981 int rc; 982 983 rc = spdk_bdev_read_blocks(entry->desc, entry->io_ch, NULL, 0, 1, io_done, NULL); 984 CU_ASSERT(rc == 0); 985 entry->submitted = true; 986 } 987 988 static void 989 bdev_io_types_test(void) 990 { 991 struct spdk_bdev *bdev; 992 struct spdk_bdev_desc *desc = NULL; 993 struct spdk_io_channel *io_ch; 994 struct spdk_bdev_opts bdev_opts = {}; 995 int rc; 996 997 spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts)); 998 bdev_opts.bdev_io_pool_size = 4; 999 bdev_opts.bdev_io_cache_size = 2; 1000 1001 rc = spdk_bdev_set_opts(&bdev_opts); 1002 CU_ASSERT(rc == 0); 1003 spdk_bdev_initialize(bdev_init_cb, NULL); 1004 poll_threads(); 1005 1006 bdev = allocate_bdev("bdev0"); 1007 1008 rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc); 1009 CU_ASSERT(rc == 0); 1010 poll_threads(); 1011 SPDK_CU_ASSERT_FATAL(desc != NULL); 1012 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 1013 io_ch = spdk_bdev_get_io_channel(desc); 1014 CU_ASSERT(io_ch != NULL); 1015 1016 /* WRITE and WRITE ZEROES are not supported */ 1017 ut_enable_io_type(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, false); 1018 ut_enable_io_type(SPDK_BDEV_IO_TYPE_WRITE, false); 1019 rc = spdk_bdev_write_zeroes_blocks(desc, io_ch, 0, 128, io_done, NULL); 1020 CU_ASSERT(rc == -ENOTSUP); 1021 ut_enable_io_type(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, true); 1022 ut_enable_io_type(SPDK_BDEV_IO_TYPE_WRITE, true); 1023 1024 /* NVME_IO, NVME_IO_MD and NVME_ADMIN are not supported */ 1025 ut_enable_io_type(SPDK_BDEV_IO_TYPE_NVME_IO, false); 1026 ut_enable_io_type(SPDK_BDEV_IO_TYPE_NVME_IO_MD, false); 1027 ut_enable_io_type(SPDK_BDEV_IO_TYPE_NVME_ADMIN, false); 1028 rc = spdk_bdev_nvme_io_passthru(desc, io_ch, NULL, NULL, 0, NULL, NULL); 1029 CU_ASSERT(rc == -ENOTSUP); 1030 rc = spdk_bdev_nvme_io_passthru_md(desc, io_ch, NULL, NULL, 0, NULL, 0, NULL, NULL); 1031 CU_ASSERT(rc == -ENOTSUP); 1032 rc = spdk_bdev_nvme_admin_passthru(desc, io_ch, NULL, NULL, 0, NULL, NULL); 1033 CU_ASSERT(rc == -ENOTSUP); 1034 ut_enable_io_type(SPDK_BDEV_IO_TYPE_NVME_IO, true); 1035 ut_enable_io_type(SPDK_BDEV_IO_TYPE_NVME_IO_MD, true); 1036 ut_enable_io_type(SPDK_BDEV_IO_TYPE_NVME_ADMIN, true); 1037 1038 spdk_put_io_channel(io_ch); 1039 spdk_bdev_close(desc); 1040 free_bdev(bdev); 1041 spdk_bdev_finish(bdev_fini_cb, NULL); 1042 poll_threads(); 1043 } 1044 1045 static void 1046 bdev_io_wait_test(void) 1047 { 1048 struct spdk_bdev *bdev; 1049 struct spdk_bdev_desc *desc = NULL; 1050 struct spdk_io_channel *io_ch; 1051 struct spdk_bdev_opts bdev_opts = {}; 1052 struct bdev_ut_io_wait_entry io_wait_entry; 1053 struct bdev_ut_io_wait_entry io_wait_entry2; 1054 int rc; 1055 1056 spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts)); 1057 bdev_opts.bdev_io_pool_size = 4; 1058 bdev_opts.bdev_io_cache_size = 2; 1059 1060 rc = spdk_bdev_set_opts(&bdev_opts); 1061 CU_ASSERT(rc == 0); 1062 spdk_bdev_initialize(bdev_init_cb, NULL); 1063 poll_threads(); 1064 1065 bdev = allocate_bdev("bdev0"); 1066 1067 rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc); 1068 CU_ASSERT(rc == 0); 1069 poll_threads(); 1070 SPDK_CU_ASSERT_FATAL(desc != NULL); 1071 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 1072 io_ch = spdk_bdev_get_io_channel(desc); 1073 CU_ASSERT(io_ch != NULL); 1074 1075 rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, NULL); 1076 CU_ASSERT(rc == 0); 1077 rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, NULL); 1078 CU_ASSERT(rc == 0); 1079 rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, NULL); 1080 CU_ASSERT(rc == 0); 1081 rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, NULL); 1082 CU_ASSERT(rc == 0); 1083 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 4); 1084 1085 rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, NULL); 1086 CU_ASSERT(rc == -ENOMEM); 1087 1088 io_wait_entry.entry.bdev = bdev; 1089 io_wait_entry.entry.cb_fn = io_wait_cb; 1090 io_wait_entry.entry.cb_arg = &io_wait_entry; 1091 io_wait_entry.io_ch = io_ch; 1092 io_wait_entry.desc = desc; 1093 io_wait_entry.submitted = false; 1094 /* Cannot use the same io_wait_entry for two different calls. */ 1095 memcpy(&io_wait_entry2, &io_wait_entry, sizeof(io_wait_entry)); 1096 io_wait_entry2.entry.cb_arg = &io_wait_entry2; 1097 1098 /* Queue two I/O waits. */ 1099 rc = spdk_bdev_queue_io_wait(bdev, io_ch, &io_wait_entry.entry); 1100 CU_ASSERT(rc == 0); 1101 CU_ASSERT(io_wait_entry.submitted == false); 1102 rc = spdk_bdev_queue_io_wait(bdev, io_ch, &io_wait_entry2.entry); 1103 CU_ASSERT(rc == 0); 1104 CU_ASSERT(io_wait_entry2.submitted == false); 1105 1106 stub_complete_io(1); 1107 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 4); 1108 CU_ASSERT(io_wait_entry.submitted == true); 1109 CU_ASSERT(io_wait_entry2.submitted == false); 1110 1111 stub_complete_io(1); 1112 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 4); 1113 CU_ASSERT(io_wait_entry2.submitted == true); 1114 1115 stub_complete_io(4); 1116 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 1117 1118 spdk_put_io_channel(io_ch); 1119 spdk_bdev_close(desc); 1120 free_bdev(bdev); 1121 spdk_bdev_finish(bdev_fini_cb, NULL); 1122 poll_threads(); 1123 } 1124 1125 static void 1126 bdev_io_spans_split_test(void) 1127 { 1128 struct spdk_bdev bdev; 1129 struct spdk_bdev_io bdev_io; 1130 struct iovec iov[BDEV_IO_NUM_CHILD_IOV]; 1131 1132 memset(&bdev, 0, sizeof(bdev)); 1133 bdev_io.u.bdev.iovs = iov; 1134 1135 bdev_io.type = SPDK_BDEV_IO_TYPE_READ; 1136 bdev.optimal_io_boundary = 0; 1137 bdev.max_segment_size = 0; 1138 bdev.max_num_segments = 0; 1139 bdev_io.bdev = &bdev; 1140 1141 /* bdev has no optimal_io_boundary and max_size set - so this should return false. */ 1142 CU_ASSERT(bdev_io_should_split(&bdev_io) == false); 1143 1144 bdev.split_on_optimal_io_boundary = true; 1145 bdev.optimal_io_boundary = 32; 1146 bdev_io.type = SPDK_BDEV_IO_TYPE_RESET; 1147 1148 /* RESETs are not based on LBAs - so this should return false. */ 1149 CU_ASSERT(bdev_io_should_split(&bdev_io) == false); 1150 1151 bdev_io.type = SPDK_BDEV_IO_TYPE_READ; 1152 bdev_io.u.bdev.offset_blocks = 0; 1153 bdev_io.u.bdev.num_blocks = 32; 1154 1155 /* This I/O run right up to, but does not cross, the boundary - so this should return false. */ 1156 CU_ASSERT(bdev_io_should_split(&bdev_io) == false); 1157 1158 bdev_io.u.bdev.num_blocks = 33; 1159 1160 /* This I/O spans a boundary. */ 1161 CU_ASSERT(bdev_io_should_split(&bdev_io) == true); 1162 1163 bdev_io.u.bdev.num_blocks = 32; 1164 bdev.max_segment_size = 512 * 32; 1165 bdev.max_num_segments = 1; 1166 bdev_io.u.bdev.iovcnt = 1; 1167 iov[0].iov_len = 512; 1168 1169 /* Does not cross and exceed max_size or max_segs */ 1170 CU_ASSERT(bdev_io_should_split(&bdev_io) == false); 1171 1172 bdev.split_on_optimal_io_boundary = false; 1173 bdev.max_segment_size = 512; 1174 bdev.max_num_segments = 1; 1175 bdev_io.u.bdev.iovcnt = 2; 1176 1177 /* Exceed max_segs */ 1178 CU_ASSERT(bdev_io_should_split(&bdev_io) == true); 1179 1180 bdev.max_num_segments = 2; 1181 iov[0].iov_len = 513; 1182 iov[1].iov_len = 512; 1183 1184 /* Exceed max_sizes */ 1185 CU_ASSERT(bdev_io_should_split(&bdev_io) == true); 1186 } 1187 1188 static void 1189 bdev_io_boundary_split_test(void) 1190 { 1191 struct spdk_bdev *bdev; 1192 struct spdk_bdev_desc *desc = NULL; 1193 struct spdk_io_channel *io_ch; 1194 struct spdk_bdev_opts bdev_opts = {}; 1195 struct iovec iov[BDEV_IO_NUM_CHILD_IOV * 2]; 1196 struct ut_expected_io *expected_io; 1197 void *md_buf = (void *)0xFF000000; 1198 uint64_t i; 1199 int rc; 1200 1201 spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts)); 1202 bdev_opts.bdev_io_pool_size = 512; 1203 bdev_opts.bdev_io_cache_size = 64; 1204 1205 rc = spdk_bdev_set_opts(&bdev_opts); 1206 CU_ASSERT(rc == 0); 1207 spdk_bdev_initialize(bdev_init_cb, NULL); 1208 1209 bdev = allocate_bdev("bdev0"); 1210 1211 rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc); 1212 CU_ASSERT(rc == 0); 1213 SPDK_CU_ASSERT_FATAL(desc != NULL); 1214 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 1215 io_ch = spdk_bdev_get_io_channel(desc); 1216 CU_ASSERT(io_ch != NULL); 1217 1218 bdev->optimal_io_boundary = 16; 1219 bdev->split_on_optimal_io_boundary = false; 1220 1221 g_io_done = false; 1222 1223 /* First test that the I/O does not get split if split_on_optimal_io_boundary == false. */ 1224 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 14, 8, 1); 1225 ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 8 * 512); 1226 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1227 1228 rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 8, io_done, NULL); 1229 CU_ASSERT(rc == 0); 1230 CU_ASSERT(g_io_done == false); 1231 1232 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 1233 stub_complete_io(1); 1234 CU_ASSERT(g_io_done == true); 1235 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 1236 1237 bdev->split_on_optimal_io_boundary = true; 1238 bdev->md_interleave = false; 1239 bdev->md_len = 8; 1240 1241 /* Now test that a single-vector command is split correctly. 1242 * Offset 14, length 8, payload 0xF000 1243 * Child - Offset 14, length 2, payload 0xF000 1244 * Child - Offset 16, length 6, payload 0xF000 + 2 * 512 1245 * 1246 * Set up the expected values before calling spdk_bdev_read_blocks 1247 */ 1248 g_io_done = false; 1249 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 14, 2, 1); 1250 expected_io->md_buf = md_buf; 1251 ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 2 * 512); 1252 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1253 1254 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 16, 6, 1); 1255 expected_io->md_buf = md_buf + 2 * 8; 1256 ut_expected_io_set_iov(expected_io, 0, (void *)(0xF000 + 2 * 512), 6 * 512); 1257 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1258 1259 /* spdk_bdev_read_blocks will submit the first child immediately. */ 1260 rc = spdk_bdev_read_blocks_with_md(desc, io_ch, (void *)0xF000, md_buf, 1261 14, 8, io_done, NULL); 1262 CU_ASSERT(rc == 0); 1263 CU_ASSERT(g_io_done == false); 1264 1265 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 1266 stub_complete_io(2); 1267 CU_ASSERT(g_io_done == true); 1268 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 1269 1270 /* Now set up a more complex, multi-vector command that needs to be split, 1271 * including splitting iovecs. 1272 */ 1273 iov[0].iov_base = (void *)0x10000; 1274 iov[0].iov_len = 512; 1275 iov[1].iov_base = (void *)0x20000; 1276 iov[1].iov_len = 20 * 512; 1277 iov[2].iov_base = (void *)0x30000; 1278 iov[2].iov_len = 11 * 512; 1279 1280 g_io_done = false; 1281 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 14, 2, 2); 1282 expected_io->md_buf = md_buf; 1283 ut_expected_io_set_iov(expected_io, 0, (void *)0x10000, 512); 1284 ut_expected_io_set_iov(expected_io, 1, (void *)0x20000, 512); 1285 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1286 1287 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 16, 16, 1); 1288 expected_io->md_buf = md_buf + 2 * 8; 1289 ut_expected_io_set_iov(expected_io, 0, (void *)(0x20000 + 512), 16 * 512); 1290 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1291 1292 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 32, 14, 2); 1293 expected_io->md_buf = md_buf + 18 * 8; 1294 ut_expected_io_set_iov(expected_io, 0, (void *)(0x20000 + 17 * 512), 3 * 512); 1295 ut_expected_io_set_iov(expected_io, 1, (void *)0x30000, 11 * 512); 1296 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1297 1298 rc = spdk_bdev_writev_blocks_with_md(desc, io_ch, iov, 3, md_buf, 1299 14, 32, io_done, NULL); 1300 CU_ASSERT(rc == 0); 1301 CU_ASSERT(g_io_done == false); 1302 1303 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 3); 1304 stub_complete_io(3); 1305 CU_ASSERT(g_io_done == true); 1306 1307 /* Test multi vector command that needs to be split by strip and then needs to be 1308 * split further due to the capacity of child iovs. 1309 */ 1310 for (i = 0; i < BDEV_IO_NUM_CHILD_IOV * 2; i++) { 1311 iov[i].iov_base = (void *)((i + 1) * 0x10000); 1312 iov[i].iov_len = 512; 1313 } 1314 1315 bdev->optimal_io_boundary = BDEV_IO_NUM_CHILD_IOV; 1316 g_io_done = false; 1317 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0, BDEV_IO_NUM_CHILD_IOV, 1318 BDEV_IO_NUM_CHILD_IOV); 1319 expected_io->md_buf = md_buf; 1320 for (i = 0; i < BDEV_IO_NUM_CHILD_IOV; i++) { 1321 ut_expected_io_set_iov(expected_io, i, (void *)((i + 1) * 0x10000), 512); 1322 } 1323 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1324 1325 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, BDEV_IO_NUM_CHILD_IOV, 1326 BDEV_IO_NUM_CHILD_IOV, BDEV_IO_NUM_CHILD_IOV); 1327 expected_io->md_buf = md_buf + BDEV_IO_NUM_CHILD_IOV * 8; 1328 for (i = 0; i < BDEV_IO_NUM_CHILD_IOV; i++) { 1329 ut_expected_io_set_iov(expected_io, i, 1330 (void *)((i + 1 + BDEV_IO_NUM_CHILD_IOV) * 0x10000), 512); 1331 } 1332 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1333 1334 rc = spdk_bdev_readv_blocks_with_md(desc, io_ch, iov, BDEV_IO_NUM_CHILD_IOV * 2, md_buf, 1335 0, BDEV_IO_NUM_CHILD_IOV * 2, io_done, NULL); 1336 CU_ASSERT(rc == 0); 1337 CU_ASSERT(g_io_done == false); 1338 1339 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 1340 stub_complete_io(1); 1341 CU_ASSERT(g_io_done == false); 1342 1343 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 1344 stub_complete_io(1); 1345 CU_ASSERT(g_io_done == true); 1346 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 1347 1348 /* Test multi vector command that needs to be split by strip and then needs to be 1349 * split further due to the capacity of child iovs. In this case, the length of 1350 * the rest of iovec array with an I/O boundary is the multiple of block size. 1351 */ 1352 1353 /* Fill iovec array for exactly one boundary. The iovec cnt for this boundary 1354 * is BDEV_IO_NUM_CHILD_IOV + 1, which exceeds the capacity of child iovs. 1355 */ 1356 for (i = 0; i < BDEV_IO_NUM_CHILD_IOV - 2; i++) { 1357 iov[i].iov_base = (void *)((i + 1) * 0x10000); 1358 iov[i].iov_len = 512; 1359 } 1360 for (i = BDEV_IO_NUM_CHILD_IOV - 2; i < BDEV_IO_NUM_CHILD_IOV; i++) { 1361 iov[i].iov_base = (void *)((i + 1) * 0x10000); 1362 iov[i].iov_len = 256; 1363 } 1364 iov[BDEV_IO_NUM_CHILD_IOV].iov_base = (void *)((BDEV_IO_NUM_CHILD_IOV + 1) * 0x10000); 1365 iov[BDEV_IO_NUM_CHILD_IOV].iov_len = 512; 1366 1367 /* Add an extra iovec to trigger split */ 1368 iov[BDEV_IO_NUM_CHILD_IOV + 1].iov_base = (void *)((BDEV_IO_NUM_CHILD_IOV + 2) * 0x10000); 1369 iov[BDEV_IO_NUM_CHILD_IOV + 1].iov_len = 512; 1370 1371 bdev->optimal_io_boundary = BDEV_IO_NUM_CHILD_IOV; 1372 g_io_done = false; 1373 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0, 1374 BDEV_IO_NUM_CHILD_IOV - 1, BDEV_IO_NUM_CHILD_IOV); 1375 expected_io->md_buf = md_buf; 1376 for (i = 0; i < BDEV_IO_NUM_CHILD_IOV - 2; i++) { 1377 ut_expected_io_set_iov(expected_io, i, 1378 (void *)((i + 1) * 0x10000), 512); 1379 } 1380 for (i = BDEV_IO_NUM_CHILD_IOV - 2; i < BDEV_IO_NUM_CHILD_IOV; i++) { 1381 ut_expected_io_set_iov(expected_io, i, 1382 (void *)((i + 1) * 0x10000), 256); 1383 } 1384 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1385 1386 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, BDEV_IO_NUM_CHILD_IOV - 1, 1387 1, 1); 1388 expected_io->md_buf = md_buf + (BDEV_IO_NUM_CHILD_IOV - 1) * 8; 1389 ut_expected_io_set_iov(expected_io, 0, 1390 (void *)((BDEV_IO_NUM_CHILD_IOV + 1) * 0x10000), 512); 1391 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1392 1393 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, BDEV_IO_NUM_CHILD_IOV, 1394 1, 1); 1395 expected_io->md_buf = md_buf + BDEV_IO_NUM_CHILD_IOV * 8; 1396 ut_expected_io_set_iov(expected_io, 0, 1397 (void *)((BDEV_IO_NUM_CHILD_IOV + 2) * 0x10000), 512); 1398 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1399 1400 rc = spdk_bdev_readv_blocks_with_md(desc, io_ch, iov, BDEV_IO_NUM_CHILD_IOV + 2, md_buf, 1401 0, BDEV_IO_NUM_CHILD_IOV + 1, io_done, NULL); 1402 CU_ASSERT(rc == 0); 1403 CU_ASSERT(g_io_done == false); 1404 1405 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 1406 stub_complete_io(1); 1407 CU_ASSERT(g_io_done == false); 1408 1409 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 1410 stub_complete_io(2); 1411 CU_ASSERT(g_io_done == true); 1412 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 1413 1414 /* Test multi vector command that needs to be split by strip and then needs to be 1415 * split further due to the capacity of child iovs, the child request offset should 1416 * be rewind to last aligned offset and go success without error. 1417 */ 1418 for (i = 0; i < BDEV_IO_NUM_CHILD_IOV - 1; i++) { 1419 iov[i].iov_base = (void *)((i + 1) * 0x10000); 1420 iov[i].iov_len = 512; 1421 } 1422 iov[BDEV_IO_NUM_CHILD_IOV - 1].iov_base = (void *)(BDEV_IO_NUM_CHILD_IOV * 0x10000); 1423 iov[BDEV_IO_NUM_CHILD_IOV - 1].iov_len = 256; 1424 1425 iov[BDEV_IO_NUM_CHILD_IOV].iov_base = (void *)((BDEV_IO_NUM_CHILD_IOV + 1) * 0x10000); 1426 iov[BDEV_IO_NUM_CHILD_IOV].iov_len = 256; 1427 1428 iov[BDEV_IO_NUM_CHILD_IOV + 1].iov_base = (void *)((BDEV_IO_NUM_CHILD_IOV + 2) * 0x10000); 1429 iov[BDEV_IO_NUM_CHILD_IOV + 1].iov_len = 512; 1430 1431 bdev->optimal_io_boundary = BDEV_IO_NUM_CHILD_IOV; 1432 g_io_done = false; 1433 g_io_status = 0; 1434 /* The first expected io should be start from offset 0 to BDEV_IO_NUM_CHILD_IOV - 1 */ 1435 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0, 1436 BDEV_IO_NUM_CHILD_IOV - 1, BDEV_IO_NUM_CHILD_IOV - 1); 1437 expected_io->md_buf = md_buf; 1438 for (i = 0; i < BDEV_IO_NUM_CHILD_IOV - 1; i++) { 1439 ut_expected_io_set_iov(expected_io, i, 1440 (void *)((i + 1) * 0x10000), 512); 1441 } 1442 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1443 /* The second expected io should be start from offset BDEV_IO_NUM_CHILD_IOV - 1 to BDEV_IO_NUM_CHILD_IOV */ 1444 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, BDEV_IO_NUM_CHILD_IOV - 1, 1445 1, 2); 1446 expected_io->md_buf = md_buf + (BDEV_IO_NUM_CHILD_IOV - 1) * 8; 1447 ut_expected_io_set_iov(expected_io, 0, 1448 (void *)(BDEV_IO_NUM_CHILD_IOV * 0x10000), 256); 1449 ut_expected_io_set_iov(expected_io, 1, 1450 (void *)((BDEV_IO_NUM_CHILD_IOV + 1) * 0x10000), 256); 1451 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1452 /* The third expected io should be start from offset BDEV_IO_NUM_CHILD_IOV to BDEV_IO_NUM_CHILD_IOV + 1 */ 1453 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, BDEV_IO_NUM_CHILD_IOV, 1454 1, 1); 1455 expected_io->md_buf = md_buf + BDEV_IO_NUM_CHILD_IOV * 8; 1456 ut_expected_io_set_iov(expected_io, 0, 1457 (void *)((BDEV_IO_NUM_CHILD_IOV + 2) * 0x10000), 512); 1458 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1459 1460 rc = spdk_bdev_readv_blocks_with_md(desc, io_ch, iov, BDEV_IO_NUM_CHILD_IOV * 2, md_buf, 1461 0, BDEV_IO_NUM_CHILD_IOV + 1, io_done, NULL); 1462 CU_ASSERT(rc == 0); 1463 CU_ASSERT(g_io_done == false); 1464 1465 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 1466 stub_complete_io(1); 1467 CU_ASSERT(g_io_done == false); 1468 1469 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 1470 stub_complete_io(2); 1471 CU_ASSERT(g_io_done == true); 1472 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 1473 1474 /* Test multi vector command that needs to be split due to the IO boundary and 1475 * the capacity of child iovs. Especially test the case when the command is 1476 * split due to the capacity of child iovs, the tail address is not aligned with 1477 * block size and is rewinded to the aligned address. 1478 * 1479 * The iovecs used in read request is complex but is based on the data 1480 * collected in the real issue. We change the base addresses but keep the lengths 1481 * not to loose the credibility of the test. 1482 */ 1483 bdev->optimal_io_boundary = 128; 1484 g_io_done = false; 1485 g_io_status = 0; 1486 1487 for (i = 0; i < 31; i++) { 1488 iov[i].iov_base = (void *)(0xFEED0000000 + (i << 20)); 1489 iov[i].iov_len = 1024; 1490 } 1491 iov[31].iov_base = (void *)0xFEED1F00000; 1492 iov[31].iov_len = 32768; 1493 iov[32].iov_base = (void *)0xFEED2000000; 1494 iov[32].iov_len = 160; 1495 iov[33].iov_base = (void *)0xFEED2100000; 1496 iov[33].iov_len = 4096; 1497 iov[34].iov_base = (void *)0xFEED2200000; 1498 iov[34].iov_len = 4096; 1499 iov[35].iov_base = (void *)0xFEED2300000; 1500 iov[35].iov_len = 4096; 1501 iov[36].iov_base = (void *)0xFEED2400000; 1502 iov[36].iov_len = 4096; 1503 iov[37].iov_base = (void *)0xFEED2500000; 1504 iov[37].iov_len = 4096; 1505 iov[38].iov_base = (void *)0xFEED2600000; 1506 iov[38].iov_len = 4096; 1507 iov[39].iov_base = (void *)0xFEED2700000; 1508 iov[39].iov_len = 4096; 1509 iov[40].iov_base = (void *)0xFEED2800000; 1510 iov[40].iov_len = 4096; 1511 iov[41].iov_base = (void *)0xFEED2900000; 1512 iov[41].iov_len = 4096; 1513 iov[42].iov_base = (void *)0xFEED2A00000; 1514 iov[42].iov_len = 4096; 1515 iov[43].iov_base = (void *)0xFEED2B00000; 1516 iov[43].iov_len = 12288; 1517 iov[44].iov_base = (void *)0xFEED2C00000; 1518 iov[44].iov_len = 8192; 1519 iov[45].iov_base = (void *)0xFEED2F00000; 1520 iov[45].iov_len = 4096; 1521 iov[46].iov_base = (void *)0xFEED3000000; 1522 iov[46].iov_len = 4096; 1523 iov[47].iov_base = (void *)0xFEED3100000; 1524 iov[47].iov_len = 4096; 1525 iov[48].iov_base = (void *)0xFEED3200000; 1526 iov[48].iov_len = 24576; 1527 iov[49].iov_base = (void *)0xFEED3300000; 1528 iov[49].iov_len = 16384; 1529 iov[50].iov_base = (void *)0xFEED3400000; 1530 iov[50].iov_len = 12288; 1531 iov[51].iov_base = (void *)0xFEED3500000; 1532 iov[51].iov_len = 4096; 1533 iov[52].iov_base = (void *)0xFEED3600000; 1534 iov[52].iov_len = 4096; 1535 iov[53].iov_base = (void *)0xFEED3700000; 1536 iov[53].iov_len = 4096; 1537 iov[54].iov_base = (void *)0xFEED3800000; 1538 iov[54].iov_len = 28672; 1539 iov[55].iov_base = (void *)0xFEED3900000; 1540 iov[55].iov_len = 20480; 1541 iov[56].iov_base = (void *)0xFEED3A00000; 1542 iov[56].iov_len = 4096; 1543 iov[57].iov_base = (void *)0xFEED3B00000; 1544 iov[57].iov_len = 12288; 1545 iov[58].iov_base = (void *)0xFEED3C00000; 1546 iov[58].iov_len = 4096; 1547 iov[59].iov_base = (void *)0xFEED3D00000; 1548 iov[59].iov_len = 4096; 1549 iov[60].iov_base = (void *)0xFEED3E00000; 1550 iov[60].iov_len = 352; 1551 1552 /* The 1st child IO must be from iov[0] to iov[31] split by the capacity 1553 * of child iovs, 1554 */ 1555 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0, 126, 32); 1556 expected_io->md_buf = md_buf; 1557 for (i = 0; i < 32; i++) { 1558 ut_expected_io_set_iov(expected_io, i, iov[i].iov_base, iov[i].iov_len); 1559 } 1560 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1561 1562 /* The 2nd child IO must be from iov[32] to the first 864 bytes of iov[33] 1563 * split by the IO boundary requirement. 1564 */ 1565 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 126, 2, 2); 1566 expected_io->md_buf = md_buf + 126 * 8; 1567 ut_expected_io_set_iov(expected_io, 0, iov[32].iov_base, iov[32].iov_len); 1568 ut_expected_io_set_iov(expected_io, 1, iov[33].iov_base, 864); 1569 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1570 1571 /* The 3rd child IO must be from the remaining 3232 bytes of iov[33] to 1572 * the first 864 bytes of iov[46] split by the IO boundary requirement. 1573 */ 1574 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 128, 128, 14); 1575 expected_io->md_buf = md_buf + 128 * 8; 1576 ut_expected_io_set_iov(expected_io, 0, (void *)((uintptr_t)iov[33].iov_base + 864), 1577 iov[33].iov_len - 864); 1578 ut_expected_io_set_iov(expected_io, 1, iov[34].iov_base, iov[34].iov_len); 1579 ut_expected_io_set_iov(expected_io, 2, iov[35].iov_base, iov[35].iov_len); 1580 ut_expected_io_set_iov(expected_io, 3, iov[36].iov_base, iov[36].iov_len); 1581 ut_expected_io_set_iov(expected_io, 4, iov[37].iov_base, iov[37].iov_len); 1582 ut_expected_io_set_iov(expected_io, 5, iov[38].iov_base, iov[38].iov_len); 1583 ut_expected_io_set_iov(expected_io, 6, iov[39].iov_base, iov[39].iov_len); 1584 ut_expected_io_set_iov(expected_io, 7, iov[40].iov_base, iov[40].iov_len); 1585 ut_expected_io_set_iov(expected_io, 8, iov[41].iov_base, iov[41].iov_len); 1586 ut_expected_io_set_iov(expected_io, 9, iov[42].iov_base, iov[42].iov_len); 1587 ut_expected_io_set_iov(expected_io, 10, iov[43].iov_base, iov[43].iov_len); 1588 ut_expected_io_set_iov(expected_io, 11, iov[44].iov_base, iov[44].iov_len); 1589 ut_expected_io_set_iov(expected_io, 12, iov[45].iov_base, iov[45].iov_len); 1590 ut_expected_io_set_iov(expected_io, 13, iov[46].iov_base, 864); 1591 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1592 1593 /* The 4th child IO must be from the remaining 3232 bytes of iov[46] to the 1594 * first 864 bytes of iov[52] split by the IO boundary requirement. 1595 */ 1596 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 256, 128, 7); 1597 expected_io->md_buf = md_buf + 256 * 8; 1598 ut_expected_io_set_iov(expected_io, 0, (void *)((uintptr_t)iov[46].iov_base + 864), 1599 iov[46].iov_len - 864); 1600 ut_expected_io_set_iov(expected_io, 1, iov[47].iov_base, iov[47].iov_len); 1601 ut_expected_io_set_iov(expected_io, 2, iov[48].iov_base, iov[48].iov_len); 1602 ut_expected_io_set_iov(expected_io, 3, iov[49].iov_base, iov[49].iov_len); 1603 ut_expected_io_set_iov(expected_io, 4, iov[50].iov_base, iov[50].iov_len); 1604 ut_expected_io_set_iov(expected_io, 5, iov[51].iov_base, iov[51].iov_len); 1605 ut_expected_io_set_iov(expected_io, 6, iov[52].iov_base, 864); 1606 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1607 1608 /* The 5th child IO must be from the remaining 3232 bytes of iov[52] to 1609 * the first 4096 bytes of iov[57] split by the IO boundary requirement. 1610 */ 1611 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 384, 128, 6); 1612 expected_io->md_buf = md_buf + 384 * 8; 1613 ut_expected_io_set_iov(expected_io, 0, (void *)((uintptr_t)iov[52].iov_base + 864), 1614 iov[52].iov_len - 864); 1615 ut_expected_io_set_iov(expected_io, 1, iov[53].iov_base, iov[53].iov_len); 1616 ut_expected_io_set_iov(expected_io, 2, iov[54].iov_base, iov[54].iov_len); 1617 ut_expected_io_set_iov(expected_io, 3, iov[55].iov_base, iov[55].iov_len); 1618 ut_expected_io_set_iov(expected_io, 4, iov[56].iov_base, iov[56].iov_len); 1619 ut_expected_io_set_iov(expected_io, 5, iov[57].iov_base, 4960); 1620 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1621 1622 /* The 6th child IO must be from the remaining 7328 bytes of iov[57] 1623 * to the first 3936 bytes of iov[58] split by the capacity of child iovs. 1624 */ 1625 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 512, 30, 3); 1626 expected_io->md_buf = md_buf + 512 * 8; 1627 ut_expected_io_set_iov(expected_io, 0, (void *)((uintptr_t)iov[57].iov_base + 4960), 1628 iov[57].iov_len - 4960); 1629 ut_expected_io_set_iov(expected_io, 1, iov[58].iov_base, iov[58].iov_len); 1630 ut_expected_io_set_iov(expected_io, 2, iov[59].iov_base, 3936); 1631 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1632 1633 /* The 7th child IO is from the remaining 160 bytes of iov[59] and iov[60]. */ 1634 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 542, 1, 2); 1635 expected_io->md_buf = md_buf + 542 * 8; 1636 ut_expected_io_set_iov(expected_io, 0, (void *)((uintptr_t)iov[59].iov_base + 3936), 1637 iov[59].iov_len - 3936); 1638 ut_expected_io_set_iov(expected_io, 1, iov[60].iov_base, iov[60].iov_len); 1639 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1640 1641 rc = spdk_bdev_readv_blocks_with_md(desc, io_ch, iov, 61, md_buf, 1642 0, 543, io_done, NULL); 1643 CU_ASSERT(rc == 0); 1644 CU_ASSERT(g_io_done == false); 1645 1646 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 1647 stub_complete_io(1); 1648 CU_ASSERT(g_io_done == false); 1649 1650 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 5); 1651 stub_complete_io(5); 1652 CU_ASSERT(g_io_done == false); 1653 1654 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 1655 stub_complete_io(1); 1656 CU_ASSERT(g_io_done == true); 1657 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 1658 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS); 1659 1660 /* Test a WRITE_ZEROES that would span an I/O boundary. WRITE_ZEROES should not be 1661 * split, so test that. 1662 */ 1663 bdev->optimal_io_boundary = 15; 1664 g_io_done = false; 1665 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, 9, 36, 0); 1666 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1667 1668 rc = spdk_bdev_write_zeroes_blocks(desc, io_ch, 9, 36, io_done, NULL); 1669 CU_ASSERT(rc == 0); 1670 CU_ASSERT(g_io_done == false); 1671 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 1672 stub_complete_io(1); 1673 CU_ASSERT(g_io_done == true); 1674 1675 /* Test an UNMAP. This should also not be split. */ 1676 bdev->optimal_io_boundary = 16; 1677 g_io_done = false; 1678 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_UNMAP, 15, 2, 0); 1679 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1680 1681 rc = spdk_bdev_unmap_blocks(desc, io_ch, 15, 2, io_done, NULL); 1682 CU_ASSERT(rc == 0); 1683 CU_ASSERT(g_io_done == false); 1684 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 1685 stub_complete_io(1); 1686 CU_ASSERT(g_io_done == true); 1687 1688 /* Test a FLUSH. This should also not be split. */ 1689 bdev->optimal_io_boundary = 16; 1690 g_io_done = false; 1691 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_FLUSH, 15, 2, 0); 1692 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1693 1694 rc = spdk_bdev_flush_blocks(desc, io_ch, 15, 2, io_done, NULL); 1695 CU_ASSERT(rc == 0); 1696 CU_ASSERT(g_io_done == false); 1697 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 1698 stub_complete_io(1); 1699 CU_ASSERT(g_io_done == true); 1700 1701 CU_ASSERT(TAILQ_EMPTY(&g_bdev_ut_channel->expected_io)); 1702 1703 /* Children requests return an error status */ 1704 bdev->optimal_io_boundary = 16; 1705 iov[0].iov_base = (void *)0x10000; 1706 iov[0].iov_len = 512 * 64; 1707 g_io_exp_status = SPDK_BDEV_IO_STATUS_FAILED; 1708 g_io_done = false; 1709 g_io_status = SPDK_BDEV_IO_STATUS_SUCCESS; 1710 1711 rc = spdk_bdev_readv_blocks(desc, io_ch, iov, 1, 1, 64, io_done, NULL); 1712 CU_ASSERT(rc == 0); 1713 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 5); 1714 stub_complete_io(4); 1715 CU_ASSERT(g_io_done == false); 1716 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS); 1717 stub_complete_io(1); 1718 CU_ASSERT(g_io_done == true); 1719 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED); 1720 1721 /* Test if a multi vector command terminated with failure before continuing 1722 * splitting process when one of child I/O failed. 1723 * The multi vector command is as same as the above that needs to be split by strip 1724 * and then needs to be split further due to the capacity of child iovs. 1725 */ 1726 for (i = 0; i < BDEV_IO_NUM_CHILD_IOV - 1; i++) { 1727 iov[i].iov_base = (void *)((i + 1) * 0x10000); 1728 iov[i].iov_len = 512; 1729 } 1730 iov[BDEV_IO_NUM_CHILD_IOV - 1].iov_base = (void *)(BDEV_IO_NUM_CHILD_IOV * 0x10000); 1731 iov[BDEV_IO_NUM_CHILD_IOV - 1].iov_len = 256; 1732 1733 iov[BDEV_IO_NUM_CHILD_IOV].iov_base = (void *)((BDEV_IO_NUM_CHILD_IOV + 1) * 0x10000); 1734 iov[BDEV_IO_NUM_CHILD_IOV].iov_len = 256; 1735 1736 iov[BDEV_IO_NUM_CHILD_IOV + 1].iov_base = (void *)((BDEV_IO_NUM_CHILD_IOV + 2) * 0x10000); 1737 iov[BDEV_IO_NUM_CHILD_IOV + 1].iov_len = 512; 1738 1739 bdev->optimal_io_boundary = BDEV_IO_NUM_CHILD_IOV; 1740 1741 g_io_exp_status = SPDK_BDEV_IO_STATUS_FAILED; 1742 g_io_done = false; 1743 g_io_status = SPDK_BDEV_IO_STATUS_SUCCESS; 1744 1745 rc = spdk_bdev_readv_blocks(desc, io_ch, iov, BDEV_IO_NUM_CHILD_IOV * 2, 0, 1746 BDEV_IO_NUM_CHILD_IOV + 1, io_done, NULL); 1747 CU_ASSERT(rc == 0); 1748 CU_ASSERT(g_io_done == false); 1749 1750 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 1751 stub_complete_io(1); 1752 CU_ASSERT(g_io_done == true); 1753 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED); 1754 1755 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 1756 1757 /* for this test we will create the following conditions to hit the code path where 1758 * we are trying to send and IO following a split that has no iovs because we had to 1759 * trim them for alignment reasons. 1760 * 1761 * - 16K boundary, our IO will start at offset 0 with a length of 0x4200 1762 * - Our IOVs are 0x212 in size so that we run into the 16K boundary at child IOV 1763 * position 30 and overshoot by 0x2e. 1764 * - That means we'll send the IO and loop back to pick up the remaining bytes at 1765 * child IOV index 31. When we do, we find that we have to shorten index 31 by 0x2e 1766 * which eliniates that vector so we just send the first split IO with 30 vectors 1767 * and let the completion pick up the last 2 vectors. 1768 */ 1769 bdev->optimal_io_boundary = 32; 1770 bdev->split_on_optimal_io_boundary = true; 1771 g_io_done = false; 1772 1773 /* Init all parent IOVs to 0x212 */ 1774 for (i = 0; i < BDEV_IO_NUM_CHILD_IOV + 2; i++) { 1775 iov[i].iov_base = (void *)((i + 1) * 0x10000); 1776 iov[i].iov_len = 0x212; 1777 } 1778 1779 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0, BDEV_IO_NUM_CHILD_IOV, 1780 BDEV_IO_NUM_CHILD_IOV - 1); 1781 /* expect 0-29 to be 1:1 with the parent iov */ 1782 for (i = 0; i < BDEV_IO_NUM_CHILD_IOV - 2; i++) { 1783 ut_expected_io_set_iov(expected_io, i, iov[i].iov_base, iov[i].iov_len); 1784 } 1785 1786 /* expect index 30 to be shortened to 0x1e4 (0x212 - 0x1e) because of the alignment 1787 * where 0x1e is the amount we overshot the 16K boundary 1788 */ 1789 ut_expected_io_set_iov(expected_io, BDEV_IO_NUM_CHILD_IOV - 2, 1790 (void *)(iov[BDEV_IO_NUM_CHILD_IOV - 2].iov_base), 0x1e4); 1791 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1792 1793 /* 2nd child IO will have 2 remaining vectors, one to pick up from the one that was 1794 * shortened that take it to the next boundary and then a final one to get us to 1795 * 0x4200 bytes for the IO. 1796 */ 1797 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, BDEV_IO_NUM_CHILD_IOV, 1798 BDEV_IO_NUM_CHILD_IOV, 2); 1799 /* position 30 picked up the remaining bytes to the next boundary */ 1800 ut_expected_io_set_iov(expected_io, 0, 1801 (void *)(iov[BDEV_IO_NUM_CHILD_IOV - 2].iov_base + 0x1e4), 0x2e); 1802 1803 /* position 31 picked the the rest of the transfer to get us to 0x4200 */ 1804 ut_expected_io_set_iov(expected_io, 1, 1805 (void *)(iov[BDEV_IO_NUM_CHILD_IOV - 1].iov_base), 0x1d2); 1806 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1807 1808 rc = spdk_bdev_readv_blocks(desc, io_ch, iov, BDEV_IO_NUM_CHILD_IOV + 1, 0, 1809 BDEV_IO_NUM_CHILD_IOV + 1, io_done, NULL); 1810 CU_ASSERT(rc == 0); 1811 CU_ASSERT(g_io_done == false); 1812 1813 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 1814 stub_complete_io(1); 1815 CU_ASSERT(g_io_done == false); 1816 1817 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 1818 stub_complete_io(1); 1819 CU_ASSERT(g_io_done == true); 1820 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 1821 1822 spdk_put_io_channel(io_ch); 1823 spdk_bdev_close(desc); 1824 free_bdev(bdev); 1825 spdk_bdev_finish(bdev_fini_cb, NULL); 1826 poll_threads(); 1827 } 1828 1829 static void 1830 bdev_io_max_size_and_segment_split_test(void) 1831 { 1832 struct spdk_bdev *bdev; 1833 struct spdk_bdev_desc *desc = NULL; 1834 struct spdk_io_channel *io_ch; 1835 struct spdk_bdev_opts bdev_opts = {}; 1836 struct iovec iov[BDEV_IO_NUM_CHILD_IOV * 2]; 1837 struct ut_expected_io *expected_io; 1838 uint64_t i; 1839 int rc; 1840 1841 spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts)); 1842 bdev_opts.bdev_io_pool_size = 512; 1843 bdev_opts.bdev_io_cache_size = 64; 1844 1845 bdev_opts.opts_size = sizeof(bdev_opts); 1846 rc = spdk_bdev_set_opts(&bdev_opts); 1847 CU_ASSERT(rc == 0); 1848 spdk_bdev_initialize(bdev_init_cb, NULL); 1849 1850 bdev = allocate_bdev("bdev0"); 1851 1852 rc = spdk_bdev_open_ext(bdev->name, true, bdev_ut_event_cb, NULL, &desc); 1853 CU_ASSERT(rc == 0); 1854 SPDK_CU_ASSERT_FATAL(desc != NULL); 1855 io_ch = spdk_bdev_get_io_channel(desc); 1856 CU_ASSERT(io_ch != NULL); 1857 1858 bdev->split_on_optimal_io_boundary = false; 1859 bdev->optimal_io_boundary = 0; 1860 1861 /* Case 0 max_num_segments == 0. 1862 * but segment size 2 * 512 > 512 1863 */ 1864 bdev->max_segment_size = 512; 1865 bdev->max_num_segments = 0; 1866 g_io_done = false; 1867 1868 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 14, 2, 2); 1869 ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 512); 1870 ut_expected_io_set_iov(expected_io, 1, (void *)(0xF000 + 512), 512); 1871 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1872 1873 rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 2, io_done, NULL); 1874 CU_ASSERT(rc == 0); 1875 CU_ASSERT(g_io_done == false); 1876 1877 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 1878 stub_complete_io(1); 1879 CU_ASSERT(g_io_done == true); 1880 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 1881 1882 /* Case 1 max_segment_size == 0 1883 * but iov num 2 > 1. 1884 */ 1885 bdev->max_segment_size = 0; 1886 bdev->max_num_segments = 1; 1887 g_io_done = false; 1888 1889 iov[0].iov_base = (void *)0x10000; 1890 iov[0].iov_len = 512; 1891 iov[1].iov_base = (void *)0x20000; 1892 iov[1].iov_len = 8 * 512; 1893 1894 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 14, 1, 1); 1895 ut_expected_io_set_iov(expected_io, 0, iov[0].iov_base, iov[0].iov_len); 1896 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1897 1898 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 15, 8, 1); 1899 ut_expected_io_set_iov(expected_io, 0, iov[1].iov_base, iov[1].iov_len); 1900 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1901 1902 rc = spdk_bdev_readv_blocks(desc, io_ch, iov, 2, 14, 9, io_done, NULL); 1903 CU_ASSERT(rc == 0); 1904 CU_ASSERT(g_io_done == false); 1905 1906 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 1907 stub_complete_io(2); 1908 CU_ASSERT(g_io_done == true); 1909 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 1910 1911 /* Test that a non-vector command is split correctly. 1912 * Set up the expected values before calling spdk_bdev_read_blocks 1913 */ 1914 bdev->max_segment_size = 512; 1915 bdev->max_num_segments = 1; 1916 g_io_done = false; 1917 1918 /* Child IO 0 */ 1919 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 14, 1, 1); 1920 ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 512); 1921 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1922 1923 /* Child IO 1 */ 1924 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 15, 1, 1); 1925 ut_expected_io_set_iov(expected_io, 0, (void *)(0xF000 + 1 * 512), 512); 1926 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1927 1928 /* spdk_bdev_read_blocks will submit the first child immediately. */ 1929 rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 2, io_done, NULL); 1930 CU_ASSERT(rc == 0); 1931 CU_ASSERT(g_io_done == false); 1932 1933 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 1934 stub_complete_io(2); 1935 CU_ASSERT(g_io_done == true); 1936 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 1937 1938 /* Now set up a more complex, multi-vector command that needs to be split, 1939 * including splitting iovecs. 1940 */ 1941 bdev->max_segment_size = 2 * 512; 1942 bdev->max_num_segments = 1; 1943 g_io_done = false; 1944 1945 iov[0].iov_base = (void *)0x10000; 1946 iov[0].iov_len = 2 * 512; 1947 iov[1].iov_base = (void *)0x20000; 1948 iov[1].iov_len = 4 * 512; 1949 iov[2].iov_base = (void *)0x30000; 1950 iov[2].iov_len = 6 * 512; 1951 1952 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 14, 2, 1); 1953 ut_expected_io_set_iov(expected_io, 0, iov[0].iov_base, 512 * 2); 1954 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1955 1956 /* Split iov[1].size to 2 iov entries then split the segments */ 1957 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 16, 2, 1); 1958 ut_expected_io_set_iov(expected_io, 0, iov[1].iov_base, 512 * 2); 1959 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1960 1961 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 18, 2, 1); 1962 ut_expected_io_set_iov(expected_io, 0, iov[1].iov_base + 512 * 2, 512 * 2); 1963 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1964 1965 /* Split iov[2].size to 3 iov entries then split the segments */ 1966 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 20, 2, 1); 1967 ut_expected_io_set_iov(expected_io, 0, iov[2].iov_base, 512 * 2); 1968 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1969 1970 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 22, 2, 1); 1971 ut_expected_io_set_iov(expected_io, 0, iov[2].iov_base + 512 * 2, 512 * 2); 1972 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1973 1974 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 24, 2, 1); 1975 ut_expected_io_set_iov(expected_io, 0, iov[2].iov_base + 512 * 4, 512 * 2); 1976 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1977 1978 rc = spdk_bdev_writev_blocks(desc, io_ch, iov, 3, 14, 12, io_done, NULL); 1979 CU_ASSERT(rc == 0); 1980 CU_ASSERT(g_io_done == false); 1981 1982 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 6); 1983 stub_complete_io(6); 1984 CU_ASSERT(g_io_done == true); 1985 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 1986 1987 /* Test multi vector command that needs to be split by strip and then needs to be 1988 * split further due to the capacity of parent IO child iovs. 1989 */ 1990 bdev->max_segment_size = 512; 1991 bdev->max_num_segments = 1; 1992 g_io_done = false; 1993 1994 for (i = 0; i < BDEV_IO_NUM_CHILD_IOV; i++) { 1995 iov[i].iov_base = (void *)((i + 1) * 0x10000); 1996 iov[i].iov_len = 512 * 2; 1997 } 1998 1999 /* Each input iov.size is split into 2 iovs, 2000 * half of the input iov can fill all child iov entries of a single IO. 2001 */ 2002 for (i = 0; i < BDEV_IO_NUM_CHILD_IOV / 2; i++) { 2003 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 2 * i, 1, 1); 2004 ut_expected_io_set_iov(expected_io, 0, iov[i].iov_base, 512); 2005 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2006 2007 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 2 * i + 1, 1, 1); 2008 ut_expected_io_set_iov(expected_io, 0, iov[i].iov_base + 512, 512); 2009 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2010 } 2011 2012 /* The remaining iov is split in the second round */ 2013 for (i = BDEV_IO_NUM_CHILD_IOV / 2; i < BDEV_IO_NUM_CHILD_IOV; i++) { 2014 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, i * 2, 1, 1); 2015 ut_expected_io_set_iov(expected_io, 0, iov[i].iov_base, 512); 2016 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2017 2018 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, i * 2 + 1, 1, 1); 2019 ut_expected_io_set_iov(expected_io, 0, iov[i].iov_base + 512, 512); 2020 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2021 } 2022 2023 rc = spdk_bdev_readv_blocks(desc, io_ch, iov, BDEV_IO_NUM_CHILD_IOV, 0, 2024 BDEV_IO_NUM_CHILD_IOV * 2, io_done, NULL); 2025 CU_ASSERT(rc == 0); 2026 CU_ASSERT(g_io_done == false); 2027 2028 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == BDEV_IO_NUM_CHILD_IOV); 2029 stub_complete_io(BDEV_IO_NUM_CHILD_IOV); 2030 CU_ASSERT(g_io_done == false); 2031 2032 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == BDEV_IO_NUM_CHILD_IOV); 2033 stub_complete_io(BDEV_IO_NUM_CHILD_IOV); 2034 CU_ASSERT(g_io_done == true); 2035 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 2036 2037 /* A wrong case, a child IO that is divided does 2038 * not meet the principle of multiples of block size, 2039 * and exits with error 2040 */ 2041 bdev->max_segment_size = 512; 2042 bdev->max_num_segments = 1; 2043 g_io_done = false; 2044 2045 iov[0].iov_base = (void *)0x10000; 2046 iov[0].iov_len = 512 + 256; 2047 iov[1].iov_base = (void *)0x20000; 2048 iov[1].iov_len = 256; 2049 2050 /* iov[0] is split to 512 and 256. 2051 * 256 is less than a block size, and it is found 2052 * in the next round of split that it is the first child IO smaller than 2053 * the block size, so the error exit 2054 */ 2055 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0, 1, 1); 2056 ut_expected_io_set_iov(expected_io, 0, iov[0].iov_base, 512); 2057 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2058 2059 rc = spdk_bdev_readv_blocks(desc, io_ch, iov, 2, 0, 2, io_done, NULL); 2060 CU_ASSERT(rc == 0); 2061 CU_ASSERT(g_io_done == false); 2062 2063 /* First child IO is OK */ 2064 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 2065 stub_complete_io(1); 2066 CU_ASSERT(g_io_done == true); 2067 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 2068 2069 /* error exit */ 2070 stub_complete_io(1); 2071 CU_ASSERT(g_io_done == true); 2072 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED); 2073 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 2074 2075 /* Test multi vector command that needs to be split by strip and then needs to be 2076 * split further due to the capacity of child iovs. 2077 * 2078 * In this case, the last two iovs need to be split, but it will exceed the capacity 2079 * of child iovs, so it needs to wait until the first batch completed. 2080 */ 2081 bdev->max_segment_size = 512; 2082 bdev->max_num_segments = BDEV_IO_NUM_CHILD_IOV; 2083 g_io_done = false; 2084 2085 for (i = 0; i < BDEV_IO_NUM_CHILD_IOV - 2; i++) { 2086 iov[i].iov_base = (void *)((i + 1) * 0x10000); 2087 iov[i].iov_len = 512; 2088 } 2089 for (i = BDEV_IO_NUM_CHILD_IOV - 2; i < BDEV_IO_NUM_CHILD_IOV; i++) { 2090 iov[i].iov_base = (void *)((i + 1) * 0x10000); 2091 iov[i].iov_len = 512 * 2; 2092 } 2093 2094 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0, 2095 BDEV_IO_NUM_CHILD_IOV, BDEV_IO_NUM_CHILD_IOV); 2096 /* 0 ~ (BDEV_IO_NUM_CHILD_IOV - 2) Will not be split */ 2097 for (i = 0; i < BDEV_IO_NUM_CHILD_IOV - 2; i++) { 2098 ut_expected_io_set_iov(expected_io, i, iov[i].iov_base, iov[i].iov_len); 2099 } 2100 /* (BDEV_IO_NUM_CHILD_IOV - 2) is split */ 2101 ut_expected_io_set_iov(expected_io, i, iov[i].iov_base, 512); 2102 ut_expected_io_set_iov(expected_io, i + 1, iov[i].iov_base + 512, 512); 2103 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2104 2105 /* Child iov entries exceed the max num of parent IO so split it in next round */ 2106 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, BDEV_IO_NUM_CHILD_IOV, 2, 2); 2107 ut_expected_io_set_iov(expected_io, 0, iov[i + 1].iov_base, 512); 2108 ut_expected_io_set_iov(expected_io, 1, iov[i + 1].iov_base + 512, 512); 2109 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2110 2111 rc = spdk_bdev_readv_blocks(desc, io_ch, iov, BDEV_IO_NUM_CHILD_IOV, 0, 2112 BDEV_IO_NUM_CHILD_IOV + 2, io_done, NULL); 2113 CU_ASSERT(rc == 0); 2114 CU_ASSERT(g_io_done == false); 2115 2116 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 2117 stub_complete_io(1); 2118 CU_ASSERT(g_io_done == false); 2119 2120 /* Next round */ 2121 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 2122 stub_complete_io(1); 2123 CU_ASSERT(g_io_done == true); 2124 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 2125 2126 /* This case is similar to the previous one, but the io composed of 2127 * the last few entries of child iov is not enough for a blocklen, so they 2128 * cannot be put into this IO, but wait until the next time. 2129 */ 2130 bdev->max_segment_size = 512; 2131 bdev->max_num_segments = BDEV_IO_NUM_CHILD_IOV; 2132 g_io_done = false; 2133 2134 for (i = 0; i < BDEV_IO_NUM_CHILD_IOV - 2; i++) { 2135 iov[i].iov_base = (void *)((i + 1) * 0x10000); 2136 iov[i].iov_len = 512; 2137 } 2138 2139 for (i = BDEV_IO_NUM_CHILD_IOV - 2; i < BDEV_IO_NUM_CHILD_IOV + 2; i++) { 2140 iov[i].iov_base = (void *)((i + 1) * 0x10000); 2141 iov[i].iov_len = 128; 2142 } 2143 2144 /* First child iovcnt is't BDEV_IO_NUM_CHILD_IOV but BDEV_IO_NUM_CHILD_IOV - 2. 2145 * Because the left 2 iov is not enough for a blocklen. 2146 */ 2147 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0, 2148 BDEV_IO_NUM_CHILD_IOV - 2, BDEV_IO_NUM_CHILD_IOV - 2); 2149 for (i = 0; i < BDEV_IO_NUM_CHILD_IOV - 2; i++) { 2150 ut_expected_io_set_iov(expected_io, i, iov[i].iov_base, iov[i].iov_len); 2151 } 2152 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2153 2154 /* The second child io waits until the end of the first child io before executing. 2155 * Because the iovcnt of the two IOs exceeds the child iovcnt of the parent IO. 2156 * BDEV_IO_NUM_CHILD_IOV - 2 to BDEV_IO_NUM_CHILD_IOV + 2 2157 */ 2158 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, BDEV_IO_NUM_CHILD_IOV - 2, 2159 1, 4); 2160 ut_expected_io_set_iov(expected_io, 0, iov[i].iov_base, iov[i].iov_len); 2161 ut_expected_io_set_iov(expected_io, 1, iov[i + 1].iov_base, iov[i + 1].iov_len); 2162 ut_expected_io_set_iov(expected_io, 2, iov[i + 2].iov_base, iov[i + 2].iov_len); 2163 ut_expected_io_set_iov(expected_io, 3, iov[i + 3].iov_base, iov[i + 3].iov_len); 2164 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2165 2166 rc = spdk_bdev_readv_blocks(desc, io_ch, iov, BDEV_IO_NUM_CHILD_IOV + 2, 0, 2167 BDEV_IO_NUM_CHILD_IOV - 1, io_done, NULL); 2168 CU_ASSERT(rc == 0); 2169 CU_ASSERT(g_io_done == false); 2170 2171 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 2172 stub_complete_io(1); 2173 CU_ASSERT(g_io_done == false); 2174 2175 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 2176 stub_complete_io(1); 2177 CU_ASSERT(g_io_done == true); 2178 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 2179 2180 /* A very complicated case. Each sg entry exceeds max_segment_size and 2181 * needs to be split. At the same time, child io must be a multiple of blocklen. 2182 * At the same time, child iovcnt exceeds parent iovcnt. 2183 */ 2184 bdev->max_segment_size = 512 + 128; 2185 bdev->max_num_segments = 3; 2186 g_io_done = false; 2187 2188 for (i = 0; i < BDEV_IO_NUM_CHILD_IOV - 2; i++) { 2189 iov[i].iov_base = (void *)((i + 1) * 0x10000); 2190 iov[i].iov_len = 512 + 256; 2191 } 2192 2193 for (i = BDEV_IO_NUM_CHILD_IOV - 2; i < BDEV_IO_NUM_CHILD_IOV + 2; i++) { 2194 iov[i].iov_base = (void *)((i + 1) * 0x10000); 2195 iov[i].iov_len = 512 + 128; 2196 } 2197 2198 /* Child IOs use 9 entries per for() round and 3 * 9 = 27 child iov entries. 2199 * Consume 4 parent IO iov entries per for() round and 6 block size. 2200 * Generate 9 child IOs. 2201 */ 2202 for (i = 0; i < 3; i++) { 2203 uint32_t j = i * 4; 2204 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, i * 6, 2, 3); 2205 ut_expected_io_set_iov(expected_io, 0, iov[j].iov_base, 640); 2206 ut_expected_io_set_iov(expected_io, 1, iov[j].iov_base + 640, 128); 2207 ut_expected_io_set_iov(expected_io, 2, iov[j + 1].iov_base, 256); 2208 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2209 2210 /* Child io must be a multiple of blocklen 2211 * iov[j + 2] must be split. If the third entry is also added, 2212 * the multiple of blocklen cannot be guaranteed. But it still 2213 * occupies one iov entry of the parent child iov. 2214 */ 2215 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, i * 6 + 2, 2, 2); 2216 ut_expected_io_set_iov(expected_io, 0, iov[j + 1].iov_base + 256, 512); 2217 ut_expected_io_set_iov(expected_io, 1, iov[j + 2].iov_base, 512); 2218 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2219 2220 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, i * 6 + 4, 2, 3); 2221 ut_expected_io_set_iov(expected_io, 0, iov[j + 2].iov_base + 512, 256); 2222 ut_expected_io_set_iov(expected_io, 1, iov[j + 3].iov_base, 640); 2223 ut_expected_io_set_iov(expected_io, 2, iov[j + 3].iov_base + 640, 128); 2224 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2225 } 2226 2227 /* Child iov position at 27, the 10th child IO 2228 * iov entry index is 3 * 4 and offset is 3 * 6 2229 */ 2230 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 18, 2, 3); 2231 ut_expected_io_set_iov(expected_io, 0, iov[12].iov_base, 640); 2232 ut_expected_io_set_iov(expected_io, 1, iov[12].iov_base + 640, 128); 2233 ut_expected_io_set_iov(expected_io, 2, iov[13].iov_base, 256); 2234 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2235 2236 /* Child iov position at 30, the 11th child IO */ 2237 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 20, 2, 2); 2238 ut_expected_io_set_iov(expected_io, 0, iov[13].iov_base + 256, 512); 2239 ut_expected_io_set_iov(expected_io, 1, iov[14].iov_base, 512); 2240 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2241 2242 /* The 2nd split round and iovpos is 0, the 12th child IO */ 2243 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 22, 2, 3); 2244 ut_expected_io_set_iov(expected_io, 0, iov[14].iov_base + 512, 256); 2245 ut_expected_io_set_iov(expected_io, 1, iov[15].iov_base, 640); 2246 ut_expected_io_set_iov(expected_io, 2, iov[15].iov_base + 640, 128); 2247 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2248 2249 /* Consume 9 child IOs and 27 child iov entries. 2250 * Consume 4 parent IO iov entries per for() round and 6 block size. 2251 * Parent IO iov index start from 16 and block offset start from 24 2252 */ 2253 for (i = 0; i < 3; i++) { 2254 uint32_t j = i * 4 + 16; 2255 uint32_t offset = i * 6 + 24; 2256 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, offset, 2, 3); 2257 ut_expected_io_set_iov(expected_io, 0, iov[j].iov_base, 640); 2258 ut_expected_io_set_iov(expected_io, 1, iov[j].iov_base + 640, 128); 2259 ut_expected_io_set_iov(expected_io, 2, iov[j + 1].iov_base, 256); 2260 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2261 2262 /* Child io must be a multiple of blocklen 2263 * iov[j + 2] must be split. If the third entry is also added, 2264 * the multiple of blocklen cannot be guaranteed. But it still 2265 * occupies one iov entry of the parent child iov. 2266 */ 2267 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, offset + 2, 2, 2); 2268 ut_expected_io_set_iov(expected_io, 0, iov[j + 1].iov_base + 256, 512); 2269 ut_expected_io_set_iov(expected_io, 1, iov[j + 2].iov_base, 512); 2270 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2271 2272 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, offset + 4, 2, 3); 2273 ut_expected_io_set_iov(expected_io, 0, iov[j + 2].iov_base + 512, 256); 2274 ut_expected_io_set_iov(expected_io, 1, iov[j + 3].iov_base, 640); 2275 ut_expected_io_set_iov(expected_io, 2, iov[j + 3].iov_base + 640, 128); 2276 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2277 } 2278 2279 /* The 22th child IO, child iov position at 30 */ 2280 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 42, 1, 1); 2281 ut_expected_io_set_iov(expected_io, 0, iov[28].iov_base, 512); 2282 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2283 2284 /* The third round */ 2285 /* Here is the 23nd child IO and child iovpos is 0 */ 2286 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 43, 2, 3); 2287 ut_expected_io_set_iov(expected_io, 0, iov[28].iov_base + 512, 256); 2288 ut_expected_io_set_iov(expected_io, 1, iov[29].iov_base, 640); 2289 ut_expected_io_set_iov(expected_io, 2, iov[29].iov_base + 640, 128); 2290 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2291 2292 /* The 24th child IO */ 2293 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 45, 3, 3); 2294 ut_expected_io_set_iov(expected_io, 0, iov[30].iov_base, 640); 2295 ut_expected_io_set_iov(expected_io, 1, iov[31].iov_base, 640); 2296 ut_expected_io_set_iov(expected_io, 2, iov[32].iov_base, 256); 2297 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2298 2299 /* The 25th child IO */ 2300 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 48, 2, 2); 2301 ut_expected_io_set_iov(expected_io, 0, iov[32].iov_base + 256, 384); 2302 ut_expected_io_set_iov(expected_io, 1, iov[33].iov_base, 640); 2303 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2304 2305 rc = spdk_bdev_readv_blocks(desc, io_ch, iov, BDEV_IO_NUM_CHILD_IOV + 2, 0, 2306 50, io_done, NULL); 2307 CU_ASSERT(rc == 0); 2308 CU_ASSERT(g_io_done == false); 2309 2310 /* Parent IO supports up to 32 child iovs, so it is calculated that 2311 * a maximum of 11 IOs can be split at a time, and the 2312 * splitting will continue after the first batch is over. 2313 */ 2314 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 11); 2315 stub_complete_io(11); 2316 CU_ASSERT(g_io_done == false); 2317 2318 /* The 2nd round */ 2319 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 11); 2320 stub_complete_io(11); 2321 CU_ASSERT(g_io_done == false); 2322 2323 /* The last round */ 2324 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 3); 2325 stub_complete_io(3); 2326 CU_ASSERT(g_io_done == true); 2327 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 2328 2329 /* Test an WRITE_ZEROES. This should also not be split. */ 2330 bdev->max_segment_size = 512; 2331 bdev->max_num_segments = 1; 2332 g_io_done = false; 2333 2334 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, 9, 36, 0); 2335 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2336 2337 rc = spdk_bdev_write_zeroes_blocks(desc, io_ch, 9, 36, io_done, NULL); 2338 CU_ASSERT(rc == 0); 2339 CU_ASSERT(g_io_done == false); 2340 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 2341 stub_complete_io(1); 2342 CU_ASSERT(g_io_done == true); 2343 2344 /* Test an UNMAP. This should also not be split. */ 2345 g_io_done = false; 2346 2347 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_UNMAP, 15, 4, 0); 2348 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2349 2350 rc = spdk_bdev_unmap_blocks(desc, io_ch, 15, 4, io_done, NULL); 2351 CU_ASSERT(rc == 0); 2352 CU_ASSERT(g_io_done == false); 2353 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 2354 stub_complete_io(1); 2355 CU_ASSERT(g_io_done == true); 2356 2357 /* Test a FLUSH. This should also not be split. */ 2358 g_io_done = false; 2359 2360 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_FLUSH, 15, 4, 0); 2361 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2362 2363 rc = spdk_bdev_flush_blocks(desc, io_ch, 15, 2, io_done, NULL); 2364 CU_ASSERT(rc == 0); 2365 CU_ASSERT(g_io_done == false); 2366 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 2367 stub_complete_io(1); 2368 CU_ASSERT(g_io_done == true); 2369 2370 spdk_put_io_channel(io_ch); 2371 spdk_bdev_close(desc); 2372 free_bdev(bdev); 2373 spdk_bdev_finish(bdev_fini_cb, NULL); 2374 poll_threads(); 2375 } 2376 2377 static void 2378 bdev_io_mix_split_test(void) 2379 { 2380 struct spdk_bdev *bdev; 2381 struct spdk_bdev_desc *desc = NULL; 2382 struct spdk_io_channel *io_ch; 2383 struct spdk_bdev_opts bdev_opts = {}; 2384 struct iovec iov[BDEV_IO_NUM_CHILD_IOV * 2]; 2385 struct ut_expected_io *expected_io; 2386 uint64_t i; 2387 int rc; 2388 2389 spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts)); 2390 bdev_opts.bdev_io_pool_size = 512; 2391 bdev_opts.bdev_io_cache_size = 64; 2392 2393 rc = spdk_bdev_set_opts(&bdev_opts); 2394 CU_ASSERT(rc == 0); 2395 spdk_bdev_initialize(bdev_init_cb, NULL); 2396 2397 bdev = allocate_bdev("bdev0"); 2398 2399 rc = spdk_bdev_open_ext(bdev->name, true, bdev_ut_event_cb, NULL, &desc); 2400 CU_ASSERT(rc == 0); 2401 SPDK_CU_ASSERT_FATAL(desc != NULL); 2402 io_ch = spdk_bdev_get_io_channel(desc); 2403 CU_ASSERT(io_ch != NULL); 2404 2405 /* First case optimal_io_boundary == max_segment_size * max_num_segments */ 2406 bdev->split_on_optimal_io_boundary = true; 2407 bdev->optimal_io_boundary = 16; 2408 2409 bdev->max_segment_size = 512; 2410 bdev->max_num_segments = 16; 2411 g_io_done = false; 2412 2413 /* IO crossing the IO boundary requires split 2414 * Total 2 child IOs. 2415 */ 2416 2417 /* The 1st child IO split the segment_size to multiple segment entry */ 2418 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 14, 2, 2); 2419 ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 512); 2420 ut_expected_io_set_iov(expected_io, 1, (void *)(0xF000 + 512), 512); 2421 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2422 2423 /* The 2nd child IO split the segment_size to multiple segment entry */ 2424 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 16, 2, 2); 2425 ut_expected_io_set_iov(expected_io, 0, (void *)(0xF000 + 2 * 512), 512); 2426 ut_expected_io_set_iov(expected_io, 1, (void *)(0xF000 + 3 * 512), 512); 2427 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2428 2429 rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 4, io_done, NULL); 2430 CU_ASSERT(rc == 0); 2431 CU_ASSERT(g_io_done == false); 2432 2433 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 2434 stub_complete_io(2); 2435 CU_ASSERT(g_io_done == true); 2436 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 2437 2438 /* Second case optimal_io_boundary > max_segment_size * max_num_segments */ 2439 bdev->max_segment_size = 15 * 512; 2440 bdev->max_num_segments = 1; 2441 g_io_done = false; 2442 2443 /* IO crossing the IO boundary requires split. 2444 * The 1st child IO segment size exceeds the max_segment_size, 2445 * So 1st child IO will be splitted to multiple segment entry. 2446 * Then it split to 2 child IOs because of the max_num_segments. 2447 * Total 3 child IOs. 2448 */ 2449 2450 /* The first 2 IOs are in an IO boundary. 2451 * Because the optimal_io_boundary > max_segment_size * max_num_segments 2452 * So it split to the first 2 IOs. 2453 */ 2454 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0, 15, 1); 2455 ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 512 * 15); 2456 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2457 2458 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 15, 1, 1); 2459 ut_expected_io_set_iov(expected_io, 0, (void *)(0xF000 + 512 * 15), 512); 2460 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2461 2462 /* The 3rd Child IO is because of the io boundary */ 2463 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 16, 2, 1); 2464 ut_expected_io_set_iov(expected_io, 0, (void *)(0xF000 + 512 * 16), 512 * 2); 2465 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2466 2467 rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 0, 18, io_done, NULL); 2468 CU_ASSERT(rc == 0); 2469 CU_ASSERT(g_io_done == false); 2470 2471 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 3); 2472 stub_complete_io(3); 2473 CU_ASSERT(g_io_done == true); 2474 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 2475 2476 /* Third case optimal_io_boundary < max_segment_size * max_num_segments */ 2477 bdev->max_segment_size = 17 * 512; 2478 bdev->max_num_segments = 1; 2479 g_io_done = false; 2480 2481 /* IO crossing the IO boundary requires split. 2482 * Child IO does not split. 2483 * Total 2 child IOs. 2484 */ 2485 2486 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0, 16, 1); 2487 ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 512 * 16); 2488 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2489 2490 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 16, 2, 1); 2491 ut_expected_io_set_iov(expected_io, 0, (void *)(0xF000 + 512 * 16), 512 * 2); 2492 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2493 2494 rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 0, 18, io_done, NULL); 2495 CU_ASSERT(rc == 0); 2496 CU_ASSERT(g_io_done == false); 2497 2498 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 2499 stub_complete_io(2); 2500 CU_ASSERT(g_io_done == true); 2501 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 2502 2503 /* Now set up a more complex, multi-vector command that needs to be split, 2504 * including splitting iovecs. 2505 * optimal_io_boundary < max_segment_size * max_num_segments 2506 */ 2507 bdev->max_segment_size = 3 * 512; 2508 bdev->max_num_segments = 6; 2509 g_io_done = false; 2510 2511 iov[0].iov_base = (void *)0x10000; 2512 iov[0].iov_len = 4 * 512; 2513 iov[1].iov_base = (void *)0x20000; 2514 iov[1].iov_len = 4 * 512; 2515 iov[2].iov_base = (void *)0x30000; 2516 iov[2].iov_len = 10 * 512; 2517 2518 /* IO crossing the IO boundary requires split. 2519 * The 1st child IO segment size exceeds the max_segment_size and after 2520 * splitting segment_size, the num_segments exceeds max_num_segments. 2521 * So 1st child IO will be splitted to 2 child IOs. 2522 * Total 3 child IOs. 2523 */ 2524 2525 /* The first 2 IOs are in an IO boundary. 2526 * After splitting segment size the segment num exceeds. 2527 * So it splits to 2 child IOs. 2528 */ 2529 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 0, 14, 6); 2530 ut_expected_io_set_iov(expected_io, 0, iov[0].iov_base, 512 * 3); 2531 ut_expected_io_set_iov(expected_io, 1, iov[0].iov_base + 512 * 3, 512); 2532 ut_expected_io_set_iov(expected_io, 2, iov[1].iov_base, 512 * 3); 2533 ut_expected_io_set_iov(expected_io, 3, iov[1].iov_base + 512 * 3, 512); 2534 ut_expected_io_set_iov(expected_io, 4, iov[2].iov_base, 512 * 3); 2535 ut_expected_io_set_iov(expected_io, 5, iov[2].iov_base + 512 * 3, 512 * 3); 2536 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2537 2538 /* The 2nd child IO has the left segment entry */ 2539 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 14, 2, 1); 2540 ut_expected_io_set_iov(expected_io, 0, iov[2].iov_base + 512 * 6, 512 * 2); 2541 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2542 2543 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 16, 2, 1); 2544 ut_expected_io_set_iov(expected_io, 0, iov[2].iov_base + 512 * 8, 512 * 2); 2545 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2546 2547 rc = spdk_bdev_writev_blocks(desc, io_ch, iov, 3, 0, 18, io_done, NULL); 2548 CU_ASSERT(rc == 0); 2549 CU_ASSERT(g_io_done == false); 2550 2551 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 3); 2552 stub_complete_io(3); 2553 CU_ASSERT(g_io_done == true); 2554 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 2555 2556 /* A very complicated case. Each sg entry exceeds max_segment_size 2557 * and split on io boundary. 2558 * optimal_io_boundary < max_segment_size * max_num_segments 2559 */ 2560 bdev->max_segment_size = 3 * 512; 2561 bdev->max_num_segments = BDEV_IO_NUM_CHILD_IOV; 2562 g_io_done = false; 2563 2564 for (i = 0; i < 20; i++) { 2565 iov[i].iov_base = (void *)((i + 1) * 0x10000); 2566 iov[i].iov_len = 512 * 4; 2567 } 2568 2569 /* IO crossing the IO boundary requires split. 2570 * 80 block length can split 5 child IOs base on offset and IO boundary. 2571 * Each iov entry needs to be splitted to 2 entries because of max_segment_size 2572 * Total 5 child IOs. 2573 */ 2574 2575 /* 4 iov entries are in an IO boundary and each iov entry splits to 2. 2576 * So each child IO occupies 8 child iov entries. 2577 */ 2578 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 0, 16, 8); 2579 for (i = 0; i < 4; i++) { 2580 int iovcnt = i * 2; 2581 ut_expected_io_set_iov(expected_io, iovcnt, iov[i].iov_base, 512 * 3); 2582 ut_expected_io_set_iov(expected_io, iovcnt + 1, iov[i].iov_base + 512 * 3, 512); 2583 } 2584 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2585 2586 /* 2nd child IO and total 16 child iov entries of parent IO */ 2587 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 16, 16, 8); 2588 for (i = 4; i < 8; i++) { 2589 int iovcnt = (i - 4) * 2; 2590 ut_expected_io_set_iov(expected_io, iovcnt, iov[i].iov_base, 512 * 3); 2591 ut_expected_io_set_iov(expected_io, iovcnt + 1, iov[i].iov_base + 512 * 3, 512); 2592 } 2593 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2594 2595 /* 3rd child IO and total 24 child iov entries of parent IO */ 2596 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 32, 16, 8); 2597 for (i = 8; i < 12; i++) { 2598 int iovcnt = (i - 8) * 2; 2599 ut_expected_io_set_iov(expected_io, iovcnt, iov[i].iov_base, 512 * 3); 2600 ut_expected_io_set_iov(expected_io, iovcnt + 1, iov[i].iov_base + 512 * 3, 512); 2601 } 2602 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2603 2604 /* 4th child IO and total 32 child iov entries of parent IO */ 2605 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 48, 16, 8); 2606 for (i = 12; i < 16; i++) { 2607 int iovcnt = (i - 12) * 2; 2608 ut_expected_io_set_iov(expected_io, iovcnt, iov[i].iov_base, 512 * 3); 2609 ut_expected_io_set_iov(expected_io, iovcnt + 1, iov[i].iov_base + 512 * 3, 512); 2610 } 2611 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2612 2613 /* 5th child IO and because of the child iov entry it should be splitted 2614 * in next round. 2615 */ 2616 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 64, 16, 8); 2617 for (i = 16; i < 20; i++) { 2618 int iovcnt = (i - 16) * 2; 2619 ut_expected_io_set_iov(expected_io, iovcnt, iov[i].iov_base, 512 * 3); 2620 ut_expected_io_set_iov(expected_io, iovcnt + 1, iov[i].iov_base + 512 * 3, 512); 2621 } 2622 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2623 2624 rc = spdk_bdev_writev_blocks(desc, io_ch, iov, 20, 0, 80, io_done, NULL); 2625 CU_ASSERT(rc == 0); 2626 CU_ASSERT(g_io_done == false); 2627 2628 /* First split round */ 2629 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 4); 2630 stub_complete_io(4); 2631 CU_ASSERT(g_io_done == false); 2632 2633 /* Second split round */ 2634 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 2635 stub_complete_io(1); 2636 CU_ASSERT(g_io_done == true); 2637 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 2638 2639 spdk_put_io_channel(io_ch); 2640 spdk_bdev_close(desc); 2641 free_bdev(bdev); 2642 spdk_bdev_finish(bdev_fini_cb, NULL); 2643 poll_threads(); 2644 } 2645 2646 static void 2647 bdev_io_split_with_io_wait(void) 2648 { 2649 struct spdk_bdev *bdev; 2650 struct spdk_bdev_desc *desc = NULL; 2651 struct spdk_io_channel *io_ch; 2652 struct spdk_bdev_channel *channel; 2653 struct spdk_bdev_mgmt_channel *mgmt_ch; 2654 struct spdk_bdev_opts bdev_opts = {}; 2655 struct iovec iov[3]; 2656 struct ut_expected_io *expected_io; 2657 int rc; 2658 2659 spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts)); 2660 bdev_opts.bdev_io_pool_size = 2; 2661 bdev_opts.bdev_io_cache_size = 1; 2662 2663 rc = spdk_bdev_set_opts(&bdev_opts); 2664 CU_ASSERT(rc == 0); 2665 spdk_bdev_initialize(bdev_init_cb, NULL); 2666 2667 bdev = allocate_bdev("bdev0"); 2668 2669 rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc); 2670 CU_ASSERT(rc == 0); 2671 CU_ASSERT(desc != NULL); 2672 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 2673 io_ch = spdk_bdev_get_io_channel(desc); 2674 CU_ASSERT(io_ch != NULL); 2675 channel = spdk_io_channel_get_ctx(io_ch); 2676 mgmt_ch = channel->shared_resource->mgmt_ch; 2677 2678 bdev->optimal_io_boundary = 16; 2679 bdev->split_on_optimal_io_boundary = true; 2680 2681 rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, NULL); 2682 CU_ASSERT(rc == 0); 2683 2684 /* Now test that a single-vector command is split correctly. 2685 * Offset 14, length 8, payload 0xF000 2686 * Child - Offset 14, length 2, payload 0xF000 2687 * Child - Offset 16, length 6, payload 0xF000 + 2 * 512 2688 * 2689 * Set up the expected values before calling spdk_bdev_read_blocks 2690 */ 2691 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 14, 2, 1); 2692 ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 2 * 512); 2693 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2694 2695 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 16, 6, 1); 2696 ut_expected_io_set_iov(expected_io, 0, (void *)(0xF000 + 2 * 512), 6 * 512); 2697 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2698 2699 /* The following children will be submitted sequentially due to the capacity of 2700 * spdk_bdev_io. 2701 */ 2702 2703 /* The first child I/O will be queued to wait until an spdk_bdev_io becomes available */ 2704 rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 8, io_done, NULL); 2705 CU_ASSERT(rc == 0); 2706 CU_ASSERT(!TAILQ_EMPTY(&mgmt_ch->io_wait_queue)); 2707 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 2708 2709 /* Completing the first read I/O will submit the first child */ 2710 stub_complete_io(1); 2711 CU_ASSERT(TAILQ_EMPTY(&mgmt_ch->io_wait_queue)); 2712 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 2713 2714 /* Completing the first child will submit the second child */ 2715 stub_complete_io(1); 2716 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 2717 2718 /* Complete the second child I/O. This should result in our callback getting 2719 * invoked since the parent I/O is now complete. 2720 */ 2721 stub_complete_io(1); 2722 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 2723 2724 /* Now set up a more complex, multi-vector command that needs to be split, 2725 * including splitting iovecs. 2726 */ 2727 iov[0].iov_base = (void *)0x10000; 2728 iov[0].iov_len = 512; 2729 iov[1].iov_base = (void *)0x20000; 2730 iov[1].iov_len = 20 * 512; 2731 iov[2].iov_base = (void *)0x30000; 2732 iov[2].iov_len = 11 * 512; 2733 2734 g_io_done = false; 2735 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 14, 2, 2); 2736 ut_expected_io_set_iov(expected_io, 0, (void *)0x10000, 512); 2737 ut_expected_io_set_iov(expected_io, 1, (void *)0x20000, 512); 2738 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2739 2740 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 16, 16, 1); 2741 ut_expected_io_set_iov(expected_io, 0, (void *)(0x20000 + 512), 16 * 512); 2742 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2743 2744 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 32, 14, 2); 2745 ut_expected_io_set_iov(expected_io, 0, (void *)(0x20000 + 17 * 512), 3 * 512); 2746 ut_expected_io_set_iov(expected_io, 1, (void *)0x30000, 11 * 512); 2747 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2748 2749 rc = spdk_bdev_writev_blocks(desc, io_ch, iov, 3, 14, 32, io_done, NULL); 2750 CU_ASSERT(rc == 0); 2751 CU_ASSERT(g_io_done == false); 2752 2753 /* The following children will be submitted sequentially due to the capacity of 2754 * spdk_bdev_io. 2755 */ 2756 2757 /* Completing the first child will submit the second child */ 2758 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 2759 stub_complete_io(1); 2760 CU_ASSERT(g_io_done == false); 2761 2762 /* Completing the second child will submit the third child */ 2763 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 2764 stub_complete_io(1); 2765 CU_ASSERT(g_io_done == false); 2766 2767 /* Completing the third child will result in our callback getting invoked 2768 * since the parent I/O is now complete. 2769 */ 2770 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 2771 stub_complete_io(1); 2772 CU_ASSERT(g_io_done == true); 2773 2774 CU_ASSERT(TAILQ_EMPTY(&g_bdev_ut_channel->expected_io)); 2775 2776 spdk_put_io_channel(io_ch); 2777 spdk_bdev_close(desc); 2778 free_bdev(bdev); 2779 spdk_bdev_finish(bdev_fini_cb, NULL); 2780 poll_threads(); 2781 } 2782 2783 static void 2784 bdev_io_alignment(void) 2785 { 2786 struct spdk_bdev *bdev; 2787 struct spdk_bdev_desc *desc = NULL; 2788 struct spdk_io_channel *io_ch; 2789 struct spdk_bdev_opts bdev_opts = {}; 2790 int rc; 2791 void *buf = NULL; 2792 struct iovec iovs[2]; 2793 int iovcnt; 2794 uint64_t alignment; 2795 2796 spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts)); 2797 bdev_opts.bdev_io_pool_size = 20; 2798 bdev_opts.bdev_io_cache_size = 2; 2799 2800 rc = spdk_bdev_set_opts(&bdev_opts); 2801 CU_ASSERT(rc == 0); 2802 spdk_bdev_initialize(bdev_init_cb, NULL); 2803 2804 fn_table.submit_request = stub_submit_request_get_buf; 2805 bdev = allocate_bdev("bdev0"); 2806 2807 rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc); 2808 CU_ASSERT(rc == 0); 2809 CU_ASSERT(desc != NULL); 2810 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 2811 io_ch = spdk_bdev_get_io_channel(desc); 2812 CU_ASSERT(io_ch != NULL); 2813 2814 /* Create aligned buffer */ 2815 rc = posix_memalign(&buf, 4096, 8192); 2816 SPDK_CU_ASSERT_FATAL(rc == 0); 2817 2818 /* Pass aligned single buffer with no alignment required */ 2819 alignment = 1; 2820 bdev->required_alignment = spdk_u32log2(alignment); 2821 2822 rc = spdk_bdev_write_blocks(desc, io_ch, buf, 0, 1, io_done, NULL); 2823 CU_ASSERT(rc == 0); 2824 stub_complete_io(1); 2825 CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt, 2826 alignment)); 2827 2828 rc = spdk_bdev_read_blocks(desc, io_ch, buf, 0, 1, io_done, NULL); 2829 CU_ASSERT(rc == 0); 2830 stub_complete_io(1); 2831 CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt, 2832 alignment)); 2833 2834 /* Pass unaligned single buffer with no alignment required */ 2835 alignment = 1; 2836 bdev->required_alignment = spdk_u32log2(alignment); 2837 2838 rc = spdk_bdev_write_blocks(desc, io_ch, buf + 4, 0, 1, io_done, NULL); 2839 CU_ASSERT(rc == 0); 2840 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0); 2841 CU_ASSERT(g_bdev_io->u.bdev.iovs[0].iov_base == buf + 4); 2842 stub_complete_io(1); 2843 2844 rc = spdk_bdev_read_blocks(desc, io_ch, buf + 4, 0, 1, io_done, NULL); 2845 CU_ASSERT(rc == 0); 2846 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0); 2847 CU_ASSERT(g_bdev_io->u.bdev.iovs[0].iov_base == buf + 4); 2848 stub_complete_io(1); 2849 2850 /* Pass unaligned single buffer with 512 alignment required */ 2851 alignment = 512; 2852 bdev->required_alignment = spdk_u32log2(alignment); 2853 2854 rc = spdk_bdev_write_blocks(desc, io_ch, buf + 4, 0, 1, io_done, NULL); 2855 CU_ASSERT(rc == 0); 2856 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 1); 2857 CU_ASSERT(g_bdev_io->u.bdev.iovs == &g_bdev_io->internal.bounce_iov); 2858 CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt, 2859 alignment)); 2860 stub_complete_io(1); 2861 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0); 2862 2863 rc = spdk_bdev_read_blocks(desc, io_ch, buf + 4, 0, 1, io_done, NULL); 2864 CU_ASSERT(rc == 0); 2865 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 1); 2866 CU_ASSERT(g_bdev_io->u.bdev.iovs == &g_bdev_io->internal.bounce_iov); 2867 CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt, 2868 alignment)); 2869 stub_complete_io(1); 2870 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0); 2871 2872 /* Pass unaligned single buffer with 4096 alignment required */ 2873 alignment = 4096; 2874 bdev->required_alignment = spdk_u32log2(alignment); 2875 2876 rc = spdk_bdev_write_blocks(desc, io_ch, buf + 8, 0, 1, io_done, NULL); 2877 CU_ASSERT(rc == 0); 2878 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 1); 2879 CU_ASSERT(g_bdev_io->u.bdev.iovs == &g_bdev_io->internal.bounce_iov); 2880 CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt, 2881 alignment)); 2882 stub_complete_io(1); 2883 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0); 2884 2885 rc = spdk_bdev_read_blocks(desc, io_ch, buf + 8, 0, 1, io_done, NULL); 2886 CU_ASSERT(rc == 0); 2887 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 1); 2888 CU_ASSERT(g_bdev_io->u.bdev.iovs == &g_bdev_io->internal.bounce_iov); 2889 CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt, 2890 alignment)); 2891 stub_complete_io(1); 2892 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0); 2893 2894 /* Pass aligned iovs with no alignment required */ 2895 alignment = 1; 2896 bdev->required_alignment = spdk_u32log2(alignment); 2897 2898 iovcnt = 1; 2899 iovs[0].iov_base = buf; 2900 iovs[0].iov_len = 512; 2901 2902 rc = spdk_bdev_writev(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL); 2903 CU_ASSERT(rc == 0); 2904 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0); 2905 stub_complete_io(1); 2906 CU_ASSERT(g_bdev_io->u.bdev.iovs[0].iov_base == iovs[0].iov_base); 2907 2908 rc = spdk_bdev_readv(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL); 2909 CU_ASSERT(rc == 0); 2910 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0); 2911 stub_complete_io(1); 2912 CU_ASSERT(g_bdev_io->u.bdev.iovs[0].iov_base == iovs[0].iov_base); 2913 2914 /* Pass unaligned iovs with no alignment required */ 2915 alignment = 1; 2916 bdev->required_alignment = spdk_u32log2(alignment); 2917 2918 iovcnt = 2; 2919 iovs[0].iov_base = buf + 16; 2920 iovs[0].iov_len = 256; 2921 iovs[1].iov_base = buf + 16 + 256 + 32; 2922 iovs[1].iov_len = 256; 2923 2924 rc = spdk_bdev_writev(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL); 2925 CU_ASSERT(rc == 0); 2926 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0); 2927 stub_complete_io(1); 2928 CU_ASSERT(g_bdev_io->u.bdev.iovs[0].iov_base == iovs[0].iov_base); 2929 2930 rc = spdk_bdev_readv(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL); 2931 CU_ASSERT(rc == 0); 2932 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0); 2933 stub_complete_io(1); 2934 CU_ASSERT(g_bdev_io->u.bdev.iovs[0].iov_base == iovs[0].iov_base); 2935 2936 /* Pass unaligned iov with 2048 alignment required */ 2937 alignment = 2048; 2938 bdev->required_alignment = spdk_u32log2(alignment); 2939 2940 iovcnt = 2; 2941 iovs[0].iov_base = buf + 16; 2942 iovs[0].iov_len = 256; 2943 iovs[1].iov_base = buf + 16 + 256 + 32; 2944 iovs[1].iov_len = 256; 2945 2946 rc = spdk_bdev_writev(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL); 2947 CU_ASSERT(rc == 0); 2948 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == iovcnt); 2949 CU_ASSERT(g_bdev_io->u.bdev.iovs == &g_bdev_io->internal.bounce_iov); 2950 CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt, 2951 alignment)); 2952 stub_complete_io(1); 2953 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0); 2954 2955 rc = spdk_bdev_readv(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL); 2956 CU_ASSERT(rc == 0); 2957 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == iovcnt); 2958 CU_ASSERT(g_bdev_io->u.bdev.iovs == &g_bdev_io->internal.bounce_iov); 2959 CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt, 2960 alignment)); 2961 stub_complete_io(1); 2962 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0); 2963 2964 /* Pass iov without allocated buffer without alignment required */ 2965 alignment = 1; 2966 bdev->required_alignment = spdk_u32log2(alignment); 2967 2968 iovcnt = 1; 2969 iovs[0].iov_base = NULL; 2970 iovs[0].iov_len = 0; 2971 2972 rc = spdk_bdev_readv(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL); 2973 CU_ASSERT(rc == 0); 2974 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0); 2975 CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt, 2976 alignment)); 2977 stub_complete_io(1); 2978 2979 /* Pass iov without allocated buffer with 1024 alignment required */ 2980 alignment = 1024; 2981 bdev->required_alignment = spdk_u32log2(alignment); 2982 2983 iovcnt = 1; 2984 iovs[0].iov_base = NULL; 2985 iovs[0].iov_len = 0; 2986 2987 rc = spdk_bdev_readv(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL); 2988 CU_ASSERT(rc == 0); 2989 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0); 2990 CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt, 2991 alignment)); 2992 stub_complete_io(1); 2993 2994 spdk_put_io_channel(io_ch); 2995 spdk_bdev_close(desc); 2996 free_bdev(bdev); 2997 fn_table.submit_request = stub_submit_request; 2998 spdk_bdev_finish(bdev_fini_cb, NULL); 2999 poll_threads(); 3000 3001 free(buf); 3002 } 3003 3004 static void 3005 bdev_io_alignment_with_boundary(void) 3006 { 3007 struct spdk_bdev *bdev; 3008 struct spdk_bdev_desc *desc = NULL; 3009 struct spdk_io_channel *io_ch; 3010 struct spdk_bdev_opts bdev_opts = {}; 3011 int rc; 3012 void *buf = NULL; 3013 struct iovec iovs[2]; 3014 int iovcnt; 3015 uint64_t alignment; 3016 3017 spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts)); 3018 bdev_opts.bdev_io_pool_size = 20; 3019 bdev_opts.bdev_io_cache_size = 2; 3020 3021 bdev_opts.opts_size = sizeof(bdev_opts); 3022 rc = spdk_bdev_set_opts(&bdev_opts); 3023 CU_ASSERT(rc == 0); 3024 spdk_bdev_initialize(bdev_init_cb, NULL); 3025 3026 fn_table.submit_request = stub_submit_request_get_buf; 3027 bdev = allocate_bdev("bdev0"); 3028 3029 rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc); 3030 CU_ASSERT(rc == 0); 3031 CU_ASSERT(desc != NULL); 3032 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 3033 io_ch = spdk_bdev_get_io_channel(desc); 3034 CU_ASSERT(io_ch != NULL); 3035 3036 /* Create aligned buffer */ 3037 rc = posix_memalign(&buf, 4096, 131072); 3038 SPDK_CU_ASSERT_FATAL(rc == 0); 3039 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 3040 3041 /* 512 * 3 with 2 IO boundary, allocate small data buffer from bdev layer */ 3042 alignment = 512; 3043 bdev->required_alignment = spdk_u32log2(alignment); 3044 bdev->optimal_io_boundary = 2; 3045 bdev->split_on_optimal_io_boundary = true; 3046 3047 iovcnt = 1; 3048 iovs[0].iov_base = NULL; 3049 iovs[0].iov_len = 512 * 3; 3050 3051 rc = spdk_bdev_readv_blocks(desc, io_ch, iovs, iovcnt, 1, 3, io_done, NULL); 3052 CU_ASSERT(rc == 0); 3053 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 3054 stub_complete_io(2); 3055 3056 /* 8KiB with 16 IO boundary, allocate large data buffer from bdev layer */ 3057 alignment = 512; 3058 bdev->required_alignment = spdk_u32log2(alignment); 3059 bdev->optimal_io_boundary = 16; 3060 bdev->split_on_optimal_io_boundary = true; 3061 3062 iovcnt = 1; 3063 iovs[0].iov_base = NULL; 3064 iovs[0].iov_len = 512 * 16; 3065 3066 rc = spdk_bdev_readv_blocks(desc, io_ch, iovs, iovcnt, 1, 16, io_done, NULL); 3067 CU_ASSERT(rc == 0); 3068 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 3069 stub_complete_io(2); 3070 3071 /* 512 * 160 with 128 IO boundary, 63.5KiB + 16.5KiB for the two children requests */ 3072 alignment = 512; 3073 bdev->required_alignment = spdk_u32log2(alignment); 3074 bdev->optimal_io_boundary = 128; 3075 bdev->split_on_optimal_io_boundary = true; 3076 3077 iovcnt = 1; 3078 iovs[0].iov_base = buf + 16; 3079 iovs[0].iov_len = 512 * 160; 3080 rc = spdk_bdev_readv_blocks(desc, io_ch, iovs, iovcnt, 1, 160, io_done, NULL); 3081 CU_ASSERT(rc == 0); 3082 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 3083 stub_complete_io(2); 3084 3085 /* 512 * 3 with 2 IO boundary */ 3086 alignment = 512; 3087 bdev->required_alignment = spdk_u32log2(alignment); 3088 bdev->optimal_io_boundary = 2; 3089 bdev->split_on_optimal_io_boundary = true; 3090 3091 iovcnt = 2; 3092 iovs[0].iov_base = buf + 16; 3093 iovs[0].iov_len = 512; 3094 iovs[1].iov_base = buf + 16 + 512 + 32; 3095 iovs[1].iov_len = 1024; 3096 3097 rc = spdk_bdev_writev_blocks(desc, io_ch, iovs, iovcnt, 1, 3, io_done, NULL); 3098 CU_ASSERT(rc == 0); 3099 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 3100 stub_complete_io(2); 3101 3102 rc = spdk_bdev_readv_blocks(desc, io_ch, iovs, iovcnt, 1, 3, io_done, NULL); 3103 CU_ASSERT(rc == 0); 3104 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 3105 stub_complete_io(2); 3106 3107 /* 512 * 64 with 32 IO boundary */ 3108 bdev->optimal_io_boundary = 32; 3109 iovcnt = 2; 3110 iovs[0].iov_base = buf + 16; 3111 iovs[0].iov_len = 16384; 3112 iovs[1].iov_base = buf + 16 + 16384 + 32; 3113 iovs[1].iov_len = 16384; 3114 3115 rc = spdk_bdev_writev_blocks(desc, io_ch, iovs, iovcnt, 1, 64, io_done, NULL); 3116 CU_ASSERT(rc == 0); 3117 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 3); 3118 stub_complete_io(3); 3119 3120 rc = spdk_bdev_readv_blocks(desc, io_ch, iovs, iovcnt, 1, 64, io_done, NULL); 3121 CU_ASSERT(rc == 0); 3122 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 3); 3123 stub_complete_io(3); 3124 3125 /* 512 * 160 with 32 IO boundary */ 3126 iovcnt = 1; 3127 iovs[0].iov_base = buf + 16; 3128 iovs[0].iov_len = 16384 + 65536; 3129 3130 rc = spdk_bdev_writev_blocks(desc, io_ch, iovs, iovcnt, 1, 160, io_done, NULL); 3131 CU_ASSERT(rc == 0); 3132 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 6); 3133 stub_complete_io(6); 3134 3135 spdk_put_io_channel(io_ch); 3136 spdk_bdev_close(desc); 3137 free_bdev(bdev); 3138 fn_table.submit_request = stub_submit_request; 3139 spdk_bdev_finish(bdev_fini_cb, NULL); 3140 poll_threads(); 3141 3142 free(buf); 3143 } 3144 3145 static void 3146 histogram_status_cb(void *cb_arg, int status) 3147 { 3148 g_status = status; 3149 } 3150 3151 static void 3152 histogram_data_cb(void *cb_arg, int status, struct spdk_histogram_data *histogram) 3153 { 3154 g_status = status; 3155 g_histogram = histogram; 3156 } 3157 3158 static void 3159 histogram_io_count(void *ctx, uint64_t start, uint64_t end, uint64_t count, 3160 uint64_t total, uint64_t so_far) 3161 { 3162 g_count += count; 3163 } 3164 3165 static void 3166 bdev_histograms(void) 3167 { 3168 struct spdk_bdev *bdev; 3169 struct spdk_bdev_desc *desc = NULL; 3170 struct spdk_io_channel *ch; 3171 struct spdk_histogram_data *histogram; 3172 uint8_t buf[4096]; 3173 int rc; 3174 3175 spdk_bdev_initialize(bdev_init_cb, NULL); 3176 3177 bdev = allocate_bdev("bdev"); 3178 3179 rc = spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc); 3180 CU_ASSERT(rc == 0); 3181 CU_ASSERT(desc != NULL); 3182 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 3183 3184 ch = spdk_bdev_get_io_channel(desc); 3185 CU_ASSERT(ch != NULL); 3186 3187 /* Enable histogram */ 3188 g_status = -1; 3189 spdk_bdev_histogram_enable(bdev, histogram_status_cb, NULL, true); 3190 poll_threads(); 3191 CU_ASSERT(g_status == 0); 3192 CU_ASSERT(bdev->internal.histogram_enabled == true); 3193 3194 /* Allocate histogram */ 3195 histogram = spdk_histogram_data_alloc(); 3196 SPDK_CU_ASSERT_FATAL(histogram != NULL); 3197 3198 /* Check if histogram is zeroed */ 3199 spdk_bdev_histogram_get(bdev, histogram, histogram_data_cb, NULL); 3200 poll_threads(); 3201 CU_ASSERT(g_status == 0); 3202 SPDK_CU_ASSERT_FATAL(g_histogram != NULL); 3203 3204 g_count = 0; 3205 spdk_histogram_data_iterate(g_histogram, histogram_io_count, NULL); 3206 3207 CU_ASSERT(g_count == 0); 3208 3209 rc = spdk_bdev_write_blocks(desc, ch, buf, 0, 1, io_done, NULL); 3210 CU_ASSERT(rc == 0); 3211 3212 spdk_delay_us(10); 3213 stub_complete_io(1); 3214 poll_threads(); 3215 3216 rc = spdk_bdev_read_blocks(desc, ch, buf, 0, 1, io_done, NULL); 3217 CU_ASSERT(rc == 0); 3218 3219 spdk_delay_us(10); 3220 stub_complete_io(1); 3221 poll_threads(); 3222 3223 /* Check if histogram gathered data from all I/O channels */ 3224 g_histogram = NULL; 3225 spdk_bdev_histogram_get(bdev, histogram, histogram_data_cb, NULL); 3226 poll_threads(); 3227 CU_ASSERT(g_status == 0); 3228 CU_ASSERT(bdev->internal.histogram_enabled == true); 3229 SPDK_CU_ASSERT_FATAL(g_histogram != NULL); 3230 3231 g_count = 0; 3232 spdk_histogram_data_iterate(g_histogram, histogram_io_count, NULL); 3233 CU_ASSERT(g_count == 2); 3234 3235 /* Disable histogram */ 3236 spdk_bdev_histogram_enable(bdev, histogram_status_cb, NULL, false); 3237 poll_threads(); 3238 CU_ASSERT(g_status == 0); 3239 CU_ASSERT(bdev->internal.histogram_enabled == false); 3240 3241 /* Try to run histogram commands on disabled bdev */ 3242 spdk_bdev_histogram_get(bdev, histogram, histogram_data_cb, NULL); 3243 poll_threads(); 3244 CU_ASSERT(g_status == -EFAULT); 3245 3246 spdk_histogram_data_free(histogram); 3247 spdk_put_io_channel(ch); 3248 spdk_bdev_close(desc); 3249 free_bdev(bdev); 3250 spdk_bdev_finish(bdev_fini_cb, NULL); 3251 poll_threads(); 3252 } 3253 3254 static void 3255 _bdev_compare(bool emulated) 3256 { 3257 struct spdk_bdev *bdev; 3258 struct spdk_bdev_desc *desc = NULL; 3259 struct spdk_io_channel *ioch; 3260 struct ut_expected_io *expected_io; 3261 uint64_t offset, num_blocks; 3262 uint32_t num_completed; 3263 char aa_buf[512]; 3264 char bb_buf[512]; 3265 struct iovec compare_iov; 3266 uint8_t io_type; 3267 int rc; 3268 3269 if (emulated) { 3270 io_type = SPDK_BDEV_IO_TYPE_READ; 3271 } else { 3272 io_type = SPDK_BDEV_IO_TYPE_COMPARE; 3273 } 3274 3275 memset(aa_buf, 0xaa, sizeof(aa_buf)); 3276 memset(bb_buf, 0xbb, sizeof(bb_buf)); 3277 3278 g_io_types_supported[SPDK_BDEV_IO_TYPE_COMPARE] = !emulated; 3279 3280 spdk_bdev_initialize(bdev_init_cb, NULL); 3281 fn_table.submit_request = stub_submit_request_get_buf; 3282 bdev = allocate_bdev("bdev"); 3283 3284 rc = spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc); 3285 CU_ASSERT_EQUAL(rc, 0); 3286 SPDK_CU_ASSERT_FATAL(desc != NULL); 3287 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 3288 ioch = spdk_bdev_get_io_channel(desc); 3289 SPDK_CU_ASSERT_FATAL(ioch != NULL); 3290 3291 fn_table.submit_request = stub_submit_request_get_buf; 3292 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 3293 3294 offset = 50; 3295 num_blocks = 1; 3296 compare_iov.iov_base = aa_buf; 3297 compare_iov.iov_len = sizeof(aa_buf); 3298 3299 expected_io = ut_alloc_expected_io(io_type, offset, num_blocks, 0); 3300 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 3301 3302 g_io_done = false; 3303 g_compare_read_buf = aa_buf; 3304 g_compare_read_buf_len = sizeof(aa_buf); 3305 rc = spdk_bdev_comparev_blocks(desc, ioch, &compare_iov, 1, offset, num_blocks, io_done, NULL); 3306 CU_ASSERT_EQUAL(rc, 0); 3307 num_completed = stub_complete_io(1); 3308 CU_ASSERT_EQUAL(num_completed, 1); 3309 CU_ASSERT(g_io_done == true); 3310 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS); 3311 3312 expected_io = ut_alloc_expected_io(io_type, offset, num_blocks, 0); 3313 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 3314 3315 g_io_done = false; 3316 g_compare_read_buf = bb_buf; 3317 g_compare_read_buf_len = sizeof(bb_buf); 3318 rc = spdk_bdev_comparev_blocks(desc, ioch, &compare_iov, 1, offset, num_blocks, io_done, NULL); 3319 CU_ASSERT_EQUAL(rc, 0); 3320 num_completed = stub_complete_io(1); 3321 CU_ASSERT_EQUAL(num_completed, 1); 3322 CU_ASSERT(g_io_done == true); 3323 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_MISCOMPARE); 3324 3325 spdk_put_io_channel(ioch); 3326 spdk_bdev_close(desc); 3327 free_bdev(bdev); 3328 fn_table.submit_request = stub_submit_request; 3329 spdk_bdev_finish(bdev_fini_cb, NULL); 3330 poll_threads(); 3331 3332 g_io_types_supported[SPDK_BDEV_IO_TYPE_COMPARE] = true; 3333 3334 g_compare_read_buf = NULL; 3335 } 3336 3337 static void 3338 bdev_compare(void) 3339 { 3340 _bdev_compare(true); 3341 _bdev_compare(false); 3342 } 3343 3344 static void 3345 bdev_compare_and_write(void) 3346 { 3347 struct spdk_bdev *bdev; 3348 struct spdk_bdev_desc *desc = NULL; 3349 struct spdk_io_channel *ioch; 3350 struct ut_expected_io *expected_io; 3351 uint64_t offset, num_blocks; 3352 uint32_t num_completed; 3353 char aa_buf[512]; 3354 char bb_buf[512]; 3355 char cc_buf[512]; 3356 char write_buf[512]; 3357 struct iovec compare_iov; 3358 struct iovec write_iov; 3359 int rc; 3360 3361 memset(aa_buf, 0xaa, sizeof(aa_buf)); 3362 memset(bb_buf, 0xbb, sizeof(bb_buf)); 3363 memset(cc_buf, 0xcc, sizeof(cc_buf)); 3364 3365 g_io_types_supported[SPDK_BDEV_IO_TYPE_COMPARE] = false; 3366 3367 spdk_bdev_initialize(bdev_init_cb, NULL); 3368 fn_table.submit_request = stub_submit_request_get_buf; 3369 bdev = allocate_bdev("bdev"); 3370 3371 rc = spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc); 3372 CU_ASSERT_EQUAL(rc, 0); 3373 SPDK_CU_ASSERT_FATAL(desc != NULL); 3374 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 3375 ioch = spdk_bdev_get_io_channel(desc); 3376 SPDK_CU_ASSERT_FATAL(ioch != NULL); 3377 3378 fn_table.submit_request = stub_submit_request_get_buf; 3379 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 3380 3381 offset = 50; 3382 num_blocks = 1; 3383 compare_iov.iov_base = aa_buf; 3384 compare_iov.iov_len = sizeof(aa_buf); 3385 write_iov.iov_base = bb_buf; 3386 write_iov.iov_len = sizeof(bb_buf); 3387 3388 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, offset, num_blocks, 0); 3389 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 3390 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, offset, num_blocks, 0); 3391 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 3392 3393 g_io_done = false; 3394 g_compare_read_buf = aa_buf; 3395 g_compare_read_buf_len = sizeof(aa_buf); 3396 memset(write_buf, 0, sizeof(write_buf)); 3397 g_compare_write_buf = write_buf; 3398 g_compare_write_buf_len = sizeof(write_buf); 3399 rc = spdk_bdev_comparev_and_writev_blocks(desc, ioch, &compare_iov, 1, &write_iov, 1, 3400 offset, num_blocks, io_done, NULL); 3401 /* Trigger range locking */ 3402 poll_threads(); 3403 CU_ASSERT_EQUAL(rc, 0); 3404 num_completed = stub_complete_io(1); 3405 CU_ASSERT_EQUAL(num_completed, 1); 3406 CU_ASSERT(g_io_done == false); 3407 num_completed = stub_complete_io(1); 3408 /* Trigger range unlocking */ 3409 poll_threads(); 3410 CU_ASSERT_EQUAL(num_completed, 1); 3411 CU_ASSERT(g_io_done == true); 3412 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS); 3413 CU_ASSERT(memcmp(write_buf, bb_buf, sizeof(write_buf)) == 0); 3414 3415 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, offset, num_blocks, 0); 3416 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 3417 3418 g_io_done = false; 3419 g_compare_read_buf = cc_buf; 3420 g_compare_read_buf_len = sizeof(cc_buf); 3421 memset(write_buf, 0, sizeof(write_buf)); 3422 g_compare_write_buf = write_buf; 3423 g_compare_write_buf_len = sizeof(write_buf); 3424 rc = spdk_bdev_comparev_and_writev_blocks(desc, ioch, &compare_iov, 1, &write_iov, 1, 3425 offset, num_blocks, io_done, NULL); 3426 /* Trigger range locking */ 3427 poll_threads(); 3428 CU_ASSERT_EQUAL(rc, 0); 3429 num_completed = stub_complete_io(1); 3430 /* Trigger range unlocking earlier because we expect error here */ 3431 poll_threads(); 3432 CU_ASSERT_EQUAL(num_completed, 1); 3433 CU_ASSERT(g_io_done == true); 3434 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_MISCOMPARE); 3435 num_completed = stub_complete_io(1); 3436 CU_ASSERT_EQUAL(num_completed, 0); 3437 3438 spdk_put_io_channel(ioch); 3439 spdk_bdev_close(desc); 3440 free_bdev(bdev); 3441 fn_table.submit_request = stub_submit_request; 3442 spdk_bdev_finish(bdev_fini_cb, NULL); 3443 poll_threads(); 3444 3445 g_io_types_supported[SPDK_BDEV_IO_TYPE_COMPARE] = true; 3446 3447 g_compare_read_buf = NULL; 3448 g_compare_write_buf = NULL; 3449 } 3450 3451 static void 3452 bdev_write_zeroes(void) 3453 { 3454 struct spdk_bdev *bdev; 3455 struct spdk_bdev_desc *desc = NULL; 3456 struct spdk_io_channel *ioch; 3457 struct ut_expected_io *expected_io; 3458 uint64_t offset, num_io_blocks, num_blocks; 3459 uint32_t num_completed, num_requests; 3460 int rc; 3461 3462 spdk_bdev_initialize(bdev_init_cb, NULL); 3463 bdev = allocate_bdev("bdev"); 3464 3465 rc = spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc); 3466 CU_ASSERT_EQUAL(rc, 0); 3467 SPDK_CU_ASSERT_FATAL(desc != NULL); 3468 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 3469 ioch = spdk_bdev_get_io_channel(desc); 3470 SPDK_CU_ASSERT_FATAL(ioch != NULL); 3471 3472 fn_table.submit_request = stub_submit_request; 3473 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 3474 3475 /* First test that if the bdev supports write_zeroes, the request won't be split */ 3476 bdev->md_len = 0; 3477 bdev->blocklen = 4096; 3478 num_blocks = (ZERO_BUFFER_SIZE / bdev->blocklen) * 2; 3479 3480 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, 0, num_blocks, 0); 3481 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 3482 rc = spdk_bdev_write_zeroes_blocks(desc, ioch, 0, num_blocks, io_done, NULL); 3483 CU_ASSERT_EQUAL(rc, 0); 3484 num_completed = stub_complete_io(1); 3485 CU_ASSERT_EQUAL(num_completed, 1); 3486 3487 /* Check that if write zeroes is not supported it'll be replaced by regular writes */ 3488 ut_enable_io_type(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, false); 3489 num_io_blocks = ZERO_BUFFER_SIZE / bdev->blocklen; 3490 num_requests = 2; 3491 num_blocks = (ZERO_BUFFER_SIZE / bdev->blocklen) * num_requests; 3492 3493 for (offset = 0; offset < num_requests; ++offset) { 3494 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 3495 offset * num_io_blocks, num_io_blocks, 0); 3496 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 3497 } 3498 3499 rc = spdk_bdev_write_zeroes_blocks(desc, ioch, 0, num_blocks, io_done, NULL); 3500 CU_ASSERT_EQUAL(rc, 0); 3501 num_completed = stub_complete_io(num_requests); 3502 CU_ASSERT_EQUAL(num_completed, num_requests); 3503 3504 /* Check that the splitting is correct if bdev has interleaved metadata */ 3505 bdev->md_interleave = true; 3506 bdev->md_len = 64; 3507 bdev->blocklen = 4096 + 64; 3508 num_blocks = (ZERO_BUFFER_SIZE / bdev->blocklen) * 2; 3509 3510 num_requests = offset = 0; 3511 while (offset < num_blocks) { 3512 num_io_blocks = spdk_min(ZERO_BUFFER_SIZE / bdev->blocklen, num_blocks - offset); 3513 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 3514 offset, num_io_blocks, 0); 3515 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 3516 offset += num_io_blocks; 3517 num_requests++; 3518 } 3519 3520 rc = spdk_bdev_write_zeroes_blocks(desc, ioch, 0, num_blocks, io_done, NULL); 3521 CU_ASSERT_EQUAL(rc, 0); 3522 num_completed = stub_complete_io(num_requests); 3523 CU_ASSERT_EQUAL(num_completed, num_requests); 3524 num_completed = stub_complete_io(num_requests); 3525 assert(num_completed == 0); 3526 3527 /* Check the the same for separate metadata buffer */ 3528 bdev->md_interleave = false; 3529 bdev->md_len = 64; 3530 bdev->blocklen = 4096; 3531 3532 num_requests = offset = 0; 3533 while (offset < num_blocks) { 3534 num_io_blocks = spdk_min(ZERO_BUFFER_SIZE / (bdev->blocklen + bdev->md_len), num_blocks); 3535 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 3536 offset, num_io_blocks, 0); 3537 expected_io->md_buf = (char *)g_bdev_mgr.zero_buffer + num_io_blocks * bdev->blocklen; 3538 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 3539 offset += num_io_blocks; 3540 num_requests++; 3541 } 3542 3543 rc = spdk_bdev_write_zeroes_blocks(desc, ioch, 0, num_blocks, io_done, NULL); 3544 CU_ASSERT_EQUAL(rc, 0); 3545 num_completed = stub_complete_io(num_requests); 3546 CU_ASSERT_EQUAL(num_completed, num_requests); 3547 3548 ut_enable_io_type(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, true); 3549 spdk_put_io_channel(ioch); 3550 spdk_bdev_close(desc); 3551 free_bdev(bdev); 3552 spdk_bdev_finish(bdev_fini_cb, NULL); 3553 poll_threads(); 3554 } 3555 3556 static void 3557 bdev_zcopy_write(void) 3558 { 3559 struct spdk_bdev *bdev; 3560 struct spdk_bdev_desc *desc = NULL; 3561 struct spdk_io_channel *ioch; 3562 struct ut_expected_io *expected_io; 3563 uint64_t offset, num_blocks; 3564 uint32_t num_completed; 3565 char aa_buf[512]; 3566 struct iovec iov; 3567 int rc; 3568 const bool populate = false; 3569 const bool commit = true; 3570 3571 memset(aa_buf, 0xaa, sizeof(aa_buf)); 3572 3573 spdk_bdev_initialize(bdev_init_cb, NULL); 3574 bdev = allocate_bdev("bdev"); 3575 3576 rc = spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc); 3577 CU_ASSERT_EQUAL(rc, 0); 3578 SPDK_CU_ASSERT_FATAL(desc != NULL); 3579 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 3580 ioch = spdk_bdev_get_io_channel(desc); 3581 SPDK_CU_ASSERT_FATAL(ioch != NULL); 3582 3583 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 3584 3585 offset = 50; 3586 num_blocks = 1; 3587 iov.iov_base = NULL; 3588 iov.iov_len = 0; 3589 3590 g_zcopy_read_buf = (void *) 0x1122334455667788UL; 3591 g_zcopy_read_buf_len = (uint32_t) -1; 3592 /* Do a zcopy start for a write (populate=false) */ 3593 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_ZCOPY, offset, num_blocks, 0); 3594 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 3595 g_io_done = false; 3596 g_zcopy_write_buf = aa_buf; 3597 g_zcopy_write_buf_len = sizeof(aa_buf); 3598 g_zcopy_bdev_io = NULL; 3599 rc = spdk_bdev_zcopy_start(desc, ioch, &iov, 1, offset, num_blocks, populate, io_done, NULL); 3600 CU_ASSERT_EQUAL(rc, 0); 3601 num_completed = stub_complete_io(1); 3602 CU_ASSERT_EQUAL(num_completed, 1); 3603 CU_ASSERT(g_io_done == true); 3604 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS); 3605 /* Check that the iov has been set up */ 3606 CU_ASSERT(iov.iov_base == g_zcopy_write_buf); 3607 CU_ASSERT(iov.iov_len == g_zcopy_write_buf_len); 3608 /* Check that the bdev_io has been saved */ 3609 CU_ASSERT(g_zcopy_bdev_io != NULL); 3610 /* Now do the zcopy end for a write (commit=true) */ 3611 g_io_done = false; 3612 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_ZCOPY, offset, num_blocks, 0); 3613 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 3614 rc = spdk_bdev_zcopy_end(g_zcopy_bdev_io, commit, io_done, NULL); 3615 CU_ASSERT_EQUAL(rc, 0); 3616 num_completed = stub_complete_io(1); 3617 CU_ASSERT_EQUAL(num_completed, 1); 3618 CU_ASSERT(g_io_done == true); 3619 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS); 3620 /* Check the g_zcopy are reset by io_done */ 3621 CU_ASSERT(g_zcopy_write_buf == NULL); 3622 CU_ASSERT(g_zcopy_write_buf_len == 0); 3623 /* Check that io_done has freed the g_zcopy_bdev_io */ 3624 CU_ASSERT(g_zcopy_bdev_io == NULL); 3625 3626 /* Check the zcopy read buffer has not been touched which 3627 * ensures that the correct buffers were used. 3628 */ 3629 CU_ASSERT(g_zcopy_read_buf == (void *) 0x1122334455667788UL); 3630 CU_ASSERT(g_zcopy_read_buf_len == (uint32_t) -1); 3631 3632 spdk_put_io_channel(ioch); 3633 spdk_bdev_close(desc); 3634 free_bdev(bdev); 3635 spdk_bdev_finish(bdev_fini_cb, NULL); 3636 poll_threads(); 3637 } 3638 3639 static void 3640 bdev_zcopy_read(void) 3641 { 3642 struct spdk_bdev *bdev; 3643 struct spdk_bdev_desc *desc = NULL; 3644 struct spdk_io_channel *ioch; 3645 struct ut_expected_io *expected_io; 3646 uint64_t offset, num_blocks; 3647 uint32_t num_completed; 3648 char aa_buf[512]; 3649 struct iovec iov; 3650 int rc; 3651 const bool populate = true; 3652 const bool commit = false; 3653 3654 memset(aa_buf, 0xaa, sizeof(aa_buf)); 3655 3656 spdk_bdev_initialize(bdev_init_cb, NULL); 3657 bdev = allocate_bdev("bdev"); 3658 3659 rc = spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc); 3660 CU_ASSERT_EQUAL(rc, 0); 3661 SPDK_CU_ASSERT_FATAL(desc != NULL); 3662 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 3663 ioch = spdk_bdev_get_io_channel(desc); 3664 SPDK_CU_ASSERT_FATAL(ioch != NULL); 3665 3666 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 3667 3668 offset = 50; 3669 num_blocks = 1; 3670 iov.iov_base = NULL; 3671 iov.iov_len = 0; 3672 3673 g_zcopy_write_buf = (void *) 0x1122334455667788UL; 3674 g_zcopy_write_buf_len = (uint32_t) -1; 3675 3676 /* Do a zcopy start for a read (populate=true) */ 3677 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_ZCOPY, offset, num_blocks, 0); 3678 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 3679 g_io_done = false; 3680 g_zcopy_read_buf = aa_buf; 3681 g_zcopy_read_buf_len = sizeof(aa_buf); 3682 g_zcopy_bdev_io = NULL; 3683 rc = spdk_bdev_zcopy_start(desc, ioch, &iov, 1, offset, num_blocks, populate, io_done, NULL); 3684 CU_ASSERT_EQUAL(rc, 0); 3685 num_completed = stub_complete_io(1); 3686 CU_ASSERT_EQUAL(num_completed, 1); 3687 CU_ASSERT(g_io_done == true); 3688 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS); 3689 /* Check that the iov has been set up */ 3690 CU_ASSERT(iov.iov_base == g_zcopy_read_buf); 3691 CU_ASSERT(iov.iov_len == g_zcopy_read_buf_len); 3692 /* Check that the bdev_io has been saved */ 3693 CU_ASSERT(g_zcopy_bdev_io != NULL); 3694 3695 /* Now do the zcopy end for a read (commit=false) */ 3696 g_io_done = false; 3697 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_ZCOPY, offset, num_blocks, 0); 3698 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 3699 rc = spdk_bdev_zcopy_end(g_zcopy_bdev_io, commit, io_done, NULL); 3700 CU_ASSERT_EQUAL(rc, 0); 3701 num_completed = stub_complete_io(1); 3702 CU_ASSERT_EQUAL(num_completed, 1); 3703 CU_ASSERT(g_io_done == true); 3704 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS); 3705 /* Check the g_zcopy are reset by io_done */ 3706 CU_ASSERT(g_zcopy_read_buf == NULL); 3707 CU_ASSERT(g_zcopy_read_buf_len == 0); 3708 /* Check that io_done has freed the g_zcopy_bdev_io */ 3709 CU_ASSERT(g_zcopy_bdev_io == NULL); 3710 3711 /* Check the zcopy write buffer has not been touched which 3712 * ensures that the correct buffers were used. 3713 */ 3714 CU_ASSERT(g_zcopy_write_buf == (void *) 0x1122334455667788UL); 3715 CU_ASSERT(g_zcopy_write_buf_len == (uint32_t) -1); 3716 3717 spdk_put_io_channel(ioch); 3718 spdk_bdev_close(desc); 3719 free_bdev(bdev); 3720 spdk_bdev_finish(bdev_fini_cb, NULL); 3721 poll_threads(); 3722 } 3723 3724 static void 3725 bdev_open_while_hotremove(void) 3726 { 3727 struct spdk_bdev *bdev; 3728 struct spdk_bdev_desc *desc[2] = {}; 3729 int rc; 3730 3731 bdev = allocate_bdev("bdev"); 3732 3733 rc = spdk_bdev_open_ext("bdev", false, bdev_ut_event_cb, NULL, &desc[0]); 3734 CU_ASSERT(rc == 0); 3735 SPDK_CU_ASSERT_FATAL(desc[0] != NULL); 3736 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc[0])); 3737 3738 spdk_bdev_unregister(bdev, NULL, NULL); 3739 3740 rc = spdk_bdev_open_ext("bdev", false, bdev_ut_event_cb, NULL, &desc[1]); 3741 CU_ASSERT(rc == -ENODEV); 3742 SPDK_CU_ASSERT_FATAL(desc[1] == NULL); 3743 3744 spdk_bdev_close(desc[0]); 3745 free_bdev(bdev); 3746 } 3747 3748 static void 3749 bdev_close_while_hotremove(void) 3750 { 3751 struct spdk_bdev *bdev; 3752 struct spdk_bdev_desc *desc = NULL; 3753 int rc = 0; 3754 3755 bdev = allocate_bdev("bdev"); 3756 3757 rc = spdk_bdev_open_ext("bdev", true, bdev_open_cb1, &desc, &desc); 3758 CU_ASSERT_EQUAL(rc, 0); 3759 SPDK_CU_ASSERT_FATAL(desc != NULL); 3760 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 3761 3762 /* Simulate hot-unplug by unregistering bdev */ 3763 g_event_type1 = 0xFF; 3764 g_unregister_arg = NULL; 3765 g_unregister_rc = -1; 3766 spdk_bdev_unregister(bdev, bdev_unregister_cb, (void *)0x12345678); 3767 /* Close device while remove event is in flight */ 3768 spdk_bdev_close(desc); 3769 3770 /* Ensure that unregister callback is delayed */ 3771 CU_ASSERT_EQUAL(g_unregister_arg, NULL); 3772 CU_ASSERT_EQUAL(g_unregister_rc, -1); 3773 3774 poll_threads(); 3775 3776 /* Event callback shall not be issued because device was closed */ 3777 CU_ASSERT_EQUAL(g_event_type1, 0xFF); 3778 /* Unregister callback is issued */ 3779 CU_ASSERT_EQUAL(g_unregister_arg, (void *)0x12345678); 3780 CU_ASSERT_EQUAL(g_unregister_rc, 0); 3781 3782 free_bdev(bdev); 3783 } 3784 3785 static void 3786 bdev_open_ext(void) 3787 { 3788 struct spdk_bdev *bdev; 3789 struct spdk_bdev_desc *desc1 = NULL; 3790 struct spdk_bdev_desc *desc2 = NULL; 3791 int rc = 0; 3792 3793 bdev = allocate_bdev("bdev"); 3794 3795 rc = spdk_bdev_open_ext("bdev", true, NULL, NULL, &desc1); 3796 CU_ASSERT_EQUAL(rc, -EINVAL); 3797 3798 rc = spdk_bdev_open_ext("bdev", true, bdev_open_cb1, &desc1, &desc1); 3799 CU_ASSERT_EQUAL(rc, 0); 3800 3801 rc = spdk_bdev_open_ext("bdev", true, bdev_open_cb2, &desc2, &desc2); 3802 CU_ASSERT_EQUAL(rc, 0); 3803 3804 g_event_type1 = 0xFF; 3805 g_event_type2 = 0xFF; 3806 3807 /* Simulate hot-unplug by unregistering bdev */ 3808 spdk_bdev_unregister(bdev, NULL, NULL); 3809 poll_threads(); 3810 3811 /* Check if correct events have been triggered in event callback fn */ 3812 CU_ASSERT_EQUAL(g_event_type1, SPDK_BDEV_EVENT_REMOVE); 3813 CU_ASSERT_EQUAL(g_event_type2, SPDK_BDEV_EVENT_REMOVE); 3814 3815 free_bdev(bdev); 3816 poll_threads(); 3817 } 3818 3819 struct timeout_io_cb_arg { 3820 struct iovec iov; 3821 uint8_t type; 3822 }; 3823 3824 static int 3825 bdev_channel_count_submitted_io(struct spdk_bdev_channel *ch) 3826 { 3827 struct spdk_bdev_io *bdev_io; 3828 int n = 0; 3829 3830 if (!ch) { 3831 return -1; 3832 } 3833 3834 TAILQ_FOREACH(bdev_io, &ch->io_submitted, internal.ch_link) { 3835 n++; 3836 } 3837 3838 return n; 3839 } 3840 3841 static void 3842 bdev_channel_io_timeout_cb(void *cb_arg, struct spdk_bdev_io *bdev_io) 3843 { 3844 struct timeout_io_cb_arg *ctx = cb_arg; 3845 3846 ctx->type = bdev_io->type; 3847 ctx->iov.iov_base = bdev_io->iov.iov_base; 3848 ctx->iov.iov_len = bdev_io->iov.iov_len; 3849 } 3850 3851 static void 3852 bdev_set_io_timeout(void) 3853 { 3854 struct spdk_bdev *bdev; 3855 struct spdk_bdev_desc *desc = NULL; 3856 struct spdk_io_channel *io_ch = NULL; 3857 struct spdk_bdev_channel *bdev_ch = NULL; 3858 struct timeout_io_cb_arg cb_arg; 3859 3860 spdk_bdev_initialize(bdev_init_cb, NULL); 3861 3862 bdev = allocate_bdev("bdev"); 3863 3864 CU_ASSERT(spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc) == 0); 3865 SPDK_CU_ASSERT_FATAL(desc != NULL); 3866 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 3867 3868 io_ch = spdk_bdev_get_io_channel(desc); 3869 CU_ASSERT(io_ch != NULL); 3870 3871 bdev_ch = spdk_io_channel_get_ctx(io_ch); 3872 CU_ASSERT(TAILQ_EMPTY(&bdev_ch->io_submitted)); 3873 3874 /* This is the part1. 3875 * We will check the bdev_ch->io_submitted list 3876 * TO make sure that it can link IOs and only the user submitted IOs 3877 */ 3878 CU_ASSERT(spdk_bdev_read(desc, io_ch, (void *)0x1000, 0, 4096, io_done, NULL) == 0); 3879 CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 1); 3880 CU_ASSERT(spdk_bdev_write(desc, io_ch, (void *)0x2000, 0, 4096, io_done, NULL) == 0); 3881 CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 2); 3882 stub_complete_io(1); 3883 CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 1); 3884 stub_complete_io(1); 3885 CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 0); 3886 3887 /* Split IO */ 3888 bdev->optimal_io_boundary = 16; 3889 bdev->split_on_optimal_io_boundary = true; 3890 3891 /* Now test that a single-vector command is split correctly. 3892 * Offset 14, length 8, payload 0xF000 3893 * Child - Offset 14, length 2, payload 0xF000 3894 * Child - Offset 16, length 6, payload 0xF000 + 2 * 512 3895 * 3896 * Set up the expected values before calling spdk_bdev_read_blocks 3897 */ 3898 CU_ASSERT(spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 8, io_done, NULL) == 0); 3899 /* We count all submitted IOs including IO that are generated by splitting. */ 3900 CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 3); 3901 stub_complete_io(1); 3902 CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 2); 3903 stub_complete_io(1); 3904 CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 0); 3905 3906 /* Also include the reset IO */ 3907 CU_ASSERT(spdk_bdev_reset(desc, io_ch, io_done, NULL) == 0); 3908 CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 1); 3909 poll_threads(); 3910 stub_complete_io(1); 3911 poll_threads(); 3912 CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 0); 3913 3914 /* This is part2 3915 * Test the desc timeout poller register 3916 */ 3917 3918 /* Successfully set the timeout */ 3919 CU_ASSERT(spdk_bdev_set_timeout(desc, 30, bdev_channel_io_timeout_cb, &cb_arg) == 0); 3920 CU_ASSERT(desc->io_timeout_poller != NULL); 3921 CU_ASSERT(desc->timeout_in_sec == 30); 3922 CU_ASSERT(desc->cb_fn == bdev_channel_io_timeout_cb); 3923 CU_ASSERT(desc->cb_arg == &cb_arg); 3924 3925 /* Change the timeout limit */ 3926 CU_ASSERT(spdk_bdev_set_timeout(desc, 20, bdev_channel_io_timeout_cb, &cb_arg) == 0); 3927 CU_ASSERT(desc->io_timeout_poller != NULL); 3928 CU_ASSERT(desc->timeout_in_sec == 20); 3929 CU_ASSERT(desc->cb_fn == bdev_channel_io_timeout_cb); 3930 CU_ASSERT(desc->cb_arg == &cb_arg); 3931 3932 /* Disable the timeout */ 3933 CU_ASSERT(spdk_bdev_set_timeout(desc, 0, NULL, NULL) == 0); 3934 CU_ASSERT(desc->io_timeout_poller == NULL); 3935 3936 /* This the part3 3937 * We will test to catch timeout IO and check whether the IO is 3938 * the submitted one. 3939 */ 3940 memset(&cb_arg, 0, sizeof(cb_arg)); 3941 CU_ASSERT(spdk_bdev_set_timeout(desc, 30, bdev_channel_io_timeout_cb, &cb_arg) == 0); 3942 CU_ASSERT(spdk_bdev_write_blocks(desc, io_ch, (void *)0x1000, 0, 1, io_done, NULL) == 0); 3943 3944 /* Don't reach the limit */ 3945 spdk_delay_us(15 * spdk_get_ticks_hz()); 3946 poll_threads(); 3947 CU_ASSERT(cb_arg.type == 0); 3948 CU_ASSERT(cb_arg.iov.iov_base == (void *)0x0); 3949 CU_ASSERT(cb_arg.iov.iov_len == 0); 3950 3951 /* 15 + 15 = 30 reach the limit */ 3952 spdk_delay_us(15 * spdk_get_ticks_hz()); 3953 poll_threads(); 3954 CU_ASSERT(cb_arg.type == SPDK_BDEV_IO_TYPE_WRITE); 3955 CU_ASSERT(cb_arg.iov.iov_base == (void *)0x1000); 3956 CU_ASSERT(cb_arg.iov.iov_len == 1 * bdev->blocklen); 3957 stub_complete_io(1); 3958 3959 /* Use the same split IO above and check the IO */ 3960 memset(&cb_arg, 0, sizeof(cb_arg)); 3961 CU_ASSERT(spdk_bdev_write_blocks(desc, io_ch, (void *)0xF000, 14, 8, io_done, NULL) == 0); 3962 3963 /* The first child complete in time */ 3964 spdk_delay_us(15 * spdk_get_ticks_hz()); 3965 poll_threads(); 3966 stub_complete_io(1); 3967 CU_ASSERT(cb_arg.type == 0); 3968 CU_ASSERT(cb_arg.iov.iov_base == (void *)0x0); 3969 CU_ASSERT(cb_arg.iov.iov_len == 0); 3970 3971 /* The second child reach the limit */ 3972 spdk_delay_us(15 * spdk_get_ticks_hz()); 3973 poll_threads(); 3974 CU_ASSERT(cb_arg.type == SPDK_BDEV_IO_TYPE_WRITE); 3975 CU_ASSERT(cb_arg.iov.iov_base == (void *)0xF000); 3976 CU_ASSERT(cb_arg.iov.iov_len == 8 * bdev->blocklen); 3977 stub_complete_io(1); 3978 3979 /* Also include the reset IO */ 3980 memset(&cb_arg, 0, sizeof(cb_arg)); 3981 CU_ASSERT(spdk_bdev_reset(desc, io_ch, io_done, NULL) == 0); 3982 spdk_delay_us(30 * spdk_get_ticks_hz()); 3983 poll_threads(); 3984 CU_ASSERT(cb_arg.type == SPDK_BDEV_IO_TYPE_RESET); 3985 stub_complete_io(1); 3986 poll_threads(); 3987 3988 spdk_put_io_channel(io_ch); 3989 spdk_bdev_close(desc); 3990 free_bdev(bdev); 3991 spdk_bdev_finish(bdev_fini_cb, NULL); 3992 poll_threads(); 3993 } 3994 3995 static void 3996 lba_range_overlap(void) 3997 { 3998 struct lba_range r1, r2; 3999 4000 r1.offset = 100; 4001 r1.length = 50; 4002 4003 r2.offset = 0; 4004 r2.length = 1; 4005 CU_ASSERT(!bdev_lba_range_overlapped(&r1, &r2)); 4006 4007 r2.offset = 0; 4008 r2.length = 100; 4009 CU_ASSERT(!bdev_lba_range_overlapped(&r1, &r2)); 4010 4011 r2.offset = 0; 4012 r2.length = 110; 4013 CU_ASSERT(bdev_lba_range_overlapped(&r1, &r2)); 4014 4015 r2.offset = 100; 4016 r2.length = 10; 4017 CU_ASSERT(bdev_lba_range_overlapped(&r1, &r2)); 4018 4019 r2.offset = 110; 4020 r2.length = 20; 4021 CU_ASSERT(bdev_lba_range_overlapped(&r1, &r2)); 4022 4023 r2.offset = 140; 4024 r2.length = 150; 4025 CU_ASSERT(bdev_lba_range_overlapped(&r1, &r2)); 4026 4027 r2.offset = 130; 4028 r2.length = 200; 4029 CU_ASSERT(bdev_lba_range_overlapped(&r1, &r2)); 4030 4031 r2.offset = 150; 4032 r2.length = 100; 4033 CU_ASSERT(!bdev_lba_range_overlapped(&r1, &r2)); 4034 4035 r2.offset = 110; 4036 r2.length = 0; 4037 CU_ASSERT(!bdev_lba_range_overlapped(&r1, &r2)); 4038 } 4039 4040 static bool g_lock_lba_range_done; 4041 static bool g_unlock_lba_range_done; 4042 4043 static void 4044 lock_lba_range_done(void *ctx, int status) 4045 { 4046 g_lock_lba_range_done = true; 4047 } 4048 4049 static void 4050 unlock_lba_range_done(void *ctx, int status) 4051 { 4052 g_unlock_lba_range_done = true; 4053 } 4054 4055 static void 4056 lock_lba_range_check_ranges(void) 4057 { 4058 struct spdk_bdev *bdev; 4059 struct spdk_bdev_desc *desc = NULL; 4060 struct spdk_io_channel *io_ch; 4061 struct spdk_bdev_channel *channel; 4062 struct lba_range *range; 4063 int ctx1; 4064 int rc; 4065 4066 spdk_bdev_initialize(bdev_init_cb, NULL); 4067 4068 bdev = allocate_bdev("bdev0"); 4069 4070 rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc); 4071 CU_ASSERT(rc == 0); 4072 CU_ASSERT(desc != NULL); 4073 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 4074 io_ch = spdk_bdev_get_io_channel(desc); 4075 CU_ASSERT(io_ch != NULL); 4076 channel = spdk_io_channel_get_ctx(io_ch); 4077 4078 g_lock_lba_range_done = false; 4079 rc = bdev_lock_lba_range(desc, io_ch, 20, 10, lock_lba_range_done, &ctx1); 4080 CU_ASSERT(rc == 0); 4081 poll_threads(); 4082 4083 CU_ASSERT(g_lock_lba_range_done == true); 4084 range = TAILQ_FIRST(&channel->locked_ranges); 4085 SPDK_CU_ASSERT_FATAL(range != NULL); 4086 CU_ASSERT(range->offset == 20); 4087 CU_ASSERT(range->length == 10); 4088 CU_ASSERT(range->owner_ch == channel); 4089 4090 /* Unlocks must exactly match a lock. */ 4091 g_unlock_lba_range_done = false; 4092 rc = bdev_unlock_lba_range(desc, io_ch, 20, 1, unlock_lba_range_done, &ctx1); 4093 CU_ASSERT(rc == -EINVAL); 4094 CU_ASSERT(g_unlock_lba_range_done == false); 4095 4096 rc = bdev_unlock_lba_range(desc, io_ch, 20, 10, unlock_lba_range_done, &ctx1); 4097 CU_ASSERT(rc == 0); 4098 spdk_delay_us(100); 4099 poll_threads(); 4100 4101 CU_ASSERT(g_unlock_lba_range_done == true); 4102 CU_ASSERT(TAILQ_EMPTY(&channel->locked_ranges)); 4103 4104 spdk_put_io_channel(io_ch); 4105 spdk_bdev_close(desc); 4106 free_bdev(bdev); 4107 spdk_bdev_finish(bdev_fini_cb, NULL); 4108 poll_threads(); 4109 } 4110 4111 static void 4112 lock_lba_range_with_io_outstanding(void) 4113 { 4114 struct spdk_bdev *bdev; 4115 struct spdk_bdev_desc *desc = NULL; 4116 struct spdk_io_channel *io_ch; 4117 struct spdk_bdev_channel *channel; 4118 struct lba_range *range; 4119 char buf[4096]; 4120 int ctx1; 4121 int rc; 4122 4123 spdk_bdev_initialize(bdev_init_cb, NULL); 4124 4125 bdev = allocate_bdev("bdev0"); 4126 4127 rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc); 4128 CU_ASSERT(rc == 0); 4129 CU_ASSERT(desc != NULL); 4130 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 4131 io_ch = spdk_bdev_get_io_channel(desc); 4132 CU_ASSERT(io_ch != NULL); 4133 channel = spdk_io_channel_get_ctx(io_ch); 4134 4135 g_io_done = false; 4136 rc = spdk_bdev_read_blocks(desc, io_ch, buf, 20, 1, io_done, &ctx1); 4137 CU_ASSERT(rc == 0); 4138 4139 g_lock_lba_range_done = false; 4140 rc = bdev_lock_lba_range(desc, io_ch, 20, 10, lock_lba_range_done, &ctx1); 4141 CU_ASSERT(rc == 0); 4142 poll_threads(); 4143 4144 /* The lock should immediately become valid, since there are no outstanding 4145 * write I/O. 4146 */ 4147 CU_ASSERT(g_io_done == false); 4148 CU_ASSERT(g_lock_lba_range_done == true); 4149 range = TAILQ_FIRST(&channel->locked_ranges); 4150 SPDK_CU_ASSERT_FATAL(range != NULL); 4151 CU_ASSERT(range->offset == 20); 4152 CU_ASSERT(range->length == 10); 4153 CU_ASSERT(range->owner_ch == channel); 4154 CU_ASSERT(range->locked_ctx == &ctx1); 4155 4156 rc = bdev_unlock_lba_range(desc, io_ch, 20, 10, lock_lba_range_done, &ctx1); 4157 CU_ASSERT(rc == 0); 4158 stub_complete_io(1); 4159 spdk_delay_us(100); 4160 poll_threads(); 4161 4162 CU_ASSERT(TAILQ_EMPTY(&channel->locked_ranges)); 4163 4164 /* Now try again, but with a write I/O. */ 4165 g_io_done = false; 4166 rc = spdk_bdev_write_blocks(desc, io_ch, buf, 20, 1, io_done, &ctx1); 4167 CU_ASSERT(rc == 0); 4168 4169 g_lock_lba_range_done = false; 4170 rc = bdev_lock_lba_range(desc, io_ch, 20, 10, lock_lba_range_done, &ctx1); 4171 CU_ASSERT(rc == 0); 4172 poll_threads(); 4173 4174 /* The lock should not be fully valid yet, since a write I/O is outstanding. 4175 * But note that the range should be on the channel's locked_list, to make sure no 4176 * new write I/O are started. 4177 */ 4178 CU_ASSERT(g_io_done == false); 4179 CU_ASSERT(g_lock_lba_range_done == false); 4180 range = TAILQ_FIRST(&channel->locked_ranges); 4181 SPDK_CU_ASSERT_FATAL(range != NULL); 4182 CU_ASSERT(range->offset == 20); 4183 CU_ASSERT(range->length == 10); 4184 4185 /* Complete the write I/O. This should make the lock valid (checked by confirming 4186 * our callback was invoked). 4187 */ 4188 stub_complete_io(1); 4189 spdk_delay_us(100); 4190 poll_threads(); 4191 CU_ASSERT(g_io_done == true); 4192 CU_ASSERT(g_lock_lba_range_done == true); 4193 4194 rc = bdev_unlock_lba_range(desc, io_ch, 20, 10, unlock_lba_range_done, &ctx1); 4195 CU_ASSERT(rc == 0); 4196 poll_threads(); 4197 4198 CU_ASSERT(TAILQ_EMPTY(&channel->locked_ranges)); 4199 4200 spdk_put_io_channel(io_ch); 4201 spdk_bdev_close(desc); 4202 free_bdev(bdev); 4203 spdk_bdev_finish(bdev_fini_cb, NULL); 4204 poll_threads(); 4205 } 4206 4207 static void 4208 lock_lba_range_overlapped(void) 4209 { 4210 struct spdk_bdev *bdev; 4211 struct spdk_bdev_desc *desc = NULL; 4212 struct spdk_io_channel *io_ch; 4213 struct spdk_bdev_channel *channel; 4214 struct lba_range *range; 4215 int ctx1; 4216 int rc; 4217 4218 spdk_bdev_initialize(bdev_init_cb, NULL); 4219 4220 bdev = allocate_bdev("bdev0"); 4221 4222 rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc); 4223 CU_ASSERT(rc == 0); 4224 CU_ASSERT(desc != NULL); 4225 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 4226 io_ch = spdk_bdev_get_io_channel(desc); 4227 CU_ASSERT(io_ch != NULL); 4228 channel = spdk_io_channel_get_ctx(io_ch); 4229 4230 /* Lock range 20-29. */ 4231 g_lock_lba_range_done = false; 4232 rc = bdev_lock_lba_range(desc, io_ch, 20, 10, lock_lba_range_done, &ctx1); 4233 CU_ASSERT(rc == 0); 4234 poll_threads(); 4235 4236 CU_ASSERT(g_lock_lba_range_done == true); 4237 range = TAILQ_FIRST(&channel->locked_ranges); 4238 SPDK_CU_ASSERT_FATAL(range != NULL); 4239 CU_ASSERT(range->offset == 20); 4240 CU_ASSERT(range->length == 10); 4241 4242 /* Try to lock range 25-39. It should not lock immediately, since it overlaps with 4243 * 20-29. 4244 */ 4245 g_lock_lba_range_done = false; 4246 rc = bdev_lock_lba_range(desc, io_ch, 25, 15, lock_lba_range_done, &ctx1); 4247 CU_ASSERT(rc == 0); 4248 poll_threads(); 4249 4250 CU_ASSERT(g_lock_lba_range_done == false); 4251 range = TAILQ_FIRST(&bdev->internal.pending_locked_ranges); 4252 SPDK_CU_ASSERT_FATAL(range != NULL); 4253 CU_ASSERT(range->offset == 25); 4254 CU_ASSERT(range->length == 15); 4255 4256 /* Unlock 20-29. This should result in range 25-39 now getting locked since it 4257 * no longer overlaps with an active lock. 4258 */ 4259 g_unlock_lba_range_done = false; 4260 rc = bdev_unlock_lba_range(desc, io_ch, 20, 10, unlock_lba_range_done, &ctx1); 4261 CU_ASSERT(rc == 0); 4262 poll_threads(); 4263 4264 CU_ASSERT(g_unlock_lba_range_done == true); 4265 CU_ASSERT(TAILQ_EMPTY(&bdev->internal.pending_locked_ranges)); 4266 range = TAILQ_FIRST(&channel->locked_ranges); 4267 SPDK_CU_ASSERT_FATAL(range != NULL); 4268 CU_ASSERT(range->offset == 25); 4269 CU_ASSERT(range->length == 15); 4270 4271 /* Lock 40-59. This should immediately lock since it does not overlap with the 4272 * currently active 25-39 lock. 4273 */ 4274 g_lock_lba_range_done = false; 4275 rc = bdev_lock_lba_range(desc, io_ch, 40, 20, lock_lba_range_done, &ctx1); 4276 CU_ASSERT(rc == 0); 4277 poll_threads(); 4278 4279 CU_ASSERT(g_lock_lba_range_done == true); 4280 range = TAILQ_FIRST(&bdev->internal.locked_ranges); 4281 SPDK_CU_ASSERT_FATAL(range != NULL); 4282 range = TAILQ_NEXT(range, tailq); 4283 SPDK_CU_ASSERT_FATAL(range != NULL); 4284 CU_ASSERT(range->offset == 40); 4285 CU_ASSERT(range->length == 20); 4286 4287 /* Try to lock 35-44. Note that this overlaps with both 25-39 and 40-59. */ 4288 g_lock_lba_range_done = false; 4289 rc = bdev_lock_lba_range(desc, io_ch, 35, 10, lock_lba_range_done, &ctx1); 4290 CU_ASSERT(rc == 0); 4291 poll_threads(); 4292 4293 CU_ASSERT(g_lock_lba_range_done == false); 4294 range = TAILQ_FIRST(&bdev->internal.pending_locked_ranges); 4295 SPDK_CU_ASSERT_FATAL(range != NULL); 4296 CU_ASSERT(range->offset == 35); 4297 CU_ASSERT(range->length == 10); 4298 4299 /* Unlock 25-39. Make sure that 35-44 is still in the pending list, since 4300 * the 40-59 lock is still active. 4301 */ 4302 g_unlock_lba_range_done = false; 4303 rc = bdev_unlock_lba_range(desc, io_ch, 25, 15, unlock_lba_range_done, &ctx1); 4304 CU_ASSERT(rc == 0); 4305 poll_threads(); 4306 4307 CU_ASSERT(g_unlock_lba_range_done == true); 4308 CU_ASSERT(g_lock_lba_range_done == false); 4309 range = TAILQ_FIRST(&bdev->internal.pending_locked_ranges); 4310 SPDK_CU_ASSERT_FATAL(range != NULL); 4311 CU_ASSERT(range->offset == 35); 4312 CU_ASSERT(range->length == 10); 4313 4314 /* Unlock 40-59. This should result in 35-44 now getting locked, since there are 4315 * no longer any active overlapping locks. 4316 */ 4317 g_unlock_lba_range_done = false; 4318 rc = bdev_unlock_lba_range(desc, io_ch, 40, 20, unlock_lba_range_done, &ctx1); 4319 CU_ASSERT(rc == 0); 4320 poll_threads(); 4321 4322 CU_ASSERT(g_unlock_lba_range_done == true); 4323 CU_ASSERT(g_lock_lba_range_done == true); 4324 CU_ASSERT(TAILQ_EMPTY(&bdev->internal.pending_locked_ranges)); 4325 range = TAILQ_FIRST(&bdev->internal.locked_ranges); 4326 SPDK_CU_ASSERT_FATAL(range != NULL); 4327 CU_ASSERT(range->offset == 35); 4328 CU_ASSERT(range->length == 10); 4329 4330 /* Finally, unlock 35-44. */ 4331 g_unlock_lba_range_done = false; 4332 rc = bdev_unlock_lba_range(desc, io_ch, 35, 10, unlock_lba_range_done, &ctx1); 4333 CU_ASSERT(rc == 0); 4334 poll_threads(); 4335 4336 CU_ASSERT(g_unlock_lba_range_done == true); 4337 CU_ASSERT(TAILQ_EMPTY(&bdev->internal.locked_ranges)); 4338 4339 spdk_put_io_channel(io_ch); 4340 spdk_bdev_close(desc); 4341 free_bdev(bdev); 4342 spdk_bdev_finish(bdev_fini_cb, NULL); 4343 poll_threads(); 4344 } 4345 4346 static void 4347 abort_done(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg) 4348 { 4349 g_abort_done = true; 4350 g_abort_status = bdev_io->internal.status; 4351 spdk_bdev_free_io(bdev_io); 4352 } 4353 4354 static void 4355 bdev_io_abort(void) 4356 { 4357 struct spdk_bdev *bdev; 4358 struct spdk_bdev_desc *desc = NULL; 4359 struct spdk_io_channel *io_ch; 4360 struct spdk_bdev_channel *channel; 4361 struct spdk_bdev_mgmt_channel *mgmt_ch; 4362 struct spdk_bdev_opts bdev_opts = {}; 4363 struct iovec iov[BDEV_IO_NUM_CHILD_IOV * 2]; 4364 uint64_t io_ctx1 = 0, io_ctx2 = 0, i; 4365 int rc; 4366 4367 spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts)); 4368 bdev_opts.bdev_io_pool_size = 7; 4369 bdev_opts.bdev_io_cache_size = 2; 4370 4371 rc = spdk_bdev_set_opts(&bdev_opts); 4372 CU_ASSERT(rc == 0); 4373 spdk_bdev_initialize(bdev_init_cb, NULL); 4374 4375 bdev = allocate_bdev("bdev0"); 4376 4377 rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc); 4378 CU_ASSERT(rc == 0); 4379 CU_ASSERT(desc != NULL); 4380 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 4381 io_ch = spdk_bdev_get_io_channel(desc); 4382 CU_ASSERT(io_ch != NULL); 4383 channel = spdk_io_channel_get_ctx(io_ch); 4384 mgmt_ch = channel->shared_resource->mgmt_ch; 4385 4386 g_abort_done = false; 4387 4388 ut_enable_io_type(SPDK_BDEV_IO_TYPE_ABORT, false); 4389 4390 rc = spdk_bdev_abort(desc, io_ch, &io_ctx1, abort_done, NULL); 4391 CU_ASSERT(rc == -ENOTSUP); 4392 4393 ut_enable_io_type(SPDK_BDEV_IO_TYPE_ABORT, true); 4394 4395 rc = spdk_bdev_abort(desc, io_ch, &io_ctx2, abort_done, NULL); 4396 CU_ASSERT(rc == 0); 4397 CU_ASSERT(g_abort_done == true); 4398 CU_ASSERT(g_abort_status == SPDK_BDEV_IO_STATUS_FAILED); 4399 4400 /* Test the case that the target I/O was successfully aborted. */ 4401 g_io_done = false; 4402 4403 rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, &io_ctx1); 4404 CU_ASSERT(rc == 0); 4405 CU_ASSERT(g_io_done == false); 4406 4407 g_abort_done = false; 4408 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 4409 4410 rc = spdk_bdev_abort(desc, io_ch, &io_ctx1, abort_done, NULL); 4411 CU_ASSERT(rc == 0); 4412 CU_ASSERT(g_io_done == true); 4413 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED); 4414 stub_complete_io(1); 4415 CU_ASSERT(g_abort_done == true); 4416 CU_ASSERT(g_abort_status == SPDK_BDEV_IO_STATUS_SUCCESS); 4417 4418 /* Test the case that the target I/O was not aborted because it completed 4419 * in the middle of execution of the abort. 4420 */ 4421 g_io_done = false; 4422 4423 rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, &io_ctx1); 4424 CU_ASSERT(rc == 0); 4425 CU_ASSERT(g_io_done == false); 4426 4427 g_abort_done = false; 4428 g_io_exp_status = SPDK_BDEV_IO_STATUS_FAILED; 4429 4430 rc = spdk_bdev_abort(desc, io_ch, &io_ctx1, abort_done, NULL); 4431 CU_ASSERT(rc == 0); 4432 CU_ASSERT(g_io_done == false); 4433 4434 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 4435 stub_complete_io(1); 4436 CU_ASSERT(g_io_done == true); 4437 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS); 4438 4439 g_io_exp_status = SPDK_BDEV_IO_STATUS_FAILED; 4440 stub_complete_io(1); 4441 CU_ASSERT(g_abort_done == true); 4442 CU_ASSERT(g_abort_status == SPDK_BDEV_IO_STATUS_SUCCESS); 4443 4444 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 4445 4446 bdev->optimal_io_boundary = 16; 4447 bdev->split_on_optimal_io_boundary = true; 4448 4449 /* Test that a single-vector command which is split is aborted correctly. 4450 * Offset 14, length 8, payload 0xF000 4451 * Child - Offset 14, length 2, payload 0xF000 4452 * Child - Offset 16, length 6, payload 0xF000 + 2 * 512 4453 */ 4454 g_io_done = false; 4455 4456 rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 8, io_done, &io_ctx1); 4457 CU_ASSERT(rc == 0); 4458 CU_ASSERT(g_io_done == false); 4459 4460 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 4461 4462 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 4463 4464 rc = spdk_bdev_abort(desc, io_ch, &io_ctx1, abort_done, NULL); 4465 CU_ASSERT(rc == 0); 4466 CU_ASSERT(g_io_done == true); 4467 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED); 4468 stub_complete_io(2); 4469 CU_ASSERT(g_abort_done == true); 4470 CU_ASSERT(g_abort_status == SPDK_BDEV_IO_STATUS_SUCCESS); 4471 4472 /* Test that a multi-vector command that needs to be split by strip and then 4473 * needs to be split is aborted correctly. Abort is requested before the second 4474 * child I/O was submitted. The parent I/O should complete with failure without 4475 * submitting the second child I/O. 4476 */ 4477 for (i = 0; i < BDEV_IO_NUM_CHILD_IOV * 2; i++) { 4478 iov[i].iov_base = (void *)((i + 1) * 0x10000); 4479 iov[i].iov_len = 512; 4480 } 4481 4482 bdev->optimal_io_boundary = BDEV_IO_NUM_CHILD_IOV; 4483 g_io_done = false; 4484 rc = spdk_bdev_readv_blocks(desc, io_ch, iov, BDEV_IO_NUM_CHILD_IOV * 2, 0, 4485 BDEV_IO_NUM_CHILD_IOV * 2, io_done, &io_ctx1); 4486 CU_ASSERT(rc == 0); 4487 CU_ASSERT(g_io_done == false); 4488 4489 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 4490 4491 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 4492 4493 rc = spdk_bdev_abort(desc, io_ch, &io_ctx1, abort_done, NULL); 4494 CU_ASSERT(rc == 0); 4495 CU_ASSERT(g_io_done == true); 4496 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED); 4497 stub_complete_io(1); 4498 CU_ASSERT(g_abort_done == true); 4499 CU_ASSERT(g_abort_status == SPDK_BDEV_IO_STATUS_SUCCESS); 4500 4501 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 4502 4503 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 4504 4505 bdev->optimal_io_boundary = 16; 4506 g_io_done = false; 4507 4508 /* Test that a ingle-vector command which is split is aborted correctly. 4509 * Differently from the above, the child abort request will be submitted 4510 * sequentially due to the capacity of spdk_bdev_io. 4511 */ 4512 rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 50, io_done, &io_ctx1); 4513 CU_ASSERT(rc == 0); 4514 CU_ASSERT(g_io_done == false); 4515 4516 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 4); 4517 4518 g_abort_done = false; 4519 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 4520 4521 rc = spdk_bdev_abort(desc, io_ch, &io_ctx1, abort_done, NULL); 4522 CU_ASSERT(rc == 0); 4523 CU_ASSERT(!TAILQ_EMPTY(&mgmt_ch->io_wait_queue)); 4524 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 4); 4525 4526 stub_complete_io(1); 4527 CU_ASSERT(g_io_done == true); 4528 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED); 4529 stub_complete_io(3); 4530 CU_ASSERT(g_abort_done == true); 4531 CU_ASSERT(g_abort_status == SPDK_BDEV_IO_STATUS_SUCCESS); 4532 4533 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 4534 4535 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 4536 4537 spdk_put_io_channel(io_ch); 4538 spdk_bdev_close(desc); 4539 free_bdev(bdev); 4540 spdk_bdev_finish(bdev_fini_cb, NULL); 4541 poll_threads(); 4542 } 4543 4544 static void 4545 bdev_unmap(void) 4546 { 4547 struct spdk_bdev *bdev; 4548 struct spdk_bdev_desc *desc = NULL; 4549 struct spdk_io_channel *ioch; 4550 struct spdk_bdev_channel *bdev_ch; 4551 struct ut_expected_io *expected_io; 4552 struct spdk_bdev_opts bdev_opts = {}; 4553 uint32_t i, num_outstanding; 4554 uint64_t offset, num_blocks, max_unmap_blocks, num_children; 4555 int rc; 4556 4557 spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts)); 4558 bdev_opts.bdev_io_pool_size = 512; 4559 bdev_opts.bdev_io_cache_size = 64; 4560 rc = spdk_bdev_set_opts(&bdev_opts); 4561 CU_ASSERT(rc == 0); 4562 4563 spdk_bdev_initialize(bdev_init_cb, NULL); 4564 bdev = allocate_bdev("bdev"); 4565 4566 rc = spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc); 4567 CU_ASSERT_EQUAL(rc, 0); 4568 SPDK_CU_ASSERT_FATAL(desc != NULL); 4569 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 4570 ioch = spdk_bdev_get_io_channel(desc); 4571 SPDK_CU_ASSERT_FATAL(ioch != NULL); 4572 bdev_ch = spdk_io_channel_get_ctx(ioch); 4573 CU_ASSERT(TAILQ_EMPTY(&bdev_ch->io_submitted)); 4574 4575 fn_table.submit_request = stub_submit_request; 4576 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 4577 4578 /* Case 1: First test the request won't be split */ 4579 num_blocks = 32; 4580 4581 g_io_done = false; 4582 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_UNMAP, 0, num_blocks, 0); 4583 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 4584 rc = spdk_bdev_unmap_blocks(desc, ioch, 0, num_blocks, io_done, NULL); 4585 CU_ASSERT_EQUAL(rc, 0); 4586 CU_ASSERT(g_io_done == false); 4587 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 4588 stub_complete_io(1); 4589 CU_ASSERT(g_io_done == true); 4590 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 4591 4592 /* Case 2: Test the split with 2 children requests */ 4593 bdev->max_unmap = 8; 4594 bdev->max_unmap_segments = 2; 4595 max_unmap_blocks = bdev->max_unmap * bdev->max_unmap_segments; 4596 num_blocks = max_unmap_blocks * 2; 4597 offset = 0; 4598 4599 g_io_done = false; 4600 for (i = 0; i < 2; i++) { 4601 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_UNMAP, offset, max_unmap_blocks, 0); 4602 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 4603 offset += max_unmap_blocks; 4604 } 4605 4606 rc = spdk_bdev_unmap_blocks(desc, ioch, 0, num_blocks, io_done, NULL); 4607 CU_ASSERT_EQUAL(rc, 0); 4608 CU_ASSERT(g_io_done == false); 4609 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 4610 stub_complete_io(2); 4611 CU_ASSERT(g_io_done == true); 4612 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 4613 4614 /* Case 3: Test the split with 15 children requests, will finish 8 requests first */ 4615 num_children = 15; 4616 num_blocks = max_unmap_blocks * num_children; 4617 g_io_done = false; 4618 offset = 0; 4619 for (i = 0; i < num_children; i++) { 4620 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_UNMAP, offset, max_unmap_blocks, 0); 4621 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 4622 offset += max_unmap_blocks; 4623 } 4624 4625 rc = spdk_bdev_unmap_blocks(desc, ioch, 0, num_blocks, io_done, NULL); 4626 CU_ASSERT_EQUAL(rc, 0); 4627 CU_ASSERT(g_io_done == false); 4628 4629 while (num_children > 0) { 4630 num_outstanding = spdk_min(num_children, SPDK_BDEV_MAX_CHILDREN_UNMAP_WRITE_ZEROES_REQS); 4631 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == num_outstanding); 4632 stub_complete_io(num_outstanding); 4633 num_children -= num_outstanding; 4634 } 4635 CU_ASSERT(g_io_done == true); 4636 4637 spdk_put_io_channel(ioch); 4638 spdk_bdev_close(desc); 4639 free_bdev(bdev); 4640 spdk_bdev_finish(bdev_fini_cb, NULL); 4641 poll_threads(); 4642 } 4643 4644 static void 4645 bdev_write_zeroes_split_test(void) 4646 { 4647 struct spdk_bdev *bdev; 4648 struct spdk_bdev_desc *desc = NULL; 4649 struct spdk_io_channel *ioch; 4650 struct spdk_bdev_channel *bdev_ch; 4651 struct ut_expected_io *expected_io; 4652 struct spdk_bdev_opts bdev_opts = {}; 4653 uint32_t i, num_outstanding; 4654 uint64_t offset, num_blocks, max_write_zeroes_blocks, num_children; 4655 int rc; 4656 4657 spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts)); 4658 bdev_opts.bdev_io_pool_size = 512; 4659 bdev_opts.bdev_io_cache_size = 64; 4660 rc = spdk_bdev_set_opts(&bdev_opts); 4661 CU_ASSERT(rc == 0); 4662 4663 spdk_bdev_initialize(bdev_init_cb, NULL); 4664 bdev = allocate_bdev("bdev"); 4665 4666 rc = spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc); 4667 CU_ASSERT_EQUAL(rc, 0); 4668 SPDK_CU_ASSERT_FATAL(desc != NULL); 4669 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 4670 ioch = spdk_bdev_get_io_channel(desc); 4671 SPDK_CU_ASSERT_FATAL(ioch != NULL); 4672 bdev_ch = spdk_io_channel_get_ctx(ioch); 4673 CU_ASSERT(TAILQ_EMPTY(&bdev_ch->io_submitted)); 4674 4675 fn_table.submit_request = stub_submit_request; 4676 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 4677 4678 /* Case 1: First test the request won't be split */ 4679 num_blocks = 32; 4680 4681 g_io_done = false; 4682 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, 0, num_blocks, 0); 4683 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 4684 rc = spdk_bdev_write_zeroes_blocks(desc, ioch, 0, num_blocks, io_done, NULL); 4685 CU_ASSERT_EQUAL(rc, 0); 4686 CU_ASSERT(g_io_done == false); 4687 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 4688 stub_complete_io(1); 4689 CU_ASSERT(g_io_done == true); 4690 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 4691 4692 /* Case 2: Test the split with 2 children requests */ 4693 max_write_zeroes_blocks = 8; 4694 bdev->max_write_zeroes = max_write_zeroes_blocks; 4695 num_blocks = max_write_zeroes_blocks * 2; 4696 offset = 0; 4697 4698 g_io_done = false; 4699 for (i = 0; i < 2; i++) { 4700 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, offset, max_write_zeroes_blocks, 4701 0); 4702 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 4703 offset += max_write_zeroes_blocks; 4704 } 4705 4706 rc = spdk_bdev_write_zeroes_blocks(desc, ioch, 0, num_blocks, io_done, NULL); 4707 CU_ASSERT_EQUAL(rc, 0); 4708 CU_ASSERT(g_io_done == false); 4709 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 4710 stub_complete_io(2); 4711 CU_ASSERT(g_io_done == true); 4712 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 4713 4714 /* Case 3: Test the split with 15 children requests, will finish 8 requests first */ 4715 num_children = 15; 4716 num_blocks = max_write_zeroes_blocks * num_children; 4717 g_io_done = false; 4718 offset = 0; 4719 for (i = 0; i < num_children; i++) { 4720 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, offset, max_write_zeroes_blocks, 4721 0); 4722 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 4723 offset += max_write_zeroes_blocks; 4724 } 4725 4726 rc = spdk_bdev_write_zeroes_blocks(desc, ioch, 0, num_blocks, io_done, NULL); 4727 CU_ASSERT_EQUAL(rc, 0); 4728 CU_ASSERT(g_io_done == false); 4729 4730 while (num_children > 0) { 4731 num_outstanding = spdk_min(num_children, SPDK_BDEV_MAX_CHILDREN_UNMAP_WRITE_ZEROES_REQS); 4732 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == num_outstanding); 4733 stub_complete_io(num_outstanding); 4734 num_children -= num_outstanding; 4735 } 4736 CU_ASSERT(g_io_done == true); 4737 4738 spdk_put_io_channel(ioch); 4739 spdk_bdev_close(desc); 4740 free_bdev(bdev); 4741 spdk_bdev_finish(bdev_fini_cb, NULL); 4742 poll_threads(); 4743 } 4744 4745 static void 4746 bdev_set_options_test(void) 4747 { 4748 struct spdk_bdev_opts bdev_opts = {}; 4749 int rc; 4750 4751 /* Case1: Do not set opts_size */ 4752 rc = spdk_bdev_set_opts(&bdev_opts); 4753 CU_ASSERT(rc == -1); 4754 4755 spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts)); 4756 bdev_opts.bdev_io_pool_size = 4; 4757 bdev_opts.bdev_io_cache_size = 2; 4758 bdev_opts.small_buf_pool_size = 4; 4759 4760 /* Case 2: Do not set valid small_buf_pool_size and large_buf_pool_size */ 4761 rc = spdk_bdev_set_opts(&bdev_opts); 4762 CU_ASSERT(rc == -1); 4763 4764 /* Case 3: Do not set valid large_buf_pool_size */ 4765 bdev_opts.small_buf_pool_size = BUF_SMALL_POOL_SIZE; 4766 bdev_opts.large_buf_pool_size = BUF_LARGE_POOL_SIZE - 1; 4767 rc = spdk_bdev_set_opts(&bdev_opts); 4768 CU_ASSERT(rc == -1); 4769 4770 /* Case4: set valid large buf_pool_size */ 4771 bdev_opts.large_buf_pool_size = BUF_LARGE_POOL_SIZE; 4772 rc = spdk_bdev_set_opts(&bdev_opts); 4773 CU_ASSERT(rc == 0); 4774 4775 /* Case5: Set different valid value for small and large buf pool */ 4776 bdev_opts.large_buf_pool_size = BUF_SMALL_POOL_SIZE + 3; 4777 bdev_opts.large_buf_pool_size = BUF_LARGE_POOL_SIZE + 3; 4778 rc = spdk_bdev_set_opts(&bdev_opts); 4779 CU_ASSERT(rc == 0); 4780 } 4781 4782 static uint64_t 4783 get_ns_time(void) 4784 { 4785 int rc; 4786 struct timespec ts; 4787 4788 rc = clock_gettime(CLOCK_MONOTONIC, &ts); 4789 CU_ASSERT(rc == 0); 4790 return ts.tv_sec * 1000 * 1000 * 1000 + ts.tv_nsec; 4791 } 4792 4793 static int 4794 rb_tree_get_height(struct spdk_bdev_name *bdev_name) 4795 { 4796 int h1, h2; 4797 4798 if (bdev_name == NULL) { 4799 return -1; 4800 } else { 4801 h1 = rb_tree_get_height(RB_LEFT(bdev_name, node)); 4802 h2 = rb_tree_get_height(RB_RIGHT(bdev_name, node)); 4803 4804 return spdk_max(h1, h2) + 1; 4805 } 4806 } 4807 4808 static void 4809 bdev_multi_allocation(void) 4810 { 4811 const int max_bdev_num = 1024 * 16; 4812 char name[max_bdev_num][16]; 4813 char noexist_name[] = "invalid_bdev"; 4814 struct spdk_bdev *bdev[max_bdev_num]; 4815 int i, j; 4816 uint64_t last_time; 4817 int bdev_num; 4818 int height; 4819 4820 for (j = 0; j < max_bdev_num; j++) { 4821 snprintf(name[j], sizeof(name[j]), "bdev%d", j); 4822 } 4823 4824 for (i = 0; i < 16; i++) { 4825 last_time = get_ns_time(); 4826 bdev_num = 1024 * (i + 1); 4827 for (j = 0; j < bdev_num; j++) { 4828 bdev[j] = allocate_bdev(name[j]); 4829 height = rb_tree_get_height(&bdev[j]->internal.bdev_name); 4830 CU_ASSERT(height <= (int)(spdk_u32log2(2 * j + 2))); 4831 } 4832 SPDK_NOTICELOG("alloc bdev num %d takes %" PRIu64 " ms\n", bdev_num, 4833 (get_ns_time() - last_time) / 1000 / 1000); 4834 for (j = 0; j < bdev_num; j++) { 4835 CU_ASSERT(spdk_bdev_get_by_name(name[j]) != NULL); 4836 } 4837 CU_ASSERT(spdk_bdev_get_by_name(noexist_name) == NULL); 4838 4839 for (j = 0; j < bdev_num; j++) { 4840 free_bdev(bdev[j]); 4841 } 4842 for (j = 0; j < bdev_num; j++) { 4843 CU_ASSERT(spdk_bdev_get_by_name(name[j]) == NULL); 4844 } 4845 } 4846 } 4847 4848 static struct spdk_memory_domain *g_bdev_memory_domain = (struct spdk_memory_domain *) 0xf00df00d; 4849 4850 static int 4851 test_bdev_get_supported_dma_device_types_op(void *ctx, struct spdk_memory_domain **domains, 4852 int array_size) 4853 { 4854 if (array_size > 0 && domains) { 4855 domains[0] = g_bdev_memory_domain; 4856 } 4857 4858 return 1; 4859 } 4860 4861 static void 4862 bdev_get_memory_domains(void) 4863 { 4864 struct spdk_bdev_fn_table fn_table = { 4865 .get_memory_domains = test_bdev_get_supported_dma_device_types_op 4866 }; 4867 struct spdk_bdev bdev = { .fn_table = &fn_table }; 4868 struct spdk_memory_domain *domains[2] = {}; 4869 int rc; 4870 4871 /* bdev is NULL */ 4872 rc = spdk_bdev_get_memory_domains(NULL, domains, 2); 4873 CU_ASSERT(rc == -EINVAL); 4874 4875 /* domains is NULL */ 4876 rc = spdk_bdev_get_memory_domains(&bdev, NULL, 2); 4877 CU_ASSERT(rc == 1); 4878 4879 /* array size is 0 */ 4880 rc = spdk_bdev_get_memory_domains(&bdev, domains, 0); 4881 CU_ASSERT(rc == 1); 4882 4883 /* get_supported_dma_device_types op is set */ 4884 rc = spdk_bdev_get_memory_domains(&bdev, domains, 2); 4885 CU_ASSERT(rc == 1); 4886 CU_ASSERT(domains[0] == g_bdev_memory_domain); 4887 4888 /* get_supported_dma_device_types op is not set */ 4889 fn_table.get_memory_domains = NULL; 4890 rc = spdk_bdev_get_memory_domains(&bdev, domains, 2); 4891 CU_ASSERT(rc == 0); 4892 } 4893 4894 static void 4895 bdev_writev_readv_ext(void) 4896 { 4897 struct spdk_bdev *bdev; 4898 struct spdk_bdev_desc *desc = NULL; 4899 struct spdk_io_channel *io_ch; 4900 struct iovec iov = { .iov_base = (void *)0xbaaddead, .iov_len = 0x1000 }; 4901 struct ut_expected_io *expected_io; 4902 struct spdk_bdev_ext_io_opts ext_io_opts = { 4903 .metadata = (void *)0xFF000000 4904 }; 4905 int rc; 4906 4907 spdk_bdev_initialize(bdev_init_cb, NULL); 4908 4909 bdev = allocate_bdev("bdev0"); 4910 bdev->md_interleave = false; 4911 bdev->md_len = 8; 4912 4913 rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc); 4914 CU_ASSERT(rc == 0); 4915 SPDK_CU_ASSERT_FATAL(desc != NULL); 4916 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 4917 io_ch = spdk_bdev_get_io_channel(desc); 4918 CU_ASSERT(io_ch != NULL); 4919 4920 g_io_done = false; 4921 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 32, 14, 1); 4922 expected_io->md_buf = ext_io_opts.metadata; 4923 expected_io->ext_io_opts = &ext_io_opts; 4924 ut_expected_io_set_iov(expected_io, 0, iov.iov_base, iov.iov_len); 4925 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 4926 4927 rc = spdk_bdev_readv_blocks_ext(desc, io_ch, &iov, 1, 32, 14, io_done, NULL, &ext_io_opts); 4928 4929 CU_ASSERT(rc == 0); 4930 CU_ASSERT(g_io_done == false); 4931 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 4932 stub_complete_io(1); 4933 CU_ASSERT(g_io_done == true); 4934 4935 g_io_done = false; 4936 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 32, 14, 1); 4937 expected_io->md_buf = ext_io_opts.metadata; 4938 expected_io->ext_io_opts = &ext_io_opts; 4939 ut_expected_io_set_iov(expected_io, 0, iov.iov_base, iov.iov_len); 4940 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 4941 4942 rc = spdk_bdev_writev_blocks_ext(desc, io_ch, &iov, 1, 32, 14, io_done, NULL, &ext_io_opts); 4943 4944 CU_ASSERT(rc == 0); 4945 CU_ASSERT(g_io_done == false); 4946 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 4947 stub_complete_io(1); 4948 CU_ASSERT(g_io_done == true); 4949 4950 spdk_put_io_channel(io_ch); 4951 spdk_bdev_close(desc); 4952 free_bdev(bdev); 4953 spdk_bdev_finish(bdev_fini_cb, NULL); 4954 poll_threads(); 4955 } 4956 4957 static void 4958 bdev_register_uuid_alias(void) 4959 { 4960 struct spdk_bdev *bdev, *second; 4961 char uuid[SPDK_UUID_STRING_LEN]; 4962 int rc; 4963 4964 spdk_bdev_initialize(bdev_init_cb, NULL); 4965 bdev = allocate_bdev("bdev0"); 4966 4967 /* Make sure an UUID was generated */ 4968 CU_ASSERT_FALSE(spdk_mem_all_zero(&bdev->uuid, sizeof(bdev->uuid))); 4969 4970 /* Check that an UUID alias was registered */ 4971 spdk_uuid_fmt_lower(uuid, sizeof(uuid), &bdev->uuid); 4972 CU_ASSERT_EQUAL(spdk_bdev_get_by_name(uuid), bdev); 4973 4974 /* Unregister the bdev */ 4975 spdk_bdev_unregister(bdev, NULL, NULL); 4976 poll_threads(); 4977 CU_ASSERT_PTR_NULL(spdk_bdev_get_by_name(uuid)); 4978 4979 /* Check the same, but this time register the bdev with non-zero UUID */ 4980 rc = spdk_bdev_register(bdev); 4981 CU_ASSERT_EQUAL(rc, 0); 4982 CU_ASSERT_EQUAL(spdk_bdev_get_by_name(uuid), bdev); 4983 4984 /* Unregister the bdev */ 4985 spdk_bdev_unregister(bdev, NULL, NULL); 4986 poll_threads(); 4987 CU_ASSERT_PTR_NULL(spdk_bdev_get_by_name(uuid)); 4988 4989 /* Regiser the bdev using UUID as the name */ 4990 bdev->name = uuid; 4991 rc = spdk_bdev_register(bdev); 4992 CU_ASSERT_EQUAL(rc, 0); 4993 CU_ASSERT_EQUAL(spdk_bdev_get_by_name(uuid), bdev); 4994 4995 /* Unregister the bdev */ 4996 spdk_bdev_unregister(bdev, NULL, NULL); 4997 poll_threads(); 4998 CU_ASSERT_PTR_NULL(spdk_bdev_get_by_name(uuid)); 4999 5000 /* Check that it's not possible to register two bdevs with the same UUIDs */ 5001 bdev->name = "bdev0"; 5002 second = allocate_bdev("bdev1"); 5003 spdk_uuid_copy(&bdev->uuid, &second->uuid); 5004 rc = spdk_bdev_register(bdev); 5005 CU_ASSERT_EQUAL(rc, -EEXIST); 5006 5007 /* Regenerate the UUID and re-check */ 5008 spdk_uuid_generate(&bdev->uuid); 5009 rc = spdk_bdev_register(bdev); 5010 CU_ASSERT_EQUAL(rc, 0); 5011 5012 /* And check that both bdevs can be retrieved through their UUIDs */ 5013 spdk_uuid_fmt_lower(uuid, sizeof(uuid), &bdev->uuid); 5014 CU_ASSERT_EQUAL(spdk_bdev_get_by_name(uuid), bdev); 5015 spdk_uuid_fmt_lower(uuid, sizeof(uuid), &second->uuid); 5016 CU_ASSERT_EQUAL(spdk_bdev_get_by_name(uuid), second); 5017 5018 free_bdev(second); 5019 free_bdev(bdev); 5020 spdk_bdev_finish(bdev_fini_cb, NULL); 5021 poll_threads(); 5022 } 5023 5024 int 5025 main(int argc, char **argv) 5026 { 5027 CU_pSuite suite = NULL; 5028 unsigned int num_failures; 5029 5030 CU_set_error_action(CUEA_ABORT); 5031 CU_initialize_registry(); 5032 5033 suite = CU_add_suite("bdev", null_init, null_clean); 5034 5035 CU_ADD_TEST(suite, bytes_to_blocks_test); 5036 CU_ADD_TEST(suite, num_blocks_test); 5037 CU_ADD_TEST(suite, io_valid_test); 5038 CU_ADD_TEST(suite, open_write_test); 5039 CU_ADD_TEST(suite, claim_test); 5040 CU_ADD_TEST(suite, alias_add_del_test); 5041 CU_ADD_TEST(suite, get_device_stat_test); 5042 CU_ADD_TEST(suite, bdev_io_types_test); 5043 CU_ADD_TEST(suite, bdev_io_wait_test); 5044 CU_ADD_TEST(suite, bdev_io_spans_split_test); 5045 CU_ADD_TEST(suite, bdev_io_boundary_split_test); 5046 CU_ADD_TEST(suite, bdev_io_max_size_and_segment_split_test); 5047 CU_ADD_TEST(suite, bdev_io_mix_split_test); 5048 CU_ADD_TEST(suite, bdev_io_split_with_io_wait); 5049 CU_ADD_TEST(suite, bdev_io_alignment_with_boundary); 5050 CU_ADD_TEST(suite, bdev_io_alignment); 5051 CU_ADD_TEST(suite, bdev_histograms); 5052 CU_ADD_TEST(suite, bdev_write_zeroes); 5053 CU_ADD_TEST(suite, bdev_compare_and_write); 5054 CU_ADD_TEST(suite, bdev_compare); 5055 CU_ADD_TEST(suite, bdev_zcopy_write); 5056 CU_ADD_TEST(suite, bdev_zcopy_read); 5057 CU_ADD_TEST(suite, bdev_open_while_hotremove); 5058 CU_ADD_TEST(suite, bdev_close_while_hotremove); 5059 CU_ADD_TEST(suite, bdev_open_ext); 5060 CU_ADD_TEST(suite, bdev_set_io_timeout); 5061 CU_ADD_TEST(suite, lba_range_overlap); 5062 CU_ADD_TEST(suite, lock_lba_range_check_ranges); 5063 CU_ADD_TEST(suite, lock_lba_range_with_io_outstanding); 5064 CU_ADD_TEST(suite, lock_lba_range_overlapped); 5065 CU_ADD_TEST(suite, bdev_io_abort); 5066 CU_ADD_TEST(suite, bdev_unmap); 5067 CU_ADD_TEST(suite, bdev_write_zeroes_split_test); 5068 CU_ADD_TEST(suite, bdev_set_options_test); 5069 CU_ADD_TEST(suite, bdev_multi_allocation); 5070 CU_ADD_TEST(suite, bdev_get_memory_domains); 5071 CU_ADD_TEST(suite, bdev_writev_readv_ext); 5072 CU_ADD_TEST(suite, bdev_register_uuid_alias); 5073 5074 allocate_cores(1); 5075 allocate_threads(1); 5076 set_thread(0); 5077 5078 CU_basic_set_mode(CU_BRM_VERBOSE); 5079 CU_basic_run_tests(); 5080 num_failures = CU_get_number_of_failures(); 5081 CU_cleanup_registry(); 5082 5083 free_threads(); 5084 free_cores(); 5085 5086 return num_failures; 5087 } 5088