/*-
 *   BSD LICENSE
 *
 *   Copyright (c) Intel Corporation. All rights reserved.
 *   Copyright (c) 2019 Mellanox Technologies LTD. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "spdk_cunit.h"

#include "common/lib/ut_multithread.c"
#include "unit/lib/json_mock.c"

#include "spdk/config.h"
/* HACK: disable VTune integration so the unit test doesn't need VTune headers and libs to build */
#undef SPDK_CONFIG_VTUNE

#include "bdev/bdev.c"

DEFINE_STUB(spdk_notify_send, uint64_t, (const char *type, const char *ctx), 0);
DEFINE_STUB(spdk_notify_type_register, struct spdk_notify_type *, (const char *type), NULL);

int g_status;
int g_count;
enum spdk_bdev_event_type g_event_type1;
enum spdk_bdev_event_type g_event_type2;
struct spdk_histogram_data *g_histogram;
void *g_unregister_arg;
int g_unregister_rc;

void
spdk_scsi_nvme_translate(const struct spdk_bdev_io *bdev_io,
			 int *sc, int *sk, int *asc, int *ascq)
{
}

static int
null_init(void)
{
	return 0;
}

static int
null_clean(void)
{
	return 0;
}

static int
stub_destruct(void *ctx)
{
	return 0;
}

struct ut_expected_io {
	uint8_t				type;
	uint64_t			offset;
	uint64_t			length;
	int				iovcnt;
	struct iovec			iov[BDEV_IO_NUM_CHILD_IOV];
	void				*md_buf;
	TAILQ_ENTRY(ut_expected_io)	link;
};

struct bdev_ut_channel {
	TAILQ_HEAD(, spdk_bdev_io)	outstanding_io;
	uint32_t			outstanding_io_count;
	TAILQ_HEAD(, ut_expected_io)	expected_io;
};

static bool g_io_done;
static struct spdk_bdev_io *g_bdev_io;
static enum spdk_bdev_io_status g_io_status;
static enum spdk_bdev_io_status g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS;
static uint32_t g_bdev_ut_io_device;
static struct bdev_ut_channel *g_bdev_ut_channel;
static void *g_compare_read_buf;
static uint32_t g_compare_read_buf_len;
static void *g_compare_write_buf;
static uint32_t g_compare_write_buf_len;
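/*
 * State shared between the tests and stub_submit_request(): completion
 * tracking for the abort tests, and staging buffers that stand in for
 * module-owned memory during the zcopy tests.
 */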
static bool g_abort_done;
static enum spdk_bdev_io_status g_abort_status;
static void *g_zcopy_read_buf;
static uint32_t g_zcopy_read_buf_len;
static void *g_zcopy_write_buf;
static uint32_t g_zcopy_write_buf_len;
static struct spdk_bdev_io *g_zcopy_bdev_io;

static struct ut_expected_io *
ut_alloc_expected_io(uint8_t type, uint64_t offset, uint64_t length, int iovcnt)
{
	struct ut_expected_io *expected_io;

	expected_io = calloc(1, sizeof(*expected_io));
	SPDK_CU_ASSERT_FATAL(expected_io != NULL);

	expected_io->type = type;
	expected_io->offset = offset;
	expected_io->length = length;
	expected_io->iovcnt = iovcnt;

	return expected_io;
}

static void
ut_expected_io_set_iov(struct ut_expected_io *expected_io, int pos, void *base, size_t len)
{
	expected_io->iov[pos].iov_base = base;
	expected_io->iov[pos].iov_len = len;
}

static void
stub_submit_request(struct spdk_io_channel *_ch, struct spdk_bdev_io *bdev_io)
{
	struct bdev_ut_channel *ch = spdk_io_channel_get_ctx(_ch);
	struct ut_expected_io *expected_io;
	struct iovec *iov, *expected_iov;
	struct spdk_bdev_io *bio_to_abort;
	int i;

	g_bdev_io = bdev_io;

	if (g_compare_read_buf && bdev_io->type == SPDK_BDEV_IO_TYPE_READ) {
		uint32_t len = bdev_io->u.bdev.iovs[0].iov_len;

		CU_ASSERT(bdev_io->u.bdev.iovcnt == 1);
		CU_ASSERT(g_compare_read_buf_len == len);
		memcpy(bdev_io->u.bdev.iovs[0].iov_base, g_compare_read_buf, len);
	}

	if (g_compare_write_buf && bdev_io->type == SPDK_BDEV_IO_TYPE_WRITE) {
		uint32_t len = bdev_io->u.bdev.iovs[0].iov_len;

		CU_ASSERT(bdev_io->u.bdev.iovcnt == 1);
		CU_ASSERT(g_compare_write_buf_len == len);
		memcpy(g_compare_write_buf, bdev_io->u.bdev.iovs[0].iov_base, len);
	}

	if (g_compare_read_buf && bdev_io->type == SPDK_BDEV_IO_TYPE_COMPARE) {
		uint32_t len = bdev_io->u.bdev.iovs[0].iov_len;

		CU_ASSERT(bdev_io->u.bdev.iovcnt == 1);
		CU_ASSERT(g_compare_read_buf_len == len);
		if (memcmp(bdev_io->u.bdev.iovs[0].iov_base, g_compare_read_buf, len)) {
			g_io_exp_status = SPDK_BDEV_IO_STATUS_MISCOMPARE;
		}
	}

	if (bdev_io->type == SPDK_BDEV_IO_TYPE_ABORT) {
		if (g_io_exp_status == SPDK_BDEV_IO_STATUS_SUCCESS) {
			TAILQ_FOREACH(bio_to_abort, &ch->outstanding_io, module_link) {
				if (bio_to_abort == bdev_io->u.abort.bio_to_abort) {
					TAILQ_REMOVE(&ch->outstanding_io, bio_to_abort, module_link);
					ch->outstanding_io_count--;
					spdk_bdev_io_complete(bio_to_abort, SPDK_BDEV_IO_STATUS_FAILED);
					break;
				}
			}
		}
	}

	if (bdev_io->type == SPDK_BDEV_IO_TYPE_ZCOPY) {
		if (bdev_io->u.bdev.zcopy.start) {
			g_zcopy_bdev_io = bdev_io;
			if (bdev_io->u.bdev.zcopy.populate) {
				/* Start of a read */
				CU_ASSERT(g_zcopy_read_buf != NULL);
				CU_ASSERT(g_zcopy_read_buf_len > 0);
				bdev_io->u.bdev.iovs[0].iov_base = g_zcopy_read_buf;
				bdev_io->u.bdev.iovs[0].iov_len = g_zcopy_read_buf_len;
				bdev_io->u.bdev.iovcnt = 1;
			} else {
				/* Start of a write */
				CU_ASSERT(g_zcopy_write_buf != NULL);
				CU_ASSERT(g_zcopy_write_buf_len > 0);
				bdev_io->u.bdev.iovs[0].iov_base = g_zcopy_write_buf;
				bdev_io->u.bdev.iovs[0].iov_len = g_zcopy_write_buf_len;
				bdev_io->u.bdev.iovcnt = 1;
			}
		} else {
			if (bdev_io->u.bdev.zcopy.commit) {
				/* End of write */
				CU_ASSERT(bdev_io->u.bdev.iovs[0].iov_base == g_zcopy_write_buf);
				CU_ASSERT(bdev_io->u.bdev.iovs[0].iov_len == g_zcopy_write_buf_len);
				CU_ASSERT(bdev_io->u.bdev.iovcnt == 1);
				g_zcopy_write_buf = NULL;
				g_zcopy_write_buf_len = 0;
			} else {
				/* End of read */
				CU_ASSERT(bdev_io->u.bdev.iovs[0].iov_base == g_zcopy_read_buf);
				CU_ASSERT(bdev_io->u.bdev.iovs[0].iov_len == g_zcopy_read_buf_len);
				CU_ASSERT(bdev_io->u.bdev.iovcnt == 1);
				g_zcopy_read_buf = NULL;
				g_zcopy_read_buf_len = 0;
			}
		}
	}

	TAILQ_INSERT_TAIL(&ch->outstanding_io, bdev_io, module_link);
	ch->outstanding_io_count++;

	expected_io = TAILQ_FIRST(&ch->expected_io);
	if (expected_io == NULL) {
		return;
	}
	TAILQ_REMOVE(&ch->expected_io, expected_io, link);

	if (expected_io->type != SPDK_BDEV_IO_TYPE_INVALID) {
		CU_ASSERT(bdev_io->type == expected_io->type);
	}

	if (expected_io->md_buf != NULL) {
		CU_ASSERT(expected_io->md_buf == bdev_io->u.bdev.md_buf);
	}

	if (expected_io->length == 0) {
		free(expected_io);
		return;
	}

	CU_ASSERT(expected_io->offset == bdev_io->u.bdev.offset_blocks);
	CU_ASSERT(expected_io->length == bdev_io->u.bdev.num_blocks);

	if (expected_io->iovcnt == 0) {
		free(expected_io);
		/* UNMAP, WRITE_ZEROES and FLUSH don't have iovs, so we can just return now. */
		return;
	}

	CU_ASSERT(expected_io->iovcnt == bdev_io->u.bdev.iovcnt);
	for (i = 0; i < expected_io->iovcnt; i++) {
		iov = &bdev_io->u.bdev.iovs[i];
		expected_iov = &expected_io->iov[i];
		CU_ASSERT(iov->iov_len == expected_iov->iov_len);
		CU_ASSERT(iov->iov_base == expected_iov->iov_base);
	}

	free(expected_io);
}

static void
stub_submit_request_get_buf_cb(struct spdk_io_channel *_ch,
			       struct spdk_bdev_io *bdev_io, bool success)
{
	CU_ASSERT(success == true);

	stub_submit_request(_ch, bdev_io);
}

static void
stub_submit_request_get_buf(struct spdk_io_channel *_ch, struct spdk_bdev_io *bdev_io)
{
	spdk_bdev_io_get_buf(bdev_io, stub_submit_request_get_buf_cb,
			     bdev_io->u.bdev.num_blocks * bdev_io->bdev->blocklen);
}

static uint32_t
stub_complete_io(uint32_t num_to_complete)
{
	struct bdev_ut_channel *ch = g_bdev_ut_channel;
	struct spdk_bdev_io *bdev_io;
	static enum spdk_bdev_io_status io_status;
	uint32_t num_completed = 0;

	while (num_completed < num_to_complete) {
		if (TAILQ_EMPTY(&ch->outstanding_io)) {
			break;
		}
		bdev_io = TAILQ_FIRST(&ch->outstanding_io);
		TAILQ_REMOVE(&ch->outstanding_io, bdev_io, module_link);
		ch->outstanding_io_count--;
		io_status = g_io_exp_status == SPDK_BDEV_IO_STATUS_SUCCESS ? SPDK_BDEV_IO_STATUS_SUCCESS :
			    g_io_exp_status;
		spdk_bdev_io_complete(bdev_io, io_status);
		num_completed++;
	}

	return num_completed;
}

static struct spdk_io_channel *
bdev_ut_get_io_channel(void *ctx)
{
	return spdk_get_io_channel(&g_bdev_ut_io_device);
}

static bool g_io_types_supported[SPDK_BDEV_NUM_IO_TYPES] = {
	[SPDK_BDEV_IO_TYPE_READ]		= true,
	[SPDK_BDEV_IO_TYPE_WRITE]		= true,
	[SPDK_BDEV_IO_TYPE_COMPARE]		= true,
	[SPDK_BDEV_IO_TYPE_UNMAP]		= true,
	[SPDK_BDEV_IO_TYPE_FLUSH]		= true,
	[SPDK_BDEV_IO_TYPE_RESET]		= true,
	[SPDK_BDEV_IO_TYPE_NVME_ADMIN]		= true,
	[SPDK_BDEV_IO_TYPE_NVME_IO]		= true,
	[SPDK_BDEV_IO_TYPE_NVME_IO_MD]		= true,
	[SPDK_BDEV_IO_TYPE_WRITE_ZEROES]	= true,
	[SPDK_BDEV_IO_TYPE_ZCOPY]		= true,
	[SPDK_BDEV_IO_TYPE_ABORT]		= true,
};

static void
ut_enable_io_type(enum spdk_bdev_io_type io_type, bool enable)
{
	g_io_types_supported[io_type] = enable;
}

static bool
stub_io_type_supported(void *_bdev, enum spdk_bdev_io_type io_type)
{
	return g_io_types_supported[io_type];
}

static struct spdk_bdev_fn_table fn_table = {
	.destruct = stub_destruct,
	.submit_request = stub_submit_request,
	.get_io_channel = bdev_ut_get_io_channel,
	.io_type_supported = stub_io_type_supported,
};

static int
bdev_ut_create_ch(void *io_device, void *ctx_buf)
{
	struct bdev_ut_channel *ch = ctx_buf;

	CU_ASSERT(g_bdev_ut_channel == NULL);
	g_bdev_ut_channel = ch;

	TAILQ_INIT(&ch->outstanding_io);
	ch->outstanding_io_count = 0;
	TAILQ_INIT(&ch->expected_io);
	return 0;
}

static void
bdev_ut_destroy_ch(void *io_device, void *ctx_buf)
{
	CU_ASSERT(g_bdev_ut_channel != NULL);
	g_bdev_ut_channel = NULL;
}

struct spdk_bdev_module bdev_ut_if;

static int
bdev_ut_module_init(void)
{
	spdk_io_device_register(&g_bdev_ut_io_device, bdev_ut_create_ch, bdev_ut_destroy_ch,
				sizeof(struct bdev_ut_channel), NULL);
	spdk_bdev_module_init_done(&bdev_ut_if);
	return 0;
}

static void
bdev_ut_module_fini(void)
{
	spdk_io_device_unregister(&g_bdev_ut_io_device, NULL);
}

struct spdk_bdev_module bdev_ut_if = {
	.name = "bdev_ut",
	.module_init = bdev_ut_module_init,
	.module_fini = bdev_ut_module_fini,
	.async_init = true,
};

static void vbdev_ut_examine(struct spdk_bdev *bdev);

static int
vbdev_ut_module_init(void)
{
	return 0;
}

static void
vbdev_ut_module_fini(void)
{
}

struct spdk_bdev_module vbdev_ut_if = {
	.name = "vbdev_ut",
	.module_init = vbdev_ut_module_init,
	.module_fini = vbdev_ut_module_fini,
	.examine_config = vbdev_ut_examine,
};

SPDK_BDEV_MODULE_REGISTER(bdev_ut, &bdev_ut_if)
SPDK_BDEV_MODULE_REGISTER(vbdev_ut, &vbdev_ut_if)

static void
vbdev_ut_examine(struct spdk_bdev *bdev)
{
	spdk_bdev_module_examine_done(&vbdev_ut_if);
}

static struct spdk_bdev *
allocate_bdev(char *name)
{
	struct spdk_bdev *bdev;
	int rc;

	bdev = calloc(1, sizeof(*bdev));
	SPDK_CU_ASSERT_FATAL(bdev != NULL);

	bdev->name = name;
	bdev->fn_table = &fn_table;
	bdev->module = &bdev_ut_if;
	bdev->blockcnt = 1024;
	bdev->blocklen = 512;

	rc = spdk_bdev_register(bdev);
	CU_ASSERT(rc == 0);

	return bdev;
}
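/*
 * Like allocate_bdev(), but the bdev is owned by vbdev_ut_if and no
 * blockcnt/blocklen are set, since the claim/open tests never submit
 * I/O to these virtual bdevs.
 */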
static struct spdk_bdev *
allocate_vbdev(char *name)
{
	struct spdk_bdev *bdev;
	int rc;

	bdev = calloc(1, sizeof(*bdev));
	SPDK_CU_ASSERT_FATAL(bdev != NULL);

	bdev->name = name;
	bdev->fn_table = &fn_table;
	bdev->module = &vbdev_ut_if;

	rc = spdk_bdev_register(bdev);
	CU_ASSERT(rc == 0);

	return bdev;
}

static void
free_bdev(struct spdk_bdev *bdev)
{
	spdk_bdev_unregister(bdev, NULL, NULL);
	poll_threads();
	memset(bdev, 0xFF, sizeof(*bdev));
	free(bdev);
}

static void
free_vbdev(struct spdk_bdev *bdev)
{
	spdk_bdev_unregister(bdev, NULL, NULL);
	poll_threads();
	memset(bdev, 0xFF, sizeof(*bdev));
	free(bdev);
}

static void
get_device_stat_cb(struct spdk_bdev *bdev, struct spdk_bdev_io_stat *stat, void *cb_arg, int rc)
{
	const char *bdev_name;

	CU_ASSERT(bdev != NULL);
	CU_ASSERT(rc == 0);
	bdev_name = spdk_bdev_get_name(bdev);
	CU_ASSERT_STRING_EQUAL(bdev_name, "bdev0");

	free(stat);

	*(bool *)cb_arg = true;
}

static void
bdev_unregister_cb(void *cb_arg, int rc)
{
	g_unregister_arg = cb_arg;
	g_unregister_rc = rc;
}

static void
bdev_ut_event_cb(enum spdk_bdev_event_type type, struct spdk_bdev *bdev, void *event_ctx)
{
}

static void
bdev_open_cb1(enum spdk_bdev_event_type type, struct spdk_bdev *bdev, void *event_ctx)
{
	struct spdk_bdev_desc *desc = *(struct spdk_bdev_desc **)event_ctx;

	g_event_type1 = type;
	if (SPDK_BDEV_EVENT_REMOVE == type) {
		spdk_bdev_close(desc);
	}
}

static void
bdev_open_cb2(enum spdk_bdev_event_type type, struct spdk_bdev *bdev, void *event_ctx)
{
	struct spdk_bdev_desc *desc = *(struct spdk_bdev_desc **)event_ctx;

	g_event_type2 = type;
	if (SPDK_BDEV_EVENT_REMOVE == type) {
		spdk_bdev_close(desc);
	}
}

static void
get_device_stat_test(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_io_stat *stat;
	bool done;

	bdev = allocate_bdev("bdev0");
	stat = calloc(1, sizeof(struct spdk_bdev_io_stat));
	if (stat == NULL) {
		free_bdev(bdev);
		return;
	}

	done = false;
	spdk_bdev_get_device_stat(bdev, stat, get_device_stat_cb, &done);
	while (!done) {
		poll_threads();
	}

	free_bdev(bdev);
}

static void
open_write_test(void)
{
	struct spdk_bdev *bdev[9];
	struct spdk_bdev_desc *desc[9] = {};
	int rc;

	/*
	 * Create a tree of bdevs to test various open w/ write cases.
	 *
	 * bdev0 through bdev3 are physical block devices, such as NVMe
	 * namespaces or Ceph block devices.
	 *
	 * bdev4 is a virtual bdev with multiple base bdevs.  This models
	 * caching or RAID use cases.
	 *
	 * bdev5 through bdev7 are all virtual bdevs with the same base
	 * bdev (except bdev7, which also has a second base bdev).  This
	 * models partitioning or logical volume use cases.
	 *
	 * bdev7 is a virtual bdev with multiple base bdevs.  One of its base
	 * bdevs (bdev2) is shared with the other virtual bdevs bdev5 and
	 * bdev6.  This models caching, RAID, partitioning or logical volume
	 * use cases.
	 *
	 * bdev8 is a virtual bdev with multiple base bdevs, but these
	 * base bdevs are themselves virtual bdevs.
	 *
	 *                    bdev8
	 *                      |
	 *                +----------+
	 *                |          |
	 *              bdev4      bdev5   bdev6   bdev7
	 *                |          |       |       |
	 *            +---+---+    +---+     +   +---+---+
	 *            |       |      \       |  /         \
	 *          bdev0   bdev1     bdev2             bdev3
	 */

	bdev[0] = allocate_bdev("bdev0");
	rc = spdk_bdev_module_claim_bdev(bdev[0], NULL, &bdev_ut_if);
	CU_ASSERT(rc == 0);

	bdev[1] = allocate_bdev("bdev1");
	rc = spdk_bdev_module_claim_bdev(bdev[1], NULL, &bdev_ut_if);
	CU_ASSERT(rc == 0);

	bdev[2] = allocate_bdev("bdev2");
	rc = spdk_bdev_module_claim_bdev(bdev[2], NULL, &bdev_ut_if);
	CU_ASSERT(rc == 0);

	bdev[3] = allocate_bdev("bdev3");
	rc = spdk_bdev_module_claim_bdev(bdev[3], NULL, &bdev_ut_if);
	CU_ASSERT(rc == 0);

	bdev[4] = allocate_vbdev("bdev4");
	rc = spdk_bdev_module_claim_bdev(bdev[4], NULL, &bdev_ut_if);
	CU_ASSERT(rc == 0);

	bdev[5] = allocate_vbdev("bdev5");
	rc = spdk_bdev_module_claim_bdev(bdev[5], NULL, &bdev_ut_if);
	CU_ASSERT(rc == 0);

	bdev[6] = allocate_vbdev("bdev6");

	bdev[7] = allocate_vbdev("bdev7");

	bdev[8] = allocate_vbdev("bdev8");

	/* Open bdev0 read-only.  This should succeed. */
	rc = spdk_bdev_open_ext("bdev0", false, bdev_ut_event_cb, NULL, &desc[0]);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(desc[0] != NULL);
	CU_ASSERT(bdev[0] == spdk_bdev_desc_get_bdev(desc[0]));
	spdk_bdev_close(desc[0]);

	/*
	 * Open bdev1 read/write.  This should fail since bdev1 has been claimed
	 * by a vbdev module.
	 */
	rc = spdk_bdev_open_ext("bdev1", true, bdev_ut_event_cb, NULL, &desc[1]);
	CU_ASSERT(rc == -EPERM);

	/*
	 * Open bdev4 read/write.  This should fail since bdev4 has been claimed
	 * by a vbdev module.
	 */
	rc = spdk_bdev_open_ext("bdev4", true, bdev_ut_event_cb, NULL, &desc[4]);
	CU_ASSERT(rc == -EPERM);

	/* Open bdev4 read-only.  This should succeed. */
	rc = spdk_bdev_open_ext("bdev4", false, bdev_ut_event_cb, NULL, &desc[4]);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(desc[4] != NULL);
	CU_ASSERT(bdev[4] == spdk_bdev_desc_get_bdev(desc[4]));
	spdk_bdev_close(desc[4]);

	/*
	 * Open bdev8 read/write.  This should succeed since it is a leaf
	 * bdev that no module has claimed.
	 */
	rc = spdk_bdev_open_ext("bdev8", true, bdev_ut_event_cb, NULL, &desc[8]);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(desc[8] != NULL);
	CU_ASSERT(bdev[8] == spdk_bdev_desc_get_bdev(desc[8]));
	spdk_bdev_close(desc[8]);

	/*
	 * Open bdev5 read/write.  This should fail since bdev5 has been claimed
	 * by a vbdev module.
	 */
	rc = spdk_bdev_open_ext("bdev5", true, bdev_ut_event_cb, NULL, &desc[5]);
	CU_ASSERT(rc == -EPERM);

	/* Open bdev5 read-only.  This should succeed. */
	rc = spdk_bdev_open_ext("bdev5", false, bdev_ut_event_cb, NULL, &desc[5]);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(desc[5] != NULL);
	CU_ASSERT(bdev[5] == spdk_bdev_desc_get_bdev(desc[5]));
	spdk_bdev_close(desc[5]);

	free_vbdev(bdev[8]);

	free_vbdev(bdev[5]);
	free_vbdev(bdev[6]);
	free_vbdev(bdev[7]);

	free_vbdev(bdev[4]);

	free_bdev(bdev[0]);
	free_bdev(bdev[1]);
	free_bdev(bdev[2]);
	free_bdev(bdev[3]);
}

static void
bytes_to_blocks_test(void)
{
	struct spdk_bdev bdev;
	uint64_t offset_blocks, num_blocks;

	memset(&bdev, 0, sizeof(bdev));

	bdev.blocklen = 512;

	/* All parameters valid */
	offset_blocks = 0;
	num_blocks = 0;
	CU_ASSERT(bdev_bytes_to_blocks(&bdev, 512, &offset_blocks, 1024, &num_blocks) == 0);
	CU_ASSERT(offset_blocks == 1);
	CU_ASSERT(num_blocks == 2);

	/* Offset not a block multiple */
	CU_ASSERT(bdev_bytes_to_blocks(&bdev, 3, &offset_blocks, 512, &num_blocks) != 0);

	/* Length not a block multiple */
	CU_ASSERT(bdev_bytes_to_blocks(&bdev, 512, &offset_blocks, 3, &num_blocks) != 0);

	/* In case blocklen is not a power of two */
	bdev.blocklen = 100;
	CU_ASSERT(bdev_bytes_to_blocks(&bdev, 100, &offset_blocks, 200, &num_blocks) == 0);
	CU_ASSERT(offset_blocks == 1);
	CU_ASSERT(num_blocks == 2);

	/* Offset not a block multiple */
	CU_ASSERT(bdev_bytes_to_blocks(&bdev, 3, &offset_blocks, 100, &num_blocks) != 0);

	/* Length not a block multiple */
	CU_ASSERT(bdev_bytes_to_blocks(&bdev, 100, &offset_blocks, 3, &num_blocks) != 0);
}

static void
num_blocks_test(void)
{
	struct spdk_bdev bdev;
	struct spdk_bdev_desc *desc = NULL;
	int rc;

	memset(&bdev, 0, sizeof(bdev));
	bdev.name = "num_blocks";
	bdev.fn_table = &fn_table;
	bdev.module = &bdev_ut_if;
	spdk_bdev_register(&bdev);
	spdk_bdev_notify_blockcnt_change(&bdev, 50);

	/* Growing block number */
	CU_ASSERT(spdk_bdev_notify_blockcnt_change(&bdev, 70) == 0);
	/* Shrinking block number */
	CU_ASSERT(spdk_bdev_notify_blockcnt_change(&bdev, 30) == 0);

	rc = spdk_bdev_open_ext("num_blocks", false, bdev_open_cb1, &desc, &desc);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(desc != NULL);
	CU_ASSERT(&bdev == spdk_bdev_desc_get_bdev(desc));

	/* Growing block number */
	CU_ASSERT(spdk_bdev_notify_blockcnt_change(&bdev, 80) == 0);
	/* Shrinking block number is not allowed while the bdev is open */
	CU_ASSERT(spdk_bdev_notify_blockcnt_change(&bdev, 20) != 0);

	g_event_type1 = 0xFF;
	/* Growing block number */
	CU_ASSERT(spdk_bdev_notify_blockcnt_change(&bdev, 90) == 0);

	poll_threads();
	CU_ASSERT_EQUAL(g_event_type1, SPDK_BDEV_EVENT_RESIZE);

	g_event_type1 = 0xFF;
	/* Growing block number and closing */
	CU_ASSERT(spdk_bdev_notify_blockcnt_change(&bdev, 100) == 0);

	spdk_bdev_close(desc);
	spdk_bdev_unregister(&bdev, NULL, NULL);

	poll_threads();

	/* Callback is not called for a closed device */
	CU_ASSERT_EQUAL(g_event_type1, 0xFF);
}

static void
io_valid_test(void)
{
	struct spdk_bdev bdev;

	memset(&bdev, 0, sizeof(bdev));

	bdev.blocklen = 512;
	CU_ASSERT(pthread_mutex_init(&bdev.internal.mutex, NULL) == 0);

	spdk_bdev_notify_blockcnt_change(&bdev, 100);

	/* All parameters valid */
	CU_ASSERT(bdev_io_valid_blocks(&bdev, 1, 2) == true);
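	/* bdev_io_valid_blocks() must reject any range that ends past the 100-block
	 * bdev, including ranges where offset + length wraps around UINT64_MAX.
	 */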
	/* Last valid block */
	CU_ASSERT(bdev_io_valid_blocks(&bdev, 99, 1) == true);

	/* Offset past end of bdev */
	CU_ASSERT(bdev_io_valid_blocks(&bdev, 100, 1) == false);

	/* Offset + length past end of bdev */
	CU_ASSERT(bdev_io_valid_blocks(&bdev, 99, 2) == false);

	/* Offset near end of uint64_t range (2^64 - 1) */
	CU_ASSERT(bdev_io_valid_blocks(&bdev, 18446744073709551615ULL, 1) == false);

	CU_ASSERT(pthread_mutex_destroy(&bdev.internal.mutex) == 0);
}

static void
alias_add_del_test(void)
{
	struct spdk_bdev *bdev[3];
	int rc;

	/* Create and register bdevs */
	bdev[0] = allocate_bdev("bdev0");
	SPDK_CU_ASSERT_FATAL(bdev[0] != NULL);

	bdev[1] = allocate_bdev("bdev1");
	SPDK_CU_ASSERT_FATAL(bdev[1] != NULL);

	bdev[2] = allocate_bdev("bdev2");
	SPDK_CU_ASSERT_FATAL(bdev[2] != NULL);

	poll_threads();

	/*
	 * Try adding an alias identical to the bdev's name.  Since the alias
	 * duplicates the name, it cannot be added to the aliases list.
	 */
	rc = spdk_bdev_alias_add(bdev[0], bdev[0]->name);
	CU_ASSERT(rc == -EEXIST);

	/* Trying to add an empty (NULL) alias should fail */
	rc = spdk_bdev_alias_add(bdev[0], NULL);
	CU_ASSERT(rc == -EINVAL);

	/* Try adding the same alias to two different registered bdevs */

	/* The alias is used for the first time, so this should pass */
	rc = spdk_bdev_alias_add(bdev[0], "proper alias 0");
	CU_ASSERT(rc == 0);

	/* The alias was already added to another bdev, so this should fail */
	rc = spdk_bdev_alias_add(bdev[1], "proper alias 0");
	CU_ASSERT(rc == -EEXIST);

	/* The alias is used for the first time, so this should pass */
	rc = spdk_bdev_alias_add(bdev[1], "proper alias 1");
	CU_ASSERT(rc == 0);

	/* Try removing aliases from registered bdevs */

	/* The alias is not on the bdev's aliases list, so this should fail */
	rc = spdk_bdev_alias_del(bdev[0], "not existing");
	CU_ASSERT(rc == -ENOENT);

	/* The alias is present on the bdev's aliases list, so this should pass */
	rc = spdk_bdev_alias_del(bdev[0], "proper alias 0");
	CU_ASSERT(rc == 0);

	/* The alias is present on the bdev's aliases list, so this should pass */
	rc = spdk_bdev_alias_del(bdev[1], "proper alias 1");
	CU_ASSERT(rc == 0);

	/*
	 * Try removing the name instead of an alias.  This should fail; the
	 * name cannot be changed or removed.
	 */
	rc = spdk_bdev_alias_del(bdev[0], bdev[0]->name);
	CU_ASSERT(rc != 0);

	/* Delete all aliases from an empty alias list */
	spdk_bdev_alias_del_all(bdev[2]);
	SPDK_CU_ASSERT_FATAL(TAILQ_EMPTY(&bdev[2]->aliases));

	/* Delete all aliases from a non-empty alias list */
	rc = spdk_bdev_alias_add(bdev[2], "alias0");
	CU_ASSERT(rc == 0);
	rc = spdk_bdev_alias_add(bdev[2], "alias1");
	CU_ASSERT(rc == 0);
	spdk_bdev_alias_del_all(bdev[2]);
	CU_ASSERT(TAILQ_EMPTY(&bdev[2]->aliases));

	/* Unregister and free bdevs */
	spdk_bdev_unregister(bdev[0], NULL, NULL);
	spdk_bdev_unregister(bdev[1], NULL, NULL);
	spdk_bdev_unregister(bdev[2], NULL, NULL);

	poll_threads();

	free(bdev[0]);
	free(bdev[1]);
	free(bdev[2]);
}
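/*
 * Common I/O completion callback.  It records the final status and, for the
 * start phase of a zcopy I/O, keeps the bdev_io around (in g_zcopy_bdev_io)
 * so the test can later end the zcopy against it; all other I/Os are freed.
 */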
static void
io_done(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
{
	g_io_done = true;
	g_io_status = bdev_io->internal.status;
	if ((bdev_io->type == SPDK_BDEV_IO_TYPE_ZCOPY) &&
	    (bdev_io->u.bdev.zcopy.start)) {
		g_zcopy_bdev_io = bdev_io;
	} else {
		spdk_bdev_free_io(bdev_io);
		g_zcopy_bdev_io = NULL;
	}
}

static void
bdev_init_cb(void *arg, int rc)
{
	CU_ASSERT(rc == 0);
}

static void
bdev_fini_cb(void *arg)
{
}

struct bdev_ut_io_wait_entry {
	struct spdk_bdev_io_wait_entry	entry;
	struct spdk_io_channel		*io_ch;
	struct spdk_bdev_desc		*desc;
	bool				submitted;
};

static void
io_wait_cb(void *arg)
{
	struct bdev_ut_io_wait_entry *entry = arg;
	int rc;

	rc = spdk_bdev_read_blocks(entry->desc, entry->io_ch, NULL, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	entry->submitted = true;
}

static void
bdev_io_types_test(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc = NULL;
	struct spdk_io_channel *io_ch;
	struct spdk_bdev_opts bdev_opts = {};
	int rc;

	spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts));
	bdev_opts.bdev_io_pool_size = 4;
	bdev_opts.bdev_io_cache_size = 2;

	rc = spdk_bdev_set_opts(&bdev_opts);
	CU_ASSERT(rc == 0);
	spdk_bdev_initialize(bdev_init_cb, NULL);
	poll_threads();

	bdev = allocate_bdev("bdev0");

	rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc);
	CU_ASSERT(rc == 0);
	poll_threads();
	SPDK_CU_ASSERT_FATAL(desc != NULL);
	CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc));
	io_ch = spdk_bdev_get_io_channel(desc);
	CU_ASSERT(io_ch != NULL);

	/* WRITE and WRITE ZEROES are not supported */
	ut_enable_io_type(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, false);
	ut_enable_io_type(SPDK_BDEV_IO_TYPE_WRITE, false);
	rc = spdk_bdev_write_zeroes_blocks(desc, io_ch, 0, 128, io_done, NULL);
	CU_ASSERT(rc == -ENOTSUP);
	ut_enable_io_type(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, true);
	ut_enable_io_type(SPDK_BDEV_IO_TYPE_WRITE, true);

	spdk_put_io_channel(io_ch);
	spdk_bdev_close(desc);
	free_bdev(bdev);
	spdk_bdev_finish(bdev_fini_cb, NULL);
	poll_threads();
}

static void
bdev_io_wait_test(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc = NULL;
	struct spdk_io_channel *io_ch;
	struct spdk_bdev_opts bdev_opts = {};
	struct bdev_ut_io_wait_entry io_wait_entry;
	struct bdev_ut_io_wait_entry io_wait_entry2;
	int rc;

	spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts));
	bdev_opts.bdev_io_pool_size = 4;
	bdev_opts.bdev_io_cache_size = 2;

	rc = spdk_bdev_set_opts(&bdev_opts);
	CU_ASSERT(rc == 0);
	spdk_bdev_initialize(bdev_init_cb, NULL);
	poll_threads();

	bdev = allocate_bdev("bdev0");

	rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc);
	CU_ASSERT(rc == 0);
	poll_threads();
	SPDK_CU_ASSERT_FATAL(desc != NULL);
	CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc));
	io_ch = spdk_bdev_get_io_channel(desc);
	CU_ASSERT(io_ch != NULL);

	rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 4);

	rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, NULL);
	CU_ASSERT(rc == -ENOMEM);
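	/* The bdev_io pool (size 4) is exhausted, so the fifth read was rejected
	 * with -ENOMEM.  Queue two io_wait entries: each time a completion returns
	 * a bdev_io to the pool, one queued callback fires and resubmits its read.
	 */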
	io_wait_entry.entry.bdev = bdev;
	io_wait_entry.entry.cb_fn = io_wait_cb;
	io_wait_entry.entry.cb_arg = &io_wait_entry;
	io_wait_entry.io_ch = io_ch;
	io_wait_entry.desc = desc;
	io_wait_entry.submitted = false;
	/* Cannot use the same io_wait_entry for two different calls. */
	memcpy(&io_wait_entry2, &io_wait_entry, sizeof(io_wait_entry));
	io_wait_entry2.entry.cb_arg = &io_wait_entry2;

	/* Queue two I/O waits. */
	rc = spdk_bdev_queue_io_wait(bdev, io_ch, &io_wait_entry.entry);
	CU_ASSERT(rc == 0);
	CU_ASSERT(io_wait_entry.submitted == false);
	rc = spdk_bdev_queue_io_wait(bdev, io_ch, &io_wait_entry2.entry);
	CU_ASSERT(rc == 0);
	CU_ASSERT(io_wait_entry2.submitted == false);

	stub_complete_io(1);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 4);
	CU_ASSERT(io_wait_entry.submitted == true);
	CU_ASSERT(io_wait_entry2.submitted == false);

	stub_complete_io(1);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 4);
	CU_ASSERT(io_wait_entry2.submitted == true);

	stub_complete_io(4);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);

	spdk_put_io_channel(io_ch);
	spdk_bdev_close(desc);
	free_bdev(bdev);
	spdk_bdev_finish(bdev_fini_cb, NULL);
	poll_threads();
}

static void
bdev_io_spans_split_test(void)
{
	struct spdk_bdev bdev;
	struct spdk_bdev_io bdev_io;
	struct iovec iov[BDEV_IO_NUM_CHILD_IOV];

	memset(&bdev, 0, sizeof(bdev));
	bdev_io.u.bdev.iovs = iov;

	bdev_io.type = SPDK_BDEV_IO_TYPE_READ;
	bdev.optimal_io_boundary = 0;
	bdev.max_segment_size = 0;
	bdev.max_num_segments = 0;
	bdev_io.bdev = &bdev;

	/* bdev has no optimal_io_boundary and no max segment size or count set,
	 * so this should return false. */
	CU_ASSERT(bdev_io_should_split(&bdev_io) == false);

	bdev.split_on_optimal_io_boundary = true;
	bdev.optimal_io_boundary = 32;
	bdev_io.type = SPDK_BDEV_IO_TYPE_RESET;

	/* RESETs are not based on LBAs, so this should return false. */
	CU_ASSERT(bdev_io_should_split(&bdev_io) == false);

	bdev_io.type = SPDK_BDEV_IO_TYPE_READ;
	bdev_io.u.bdev.offset_blocks = 0;
	bdev_io.u.bdev.num_blocks = 32;

	/* This I/O runs right up to, but does not cross, the boundary, so this
	 * should return false. */
	CU_ASSERT(bdev_io_should_split(&bdev_io) == false);

	bdev_io.u.bdev.num_blocks = 33;

	/* This I/O spans a boundary. */
	CU_ASSERT(bdev_io_should_split(&bdev_io) == true);

	bdev_io.u.bdev.num_blocks = 32;
	bdev.max_segment_size = 512 * 32;
	bdev.max_num_segments = 1;
	bdev_io.u.bdev.iovcnt = 1;
	iov[0].iov_len = 512;

	/* Does not cross the boundary and does not exceed max_segment_size
	 * or max_num_segments. */
	CU_ASSERT(bdev_io_should_split(&bdev_io) == false);

	bdev.split_on_optimal_io_boundary = false;
	bdev.max_segment_size = 512;
	bdev.max_num_segments = 1;
	bdev_io.u.bdev.iovcnt = 2;

	/* Exceeds max_num_segments */
	CU_ASSERT(bdev_io_should_split(&bdev_io) == true);

	bdev.max_num_segments = 2;
	iov[0].iov_len = 513;
	iov[1].iov_len = 512;

	/* Exceeds max_segment_size */
	CU_ASSERT(bdev_io_should_split(&bdev_io) == true);
}

static void
bdev_io_boundary_split_test(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc = NULL;
	struct spdk_io_channel *io_ch;
	struct spdk_bdev_opts bdev_opts = {};
	struct iovec iov[BDEV_IO_NUM_CHILD_IOV * 2];
	struct ut_expected_io *expected_io;
	void *md_buf = (void *)0xFF000000;
	uint64_t i;
	int rc;

	spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts));
	bdev_opts.bdev_io_pool_size = 512;
	bdev_opts.bdev_io_cache_size = 64;

	rc = spdk_bdev_set_opts(&bdev_opts);
	CU_ASSERT(rc == 0);
	spdk_bdev_initialize(bdev_init_cb, NULL);

	bdev = allocate_bdev("bdev0");

	rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(desc != NULL);
	CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc));
	io_ch = spdk_bdev_get_io_channel(desc);
	CU_ASSERT(io_ch != NULL);

	bdev->optimal_io_boundary = 16;
	bdev->split_on_optimal_io_boundary = false;

	g_io_done = false;

	/* First test that the I/O does not get split if split_on_optimal_io_boundary == false. */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 14, 8, 1);
	ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 8 * 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 8, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);

	bdev->split_on_optimal_io_boundary = true;
	bdev->md_interleave = false;
	bdev->md_len = 8;

	/* Now test that a single-vector command is split correctly.
	 * Offset 14, length 8, payload 0xF000
	 *  Child - Offset 14, length 2, payload 0xF000
	 *  Child - Offset 16, length 6, payload 0xF000 + 2 * 512
	 *
	 * Set up the expected values before calling spdk_bdev_read_blocks
	 */
	g_io_done = false;
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 14, 2, 1);
	expected_io->md_buf = md_buf;
	ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 2 * 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 16, 6, 1);
	expected_io->md_buf = md_buf + 2 * 8;
	ut_expected_io_set_iov(expected_io, 0, (void *)(0xF000 + 2 * 512), 6 * 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	/* spdk_bdev_read_blocks will submit the first child immediately. */
	rc = spdk_bdev_read_blocks_with_md(desc, io_ch, (void *)0xF000, md_buf,
					   14, 8, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2);
	stub_complete_io(2);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);

	/* Now set up a more complex, multi-vector command that needs to be split,
	 * including splitting iovecs.
	 */
	iov[0].iov_base = (void *)0x10000;
	iov[0].iov_len = 512;
	iov[1].iov_base = (void *)0x20000;
	iov[1].iov_len = 20 * 512;
	iov[2].iov_base = (void *)0x30000;
	iov[2].iov_len = 11 * 512;

	g_io_done = false;
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 14, 2, 2);
	expected_io->md_buf = md_buf;
	ut_expected_io_set_iov(expected_io, 0, (void *)0x10000, 512);
	ut_expected_io_set_iov(expected_io, 1, (void *)0x20000, 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 16, 16, 1);
	expected_io->md_buf = md_buf + 2 * 8;
	ut_expected_io_set_iov(expected_io, 0, (void *)(0x20000 + 512), 16 * 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 32, 14, 2);
	expected_io->md_buf = md_buf + 18 * 8;
	ut_expected_io_set_iov(expected_io, 0, (void *)(0x20000 + 17 * 512), 3 * 512);
	ut_expected_io_set_iov(expected_io, 1, (void *)0x30000, 11 * 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	rc = spdk_bdev_writev_blocks_with_md(desc, io_ch, iov, 3, md_buf,
					     14, 32, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 3);
	stub_complete_io(3);
	CU_ASSERT(g_io_done == true);
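	/* BDEV_IO_NUM_CHILD_IOV (32 in bdev.c at the time of writing) caps how many
	 * iovec entries a single child I/O can carry, so a parent I/O with more
	 * iovecs than that must be split even within one I/O boundary.
	 */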
	/* Test a multi-vector command that needs to be split on the optimal I/O
	 * boundary and then split further due to the capacity of child iovs.
	 */
	for (i = 0; i < BDEV_IO_NUM_CHILD_IOV * 2; i++) {
		iov[i].iov_base = (void *)((i + 1) * 0x10000);
		iov[i].iov_len = 512;
	}

	bdev->optimal_io_boundary = BDEV_IO_NUM_CHILD_IOV;
	g_io_done = false;
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0, BDEV_IO_NUM_CHILD_IOV,
					   BDEV_IO_NUM_CHILD_IOV);
	expected_io->md_buf = md_buf;
	for (i = 0; i < BDEV_IO_NUM_CHILD_IOV; i++) {
		ut_expected_io_set_iov(expected_io, i, (void *)((i + 1) * 0x10000), 512);
	}
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, BDEV_IO_NUM_CHILD_IOV,
					   BDEV_IO_NUM_CHILD_IOV, BDEV_IO_NUM_CHILD_IOV);
	expected_io->md_buf = md_buf + BDEV_IO_NUM_CHILD_IOV * 8;
	for (i = 0; i < BDEV_IO_NUM_CHILD_IOV; i++) {
		ut_expected_io_set_iov(expected_io, i,
				       (void *)((i + 1 + BDEV_IO_NUM_CHILD_IOV) * 0x10000), 512);
	}
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	rc = spdk_bdev_readv_blocks_with_md(desc, io_ch, iov, BDEV_IO_NUM_CHILD_IOV * 2, md_buf,
					    0, BDEV_IO_NUM_CHILD_IOV * 2, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);

	/* Test a multi-vector command that needs to be split on the optimal I/O
	 * boundary and then split further due to the capacity of child iovs.  In
	 * this case, the length of the rest of the iovec array within an I/O
	 * boundary is a multiple of the block size.
	 */

	/* Fill the iovec array for exactly one boundary.  The iovec count for this
	 * boundary is BDEV_IO_NUM_CHILD_IOV + 1, which exceeds the capacity of
	 * child iovs.
	 */
	for (i = 0; i < BDEV_IO_NUM_CHILD_IOV - 2; i++) {
		iov[i].iov_base = (void *)((i + 1) * 0x10000);
		iov[i].iov_len = 512;
	}
	for (i = BDEV_IO_NUM_CHILD_IOV - 2; i < BDEV_IO_NUM_CHILD_IOV; i++) {
		iov[i].iov_base = (void *)((i + 1) * 0x10000);
		iov[i].iov_len = 256;
	}
	iov[BDEV_IO_NUM_CHILD_IOV].iov_base = (void *)((BDEV_IO_NUM_CHILD_IOV + 1) * 0x10000);
	iov[BDEV_IO_NUM_CHILD_IOV].iov_len = 512;

	/* Add an extra iovec to trigger split */
	iov[BDEV_IO_NUM_CHILD_IOV + 1].iov_base = (void *)((BDEV_IO_NUM_CHILD_IOV + 2) * 0x10000);
	iov[BDEV_IO_NUM_CHILD_IOV + 1].iov_len = 512;

	bdev->optimal_io_boundary = BDEV_IO_NUM_CHILD_IOV;
	g_io_done = false;
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0,
					   BDEV_IO_NUM_CHILD_IOV - 1, BDEV_IO_NUM_CHILD_IOV);
	expected_io->md_buf = md_buf;
	for (i = 0; i < BDEV_IO_NUM_CHILD_IOV - 2; i++) {
		ut_expected_io_set_iov(expected_io, i,
				       (void *)((i + 1) * 0x10000), 512);
	}
	for (i = BDEV_IO_NUM_CHILD_IOV - 2; i < BDEV_IO_NUM_CHILD_IOV; i++) {
		ut_expected_io_set_iov(expected_io, i,
				       (void *)((i + 1) * 0x10000), 256);
	}
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, BDEV_IO_NUM_CHILD_IOV - 1,
					   1, 1);
	expected_io->md_buf = md_buf + (BDEV_IO_NUM_CHILD_IOV - 1) * 8;
	ut_expected_io_set_iov(expected_io, 0,
			       (void *)((BDEV_IO_NUM_CHILD_IOV + 1) * 0x10000), 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, BDEV_IO_NUM_CHILD_IOV,
					   1, 1);
	expected_io->md_buf = md_buf + BDEV_IO_NUM_CHILD_IOV * 8;
	ut_expected_io_set_iov(expected_io, 0,
			       (void *)((BDEV_IO_NUM_CHILD_IOV + 2) * 0x10000), 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	rc = spdk_bdev_readv_blocks_with_md(desc, io_ch, iov, BDEV_IO_NUM_CHILD_IOV + 2, md_buf,
					    0, BDEV_IO_NUM_CHILD_IOV + 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2);
	stub_complete_io(2);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);

	/* Test a multi-vector command that needs to be split on the optimal I/O
	 * boundary and then split further due to the capacity of child iovs.  The
	 * child request offset should be rewound to the last aligned offset, and
	 * the request should then succeed without error.
	 */
	for (i = 0; i < BDEV_IO_NUM_CHILD_IOV - 1; i++) {
		iov[i].iov_base = (void *)((i + 1) * 0x10000);
		iov[i].iov_len = 512;
	}
	iov[BDEV_IO_NUM_CHILD_IOV - 1].iov_base = (void *)(BDEV_IO_NUM_CHILD_IOV * 0x10000);
	iov[BDEV_IO_NUM_CHILD_IOV - 1].iov_len = 256;

	iov[BDEV_IO_NUM_CHILD_IOV].iov_base = (void *)((BDEV_IO_NUM_CHILD_IOV + 1) * 0x10000);
	iov[BDEV_IO_NUM_CHILD_IOV].iov_len = 256;

	iov[BDEV_IO_NUM_CHILD_IOV + 1].iov_base = (void *)((BDEV_IO_NUM_CHILD_IOV + 2) * 0x10000);
	iov[BDEV_IO_NUM_CHILD_IOV + 1].iov_len = 512;

	bdev->optimal_io_boundary = BDEV_IO_NUM_CHILD_IOV;
	g_io_done = false;
	g_io_status = 0;
	/* The first expected I/O spans offset 0 through BDEV_IO_NUM_CHILD_IOV - 1 */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0,
					   BDEV_IO_NUM_CHILD_IOV - 1, BDEV_IO_NUM_CHILD_IOV - 1);
	expected_io->md_buf = md_buf;
	for (i = 0; i < BDEV_IO_NUM_CHILD_IOV - 1; i++) {
		ut_expected_io_set_iov(expected_io, i,
				       (void *)((i + 1) * 0x10000), 512);
	}
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
	/* The second expected I/O spans offset BDEV_IO_NUM_CHILD_IOV - 1 through BDEV_IO_NUM_CHILD_IOV */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, BDEV_IO_NUM_CHILD_IOV - 1,
					   1, 2);
	expected_io->md_buf = md_buf + (BDEV_IO_NUM_CHILD_IOV - 1) * 8;
	ut_expected_io_set_iov(expected_io, 0,
			       (void *)(BDEV_IO_NUM_CHILD_IOV * 0x10000), 256);
	ut_expected_io_set_iov(expected_io, 1,
			       (void *)((BDEV_IO_NUM_CHILD_IOV + 1) * 0x10000), 256);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
	/* The third expected I/O spans offset BDEV_IO_NUM_CHILD_IOV through BDEV_IO_NUM_CHILD_IOV + 1 */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, BDEV_IO_NUM_CHILD_IOV,
					   1, 1);
	expected_io->md_buf = md_buf + BDEV_IO_NUM_CHILD_IOV * 8;
	ut_expected_io_set_iov(expected_io, 0,
			       (void *)((BDEV_IO_NUM_CHILD_IOV + 2) * 0x10000), 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	rc = spdk_bdev_readv_blocks_with_md(desc, io_ch, iov, BDEV_IO_NUM_CHILD_IOV * 2, md_buf,
					    0, BDEV_IO_NUM_CHILD_IOV + 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2);
	stub_complete_io(2);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);

	/* Test a multi-vector command that needs to be split due to the I/O boundary
	 * and the capacity of child iovs.  In particular, test the case where the
	 * command is split due to the capacity of child iovs and the tail address is
	 * not aligned with the block size, so it is rewound to the aligned address.
	 *
	 * The iovecs used in the read request are complex but are based on the data
	 * collected in a real issue.  We change the base addresses but keep the
	 * lengths so as not to lose the credibility of the test.
	 */
	bdev->optimal_io_boundary = 128;
	g_io_done = false;
	g_io_status = 0;

	for (i = 0; i < 31; i++) {
		iov[i].iov_base = (void *)(0xFEED0000000 + (i << 20));
		iov[i].iov_len = 1024;
	}
	iov[31].iov_base = (void *)0xFEED1F00000;
	iov[31].iov_len = 32768;
	iov[32].iov_base = (void *)0xFEED2000000;
	iov[32].iov_len = 160;
	iov[33].iov_base = (void *)0xFEED2100000;
	iov[33].iov_len = 4096;
	iov[34].iov_base = (void *)0xFEED2200000;
	iov[34].iov_len = 4096;
	iov[35].iov_base = (void *)0xFEED2300000;
	iov[35].iov_len = 4096;
	iov[36].iov_base = (void *)0xFEED2400000;
	iov[36].iov_len = 4096;
	iov[37].iov_base = (void *)0xFEED2500000;
	iov[37].iov_len = 4096;
	iov[38].iov_base = (void *)0xFEED2600000;
	iov[38].iov_len = 4096;
	iov[39].iov_base = (void *)0xFEED2700000;
	iov[39].iov_len = 4096;
	iov[40].iov_base = (void *)0xFEED2800000;
	iov[40].iov_len = 4096;
	iov[41].iov_base = (void *)0xFEED2900000;
	iov[41].iov_len = 4096;
	iov[42].iov_base = (void *)0xFEED2A00000;
	iov[42].iov_len = 4096;
	iov[43].iov_base = (void *)0xFEED2B00000;
	iov[43].iov_len = 12288;
	iov[44].iov_base = (void *)0xFEED2C00000;
	iov[44].iov_len = 8192;
	iov[45].iov_base = (void *)0xFEED2F00000;
	iov[45].iov_len = 4096;
	iov[46].iov_base = (void *)0xFEED3000000;
	iov[46].iov_len = 4096;
	iov[47].iov_base = (void *)0xFEED3100000;
	iov[47].iov_len = 4096;
	iov[48].iov_base = (void *)0xFEED3200000;
	iov[48].iov_len = 24576;
	iov[49].iov_base = (void *)0xFEED3300000;
	iov[49].iov_len = 16384;
	iov[50].iov_base = (void *)0xFEED3400000;
	iov[50].iov_len = 12288;
	iov[51].iov_base = (void *)0xFEED3500000;
	iov[51].iov_len = 4096;
	iov[52].iov_base = (void *)0xFEED3600000;
	iov[52].iov_len = 4096;
	iov[53].iov_base = (void *)0xFEED3700000;
	iov[53].iov_len = 4096;
	iov[54].iov_base = (void *)0xFEED3800000;
	iov[54].iov_len = 28672;
	iov[55].iov_base = (void *)0xFEED3900000;
	iov[55].iov_len = 20480;
	iov[56].iov_base = (void *)0xFEED3A00000;
	iov[56].iov_len = 4096;
	iov[57].iov_base = (void *)0xFEED3B00000;
	iov[57].iov_len = 12288;
	iov[58].iov_base = (void *)0xFEED3C00000;
	iov[58].iov_len = 4096;
	iov[59].iov_base = (void *)0xFEED3D00000;
	iov[59].iov_len = 4096;
	iov[60].iov_base = (void *)0xFEED3E00000;
	iov[60].iov_len = 352;

	/* The 1st child IO must be from iov[0] to iov[31], split by the capacity
	 * of child iovs.
	 */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0, 126, 32);
	expected_io->md_buf = md_buf;
	for (i = 0; i < 32; i++) {
		ut_expected_io_set_iov(expected_io, i, iov[i].iov_base, iov[i].iov_len);
	}
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
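	/* The 32 iovecs above total 31 * 1024 + 32768 = 64512 bytes, i.e. 126
	 * blocks of 512 bytes, which is why the first child ends at block 126,
	 * short of the 128-block optimal I/O boundary.
	 */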
	/* The 2nd child IO must be from iov[32] to the first 864 bytes of iov[33],
	 * split by the I/O boundary requirement.
	 */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 126, 2, 2);
	expected_io->md_buf = md_buf + 126 * 8;
	ut_expected_io_set_iov(expected_io, 0, iov[32].iov_base, iov[32].iov_len);
	ut_expected_io_set_iov(expected_io, 1, iov[33].iov_base, 864);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	/* The 3rd child IO must be from the remaining 3232 bytes of iov[33] to
	 * the first 864 bytes of iov[46], split by the I/O boundary requirement.
	 */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 128, 128, 14);
	expected_io->md_buf = md_buf + 128 * 8;
	ut_expected_io_set_iov(expected_io, 0, (void *)((uintptr_t)iov[33].iov_base + 864),
			       iov[33].iov_len - 864);
	ut_expected_io_set_iov(expected_io, 1, iov[34].iov_base, iov[34].iov_len);
	ut_expected_io_set_iov(expected_io, 2, iov[35].iov_base, iov[35].iov_len);
	ut_expected_io_set_iov(expected_io, 3, iov[36].iov_base, iov[36].iov_len);
	ut_expected_io_set_iov(expected_io, 4, iov[37].iov_base, iov[37].iov_len);
	ut_expected_io_set_iov(expected_io, 5, iov[38].iov_base, iov[38].iov_len);
	ut_expected_io_set_iov(expected_io, 6, iov[39].iov_base, iov[39].iov_len);
	ut_expected_io_set_iov(expected_io, 7, iov[40].iov_base, iov[40].iov_len);
	ut_expected_io_set_iov(expected_io, 8, iov[41].iov_base, iov[41].iov_len);
	ut_expected_io_set_iov(expected_io, 9, iov[42].iov_base, iov[42].iov_len);
	ut_expected_io_set_iov(expected_io, 10, iov[43].iov_base, iov[43].iov_len);
	ut_expected_io_set_iov(expected_io, 11, iov[44].iov_base, iov[44].iov_len);
	ut_expected_io_set_iov(expected_io, 12, iov[45].iov_base, iov[45].iov_len);
	ut_expected_io_set_iov(expected_io, 13, iov[46].iov_base, 864);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	/* The 4th child IO must be from the remaining 3232 bytes of iov[46] to the
	 * first 864 bytes of iov[52], split by the I/O boundary requirement.
	 */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 256, 128, 7);
	expected_io->md_buf = md_buf + 256 * 8;
	ut_expected_io_set_iov(expected_io, 0, (void *)((uintptr_t)iov[46].iov_base + 864),
			       iov[46].iov_len - 864);
	ut_expected_io_set_iov(expected_io, 1, iov[47].iov_base, iov[47].iov_len);
	ut_expected_io_set_iov(expected_io, 2, iov[48].iov_base, iov[48].iov_len);
	ut_expected_io_set_iov(expected_io, 3, iov[49].iov_base, iov[49].iov_len);
	ut_expected_io_set_iov(expected_io, 4, iov[50].iov_base, iov[50].iov_len);
	ut_expected_io_set_iov(expected_io, 5, iov[51].iov_base, iov[51].iov_len);
	ut_expected_io_set_iov(expected_io, 6, iov[52].iov_base, 864);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	/* The 5th child IO must be from the remaining 3232 bytes of iov[52] to
	 * the first 4960 bytes of iov[57], split by the I/O boundary requirement.
	 */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 384, 128, 6);
	expected_io->md_buf = md_buf + 384 * 8;
	ut_expected_io_set_iov(expected_io, 0, (void *)((uintptr_t)iov[52].iov_base + 864),
			       iov[52].iov_len - 864);
	ut_expected_io_set_iov(expected_io, 1, iov[53].iov_base, iov[53].iov_len);
	ut_expected_io_set_iov(expected_io, 2, iov[54].iov_base, iov[54].iov_len);
	ut_expected_io_set_iov(expected_io, 3, iov[55].iov_base, iov[55].iov_len);
	ut_expected_io_set_iov(expected_io, 4, iov[56].iov_base, iov[56].iov_len);
	ut_expected_io_set_iov(expected_io, 5, iov[57].iov_base, 4960);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	/* The 6th child IO must be from the remaining 7328 bytes of iov[57]
	 * to the first 3936 bytes of iov[59], split by the capacity of child iovs.
	 */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 512, 30, 3);
	expected_io->md_buf = md_buf + 512 * 8;
	ut_expected_io_set_iov(expected_io, 0, (void *)((uintptr_t)iov[57].iov_base + 4960),
			       iov[57].iov_len - 4960);
	ut_expected_io_set_iov(expected_io, 1, iov[58].iov_base, iov[58].iov_len);
	ut_expected_io_set_iov(expected_io, 2, iov[59].iov_base, 3936);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	/* The 7th child IO is from the remaining 160 bytes of iov[59] and iov[60]. */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 542, 1, 2);
	expected_io->md_buf = md_buf + 542 * 8;
	ut_expected_io_set_iov(expected_io, 0, (void *)((uintptr_t)iov[59].iov_base + 3936),
			       iov[59].iov_len - 3936);
	ut_expected_io_set_iov(expected_io, 1, iov[60].iov_base, iov[60].iov_len);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	rc = spdk_bdev_readv_blocks_with_md(desc, io_ch, iov, 61, md_buf,
					    0, 543, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 5);
	stub_complete_io(5);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
	CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS);

	/* Test a WRITE_ZEROES that would span an I/O boundary.  WRITE_ZEROES should not
	 * be split, so test that.
	 */
	bdev->optimal_io_boundary = 15;
	g_io_done = false;
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, 9, 36, 0);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	rc = spdk_bdev_write_zeroes_blocks(desc, io_ch, 9, 36, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == true);

	/* Test an UNMAP.  This should also not be split. */
	bdev->optimal_io_boundary = 16;
	g_io_done = false;
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_UNMAP, 15, 2, 0);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	rc = spdk_bdev_unmap_blocks(desc, io_ch, 15, 2, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == true);

	/* Test a FLUSH.  This should also not be split. */
	bdev->optimal_io_boundary = 16;
	g_io_done = false;
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_FLUSH, 15, 2, 0);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	rc = spdk_bdev_flush_blocks(desc, io_ch, 15, 2, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == true);

	CU_ASSERT(TAILQ_EMPTY(&g_bdev_ut_channel->expected_io));

	/* Child requests return an error status */
	bdev->optimal_io_boundary = 16;
	iov[0].iov_base = (void *)0x10000;
	iov[0].iov_len = 512 * 64;
	g_io_exp_status = SPDK_BDEV_IO_STATUS_FAILED;
	g_io_done = false;
	g_io_status = SPDK_BDEV_IO_STATUS_SUCCESS;

	rc = spdk_bdev_readv_blocks(desc, io_ch, iov, 1, 1, 64, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 5);
	stub_complete_io(4);
	CU_ASSERT(g_io_done == false);
	CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED);

	/* Test that a multi-vector command is terminated with failure, before the
	 * splitting process continues, when one of its child I/Os fails.
	 * The multi-vector command is the same as above: it needs to be split on
	 * the optimal I/O boundary and then split further due to the capacity of
	 * child iovs.
	 */
	for (i = 0; i < BDEV_IO_NUM_CHILD_IOV - 1; i++) {
		iov[i].iov_base = (void *)((i + 1) * 0x10000);
		iov[i].iov_len = 512;
	}
	iov[BDEV_IO_NUM_CHILD_IOV - 1].iov_base = (void *)(BDEV_IO_NUM_CHILD_IOV * 0x10000);
	iov[BDEV_IO_NUM_CHILD_IOV - 1].iov_len = 256;

	iov[BDEV_IO_NUM_CHILD_IOV].iov_base = (void *)((BDEV_IO_NUM_CHILD_IOV + 1) * 0x10000);
	iov[BDEV_IO_NUM_CHILD_IOV].iov_len = 256;

	iov[BDEV_IO_NUM_CHILD_IOV + 1].iov_base = (void *)((BDEV_IO_NUM_CHILD_IOV + 2) * 0x10000);
	iov[BDEV_IO_NUM_CHILD_IOV + 1].iov_len = 512;

	bdev->optimal_io_boundary = BDEV_IO_NUM_CHILD_IOV;

	g_io_exp_status = SPDK_BDEV_IO_STATUS_FAILED;
	g_io_done = false;
	g_io_status = SPDK_BDEV_IO_STATUS_SUCCESS;

	rc = spdk_bdev_readv_blocks(desc, io_ch, iov, BDEV_IO_NUM_CHILD_IOV * 2, 0,
				    BDEV_IO_NUM_CHILD_IOV + 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED);

	g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS;

	/* For this test we will create the following conditions to hit the code path
	 * where we are trying to send an IO following a split that has no iovs
	 * because we had to trim them for alignment reasons.
1686 * 1687 * - 16K boundary, our IO will start at offset 0 with a length of 0x4200 1688 * - Our IOVs are 0x212 in size so that we run into the 16K boundary at child IOV 1689 * position 30 and overshoot by 0x2e. 1690 * - That means we'll send the IO and loop back to pick up the remaining bytes at 1691 * child IOV index 31. When we do, we find that we have to shorten index 31 by 0x2e, 1692 * which eliminates that vector, so we just send the first split IO with 31 vectors 1693 * (the last one shortened) and let the completion pick up the last 2 vectors. 1694 */ 1695 bdev->optimal_io_boundary = 32; 1696 bdev->split_on_optimal_io_boundary = true; 1697 g_io_done = false; 1698 1699 /* Init all parent IOVs to 0x212 */ 1700 for (i = 0; i < BDEV_IO_NUM_CHILD_IOV + 2; i++) { 1701 iov[i].iov_base = (void *)((i + 1) * 0x10000); 1702 iov[i].iov_len = 0x212; 1703 } 1704 1705 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0, BDEV_IO_NUM_CHILD_IOV, 1706 BDEV_IO_NUM_CHILD_IOV - 1); 1707 /* expect 0-29 to be 1:1 with the parent iov */ 1708 for (i = 0; i < BDEV_IO_NUM_CHILD_IOV - 2; i++) { 1709 ut_expected_io_set_iov(expected_io, i, iov[i].iov_base, iov[i].iov_len); 1710 } 1711 1712 /* expect index 30 to be shortened to 0x1e4 (0x212 - 0x2e) because of the alignment, 1713 * where 0x2e is the amount we overshot the 16K boundary 1714 */ 1715 ut_expected_io_set_iov(expected_io, BDEV_IO_NUM_CHILD_IOV - 2, 1716 (void *)(iov[BDEV_IO_NUM_CHILD_IOV - 2].iov_base), 0x1e4); 1717 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1718 1719 /* 2nd child IO will have 2 remaining vectors, one that picks up from the one that was 1720 * shortened and takes us to the next boundary, and then a final one to get us to 1721 * 0x4200 bytes for the IO. 1722 */ 1723 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, BDEV_IO_NUM_CHILD_IOV, 1724 1, 2); 1725 /* position 30 picked up the remaining bytes to the next boundary */ 1726 ut_expected_io_set_iov(expected_io, 0, 1727 (void *)(iov[BDEV_IO_NUM_CHILD_IOV - 2].iov_base + 0x1e4), 0x2e); 1728 1729 /* position 31 picked up the rest of the transfer to get us to 0x4200 */ 1730 ut_expected_io_set_iov(expected_io, 1, 1731 (void *)(iov[BDEV_IO_NUM_CHILD_IOV - 1].iov_base), 0x1d2); 1732 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1733 1734 rc = spdk_bdev_readv_blocks(desc, io_ch, iov, BDEV_IO_NUM_CHILD_IOV + 1, 0, 1735 BDEV_IO_NUM_CHILD_IOV + 1, io_done, NULL); 1736 CU_ASSERT(rc == 0); 1737 CU_ASSERT(g_io_done == false); 1738 1739 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 1740 stub_complete_io(1); 1741 CU_ASSERT(g_io_done == false); 1742 1743 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 1744 stub_complete_io(1); 1745 CU_ASSERT(g_io_done == true); 1746 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 1747 1748 spdk_put_io_channel(io_ch); 1749 spdk_bdev_close(desc); 1750 free_bdev(bdev); 1751 spdk_bdev_finish(bdev_fini_cb, NULL); 1752 poll_threads(); 1753 } 1754 1755 static void 1756 bdev_io_max_size_and_segment_split_test(void) 1757 { 1758 struct spdk_bdev *bdev; 1759 struct spdk_bdev_desc *desc = NULL; 1760 struct spdk_io_channel *io_ch; 1761 struct spdk_bdev_opts bdev_opts = {}; 1762 struct iovec iov[BDEV_IO_NUM_CHILD_IOV * 2]; 1763 struct ut_expected_io *expected_io; 1764 uint64_t i; 1765 int rc; 1766 1767 spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts)); 1768 bdev_opts.bdev_io_pool_size = 512; 1769 bdev_opts.bdev_io_cache_size = 64; 1770 1771 bdev_opts.opts_size = sizeof(bdev_opts);
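/* The enlarged pool (512 bdev_ios, cache 64) gives the split cases below enough spdk_bdev_ios to keep up to 32 child IOs outstanding at once. */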
1772 rc = spdk_bdev_set_opts(&bdev_opts); 1773 CU_ASSERT(rc == 0); 1774 spdk_bdev_initialize(bdev_init_cb, NULL); 1775 1776 bdev = allocate_bdev("bdev0"); 1777 1778 rc = spdk_bdev_open_ext(bdev->name, true, bdev_ut_event_cb, NULL, &desc); 1779 CU_ASSERT(rc == 0); 1780 SPDK_CU_ASSERT_FATAL(desc != NULL); 1781 io_ch = spdk_bdev_get_io_channel(desc); 1782 CU_ASSERT(io_ch != NULL); 1783 1784 bdev->split_on_optimal_io_boundary = false; 1785 bdev->optimal_io_boundary = 0; 1786 1787 /* Case 0: max_num_segments == 0, 1788 * but the 2 * 512 byte payload exceeds max_segment_size == 512. 1789 */ 1790 bdev->max_segment_size = 512; 1791 bdev->max_num_segments = 0; 1792 g_io_done = false; 1793 1794 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 14, 2, 2); 1795 ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 512); 1796 ut_expected_io_set_iov(expected_io, 1, (void *)(0xF000 + 512), 512); 1797 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1798 1799 rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 2, io_done, NULL); 1800 CU_ASSERT(rc == 0); 1801 CU_ASSERT(g_io_done == false); 1802 1803 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 1804 stub_complete_io(1); 1805 CU_ASSERT(g_io_done == true); 1806 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 1807 1808 /* Case 1: max_segment_size == 0, 1809 * but the iov count 2 exceeds max_num_segments == 1. 1810 */ 1811 bdev->max_segment_size = 0; 1812 bdev->max_num_segments = 1; 1813 g_io_done = false; 1814 1815 iov[0].iov_base = (void *)0x10000; 1816 iov[0].iov_len = 512; 1817 iov[1].iov_base = (void *)0x20000; 1818 iov[1].iov_len = 8 * 512; 1819 1820 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 14, 1, 1); 1821 ut_expected_io_set_iov(expected_io, 0, iov[0].iov_base, iov[0].iov_len); 1822 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1823 1824 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 15, 8, 1); 1825 ut_expected_io_set_iov(expected_io, 0, iov[1].iov_base, iov[1].iov_len); 1826 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1827 1828 rc = spdk_bdev_readv_blocks(desc, io_ch, iov, 2, 14, 9, io_done, NULL); 1829 CU_ASSERT(rc == 0); 1830 CU_ASSERT(g_io_done == false); 1831 1832 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 1833 stub_complete_io(2); 1834 CU_ASSERT(g_io_done == true); 1835 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 1836
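/* In both cases above a value of 0 means 'no limit': Case 0 is driven purely by max_segment_size and Case 1 purely by max_num_segments. */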
1837 /* Test that a non-vector command is split correctly. 1838 * Set up the expected values before calling spdk_bdev_read_blocks. 1839 */ 1840 bdev->max_segment_size = 512; 1841 bdev->max_num_segments = 1; 1842 g_io_done = false; 1843 1844 /* Child IO 0 */ 1845 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 14, 1, 1); 1846 ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 512); 1847 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1848 1849 /* Child IO 1 */ 1850 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 15, 1, 1); 1851 ut_expected_io_set_iov(expected_io, 0, (void *)(0xF000 + 1 * 512), 512); 1852 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1853 1854 /* spdk_bdev_read_blocks will submit both children immediately. */ 1855 rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 2, io_done, NULL); 1856 CU_ASSERT(rc == 0); 1857 CU_ASSERT(g_io_done == false); 1858 1859 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 1860 stub_complete_io(2); 1861 CU_ASSERT(g_io_done == true); 1862 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 1863 1864 /* Now set up a more complex, multi-vector command that needs to be split, 1865 * including splitting iovecs. 1866 */ 1867 bdev->max_segment_size = 2 * 512; 1868 bdev->max_num_segments = 1; 1869 g_io_done = false; 1870 1871 iov[0].iov_base = (void *)0x10000; 1872 iov[0].iov_len = 2 * 512; 1873 iov[1].iov_base = (void *)0x20000; 1874 iov[1].iov_len = 4 * 512; 1875 iov[2].iov_base = (void *)0x30000; 1876 iov[2].iov_len = 6 * 512; 1877 1878 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 14, 2, 1); 1879 ut_expected_io_set_iov(expected_io, 0, iov[0].iov_base, 512 * 2); 1880 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1881 1882 /* Split iov[1] into 2 segment-sized entries, each becoming its own child IO */ 1883 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 16, 2, 1); 1884 ut_expected_io_set_iov(expected_io, 0, iov[1].iov_base, 512 * 2); 1885 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1886 1887 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 18, 2, 1); 1888 ut_expected_io_set_iov(expected_io, 0, iov[1].iov_base + 512 * 2, 512 * 2); 1889 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1890 1891 /* Split iov[2] into 3 segment-sized entries, each becoming its own child IO */ 1892 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 20, 2, 1); 1893 ut_expected_io_set_iov(expected_io, 0, iov[2].iov_base, 512 * 2); 1894 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1895 1896 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 22, 2, 1); 1897 ut_expected_io_set_iov(expected_io, 0, iov[2].iov_base + 512 * 2, 512 * 2); 1898 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1899 1900 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 24, 2, 1); 1901 ut_expected_io_set_iov(expected_io, 0, iov[2].iov_base + 512 * 4, 512 * 2); 1902 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1903 1904 rc = spdk_bdev_writev_blocks(desc, io_ch, iov, 3, 14, 12, io_done, NULL); 1905 CU_ASSERT(rc == 0); 1906 CU_ASSERT(g_io_done == false); 1907 1908 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 6); 1909 stub_complete_io(6); 1910 CU_ASSERT(g_io_done == true); 1911 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 1912 1913 /* Test a multi-vector command that needs to be split by max_segment_size and then 1914 * split further due to the capacity of the parent IO's child iovs. 1915 */ 1916 bdev->max_segment_size = 512; 1917 bdev->max_num_segments = 1; 1918 g_io_done = false; 1919 1920 for (i = 0; i < BDEV_IO_NUM_CHILD_IOV; i++) { 1921 iov[i].iov_base = (void *)((i + 1) * 0x10000); 1922 iov[i].iov_len = 512 * 2; 1923 } 1924 1925 /* Each input iov is split into 2 single-block iovs; half of the input iovs are enough 1926 * to fill all the child iov entries of a single round of splitting. 1927 */
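/* Worked math: 32 iovs * 1024 bytes = 64 blocks; each child IO carries one 512-byte iov and the parent has 32 child iov slots, hence the two rounds of 32 child completions asserted below. */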
1928 for (i = 0; i < BDEV_IO_NUM_CHILD_IOV / 2; i++) { 1929 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 2 * i, 1, 1); 1930 ut_expected_io_set_iov(expected_io, 0, iov[i].iov_base, 512); 1931 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1932 1933 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 2 * i + 1, 1, 1); 1934 ut_expected_io_set_iov(expected_io, 0, iov[i].iov_base + 512, 512); 1935 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1936 } 1937 1938 /* The remaining iovs are split in the second round */ 1939 for (i = BDEV_IO_NUM_CHILD_IOV / 2; i < BDEV_IO_NUM_CHILD_IOV; i++) { 1940 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, i * 2, 1, 1); 1941 ut_expected_io_set_iov(expected_io, 0, iov[i].iov_base, 512); 1942 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1943 1944 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, i * 2 + 1, 1, 1); 1945 ut_expected_io_set_iov(expected_io, 0, iov[i].iov_base + 512, 512); 1946 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1947 } 1948 1949 rc = spdk_bdev_readv_blocks(desc, io_ch, iov, BDEV_IO_NUM_CHILD_IOV, 0, 1950 BDEV_IO_NUM_CHILD_IOV * 2, io_done, NULL); 1951 CU_ASSERT(rc == 0); 1952 CU_ASSERT(g_io_done == false); 1953 1954 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == BDEV_IO_NUM_CHILD_IOV); 1955 stub_complete_io(BDEV_IO_NUM_CHILD_IOV); 1956 CU_ASSERT(g_io_done == false); 1957 1958 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == BDEV_IO_NUM_CHILD_IOV); 1959 stub_complete_io(BDEV_IO_NUM_CHILD_IOV); 1960 CU_ASSERT(g_io_done == true); 1961 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 1962 1963 /* An error case: a divided child IO does not come out as a multiple of the block 1964 * size, 1965 * so the request exits with an error 1966 */ 1967 bdev->max_segment_size = 512; 1968 bdev->max_num_segments = 1; 1969 g_io_done = false; 1970 1971 iov[0].iov_base = (void *)0x10000; 1972 iov[0].iov_len = 512 + 256; 1973 iov[1].iov_base = (void *)0x20000; 1974 iov[1].iov_len = 256; 1975 1976 /* iov[0] is split into 512 + 256 bytes. 1977 * 256 bytes is less than a block, and in the next round of splitting 1978 * the first child IO is found to be smaller than 1979 * the block size, hence the error exit 1980 */ 1981 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0, 1, 1); 1982 ut_expected_io_set_iov(expected_io, 0, iov[0].iov_base, 512); 1983 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1984 1985 rc = spdk_bdev_readv_blocks(desc, io_ch, iov, 2, 0, 2, io_done, NULL); 1986 CU_ASSERT(rc == 0); 1987 CU_ASSERT(g_io_done == false); 1988 1989 /* First child IO is OK */ 1990 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 1991 stub_complete_io(1); 1992 CU_ASSERT(g_io_done == true); 1993 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 1994 1995 /* error exit */ 1996 stub_complete_io(1); 1997 CU_ASSERT(g_io_done == true); 1998 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED); 1999 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 2000 2001 /* Test a multi-vector command that needs to be split by max_segment_size and then 2002 * split further due to the capacity of child iovs. 2003 * 2004 * In this case the last two iovs need to be split, but that would exceed the capacity 2005 * of the child iovs, so they must wait until the first batch has completed. 2006 */
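/* Worked math for the case below: 30 iovs of 512 bytes plus 2 iovs of 1024 bytes = 34 blocks. The first child takes the 30 single-block iovs plus iov[30] split in two (32 entries, 32 blocks); iov[31] would overflow the 32-entry limit, so it becomes the 2-block second child. */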
2007 bdev->max_segment_size = 512; 2008 bdev->max_num_segments = BDEV_IO_NUM_CHILD_IOV; 2009 g_io_done = false; 2010 2011 for (i = 0; i < BDEV_IO_NUM_CHILD_IOV - 2; i++) { 2012 iov[i].iov_base = (void *)((i + 1) * 0x10000); 2013 iov[i].iov_len = 512; 2014 } 2015 for (i = BDEV_IO_NUM_CHILD_IOV - 2; i < BDEV_IO_NUM_CHILD_IOV; i++) { 2016 iov[i].iov_base = (void *)((i + 1) * 0x10000); 2017 iov[i].iov_len = 512 * 2; 2018 } 2019 2020 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0, 2021 BDEV_IO_NUM_CHILD_IOV, BDEV_IO_NUM_CHILD_IOV); 2022 /* iovs 0 ~ (BDEV_IO_NUM_CHILD_IOV - 2) will not be split */ 2023 for (i = 0; i < BDEV_IO_NUM_CHILD_IOV - 2; i++) { 2024 ut_expected_io_set_iov(expected_io, i, iov[i].iov_base, iov[i].iov_len); 2025 } 2026 /* iov (BDEV_IO_NUM_CHILD_IOV - 2) is split */ 2027 ut_expected_io_set_iov(expected_io, i, iov[i].iov_base, 512); 2028 ut_expected_io_set_iov(expected_io, i + 1, iov[i].iov_base + 512, 512); 2029 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2030 2031 /* The child iov entries would exceed the parent IO's maximum, so the rest is split in the next round */ 2032 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, BDEV_IO_NUM_CHILD_IOV, 2, 2); 2033 ut_expected_io_set_iov(expected_io, 0, iov[i + 1].iov_base, 512); 2034 ut_expected_io_set_iov(expected_io, 1, iov[i + 1].iov_base + 512, 512); 2035 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2036 2037 rc = spdk_bdev_readv_blocks(desc, io_ch, iov, BDEV_IO_NUM_CHILD_IOV, 0, 2038 BDEV_IO_NUM_CHILD_IOV + 2, io_done, NULL); 2039 CU_ASSERT(rc == 0); 2040 CU_ASSERT(g_io_done == false); 2041 2042 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 2043 stub_complete_io(1); 2044 CU_ASSERT(g_io_done == false); 2045 2046 /* Next round */ 2047 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 2048 stub_complete_io(1); 2049 CU_ASSERT(g_io_done == true); 2050 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 2051 2052 /* This case is similar to the previous one, but the IO composed of the last few 2053 * child iov entries does not add up to a whole block, so those entries cannot be put 2054 * into this IO and must wait until the next round. 2055 */ 2056 bdev->max_segment_size = 512; 2057 bdev->max_num_segments = BDEV_IO_NUM_CHILD_IOV; 2058 g_io_done = false; 2059 2060 for (i = 0; i < BDEV_IO_NUM_CHILD_IOV - 2; i++) { 2061 iov[i].iov_base = (void *)((i + 1) * 0x10000); 2062 iov[i].iov_len = 512; 2063 } 2064 2065 for (i = BDEV_IO_NUM_CHILD_IOV - 2; i < BDEV_IO_NUM_CHILD_IOV + 2; i++) { 2066 iov[i].iov_base = (void *)((i + 1) * 0x10000); 2067 iov[i].iov_len = 128; 2068 } 2069 2070 /* The first child iovcnt isn't BDEV_IO_NUM_CHILD_IOV but BDEV_IO_NUM_CHILD_IOV - 2, 2071 * because the 2 iovs left over are not enough to fill a whole block. 2072 */ 2073 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0, 2074 BDEV_IO_NUM_CHILD_IOV - 2, BDEV_IO_NUM_CHILD_IOV - 2); 2075 for (i = 0; i < BDEV_IO_NUM_CHILD_IOV - 2; i++) { 2076 ut_expected_io_set_iov(expected_io, i, iov[i].iov_base, iov[i].iov_len); 2077 } 2078 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2079 2080 /* The second child IO waits until the first child IO completes, because the combined 2081 * iovcnt of the two would exceed the child iovcnt of the parent IO: it is built from 2082 * parent iovs (BDEV_IO_NUM_CHILD_IOV - 2) up to (BDEV_IO_NUM_CHILD_IOV + 2). 2083 */
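/* Worked math: the first child carries 30 single-block 512-byte iovs; the 4 remaining 128-byte iovs add up to exactly 512 bytes, i.e. the single-block, 4-entry second child expected below. */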
2084 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, BDEV_IO_NUM_CHILD_IOV - 2, 2085 1, 4); 2086 ut_expected_io_set_iov(expected_io, 0, iov[i].iov_base, iov[i].iov_len); 2087 ut_expected_io_set_iov(expected_io, 1, iov[i + 1].iov_base, iov[i + 1].iov_len); 2088 ut_expected_io_set_iov(expected_io, 2, iov[i + 2].iov_base, iov[i + 2].iov_len); 2089 ut_expected_io_set_iov(expected_io, 3, iov[i + 3].iov_base, iov[i + 3].iov_len); 2090 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2091 2092 rc = spdk_bdev_readv_blocks(desc, io_ch, iov, BDEV_IO_NUM_CHILD_IOV + 2, 0, 2093 BDEV_IO_NUM_CHILD_IOV - 1, io_done, NULL); 2094 CU_ASSERT(rc == 0); 2095 CU_ASSERT(g_io_done == false); 2096 2097 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 2098 stub_complete_io(1); 2099 CU_ASSERT(g_io_done == false); 2100 2101 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 2102 stub_complete_io(1); 2103 CU_ASSERT(g_io_done == true); 2104 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 2105 2106 /* A very complicated case. Each sg entry exceeds max_segment_size and 2107 * needs to be split; at the same time each child IO must be a multiple of the block 2108 * length, and the child iovcnt exceeds the parent iovcnt. 2109 */ 2110 bdev->max_segment_size = 512 + 128; 2111 bdev->max_num_segments = 3; 2112 g_io_done = false; 2113 2114 for (i = 0; i < BDEV_IO_NUM_CHILD_IOV - 2; i++) { 2115 iov[i].iov_base = (void *)((i + 1) * 0x10000); 2116 iov[i].iov_len = 512 + 256; 2117 } 2118 2119 for (i = BDEV_IO_NUM_CHILD_IOV - 2; i < BDEV_IO_NUM_CHILD_IOV + 2; i++) { 2120 iov[i].iov_base = (void *)((i + 1) * 0x10000); 2121 iov[i].iov_len = 512 + 128; 2122 } 2123 2124 /* The for() loop generates 9 child IOs in 3 rounds. Each round uses 9 child iov 2125 * entries (3 * 9 = 27 in total, counting the slot held by a trimmed entry), consumes 2126 * 4 parent iov entries and covers 6 blocks. 2127 */ 2128 for (i = 0; i < 3; i++) { 2129 uint32_t j = i * 4; 2130 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, i * 6, 2, 3); 2131 ut_expected_io_set_iov(expected_io, 0, iov[j].iov_base, 640); 2132 ut_expected_io_set_iov(expected_io, 1, iov[j].iov_base + 640, 128); 2133 ut_expected_io_set_iov(expected_io, 2, iov[j + 1].iov_base, 256); 2134 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2135 2136 /* A child IO must be a multiple of the block length, so iov[j + 2] must be split. 2137 * If its third entry were also added, the multiple-of-blocklen requirement could 2138 * not be guaranteed; the trimmed entry nevertheless 2139 * occupies one iov entry of the parent's child iov array. 2140 */
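/* Byte check for the three children built per round: child 1 = 640 + 128 + 256, child 2 = 512 + 512, child 3 = 256 + 640 + 128; each totals 1024 bytes = 2 blocks, consistent with max_segment_size = 640 and the 768-byte parent iovs. */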
2141 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, i * 6 + 2, 2, 2); 2142 ut_expected_io_set_iov(expected_io, 0, iov[j + 1].iov_base + 256, 512); 2143 ut_expected_io_set_iov(expected_io, 1, iov[j + 2].iov_base, 512); 2144 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2145 2146 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, i * 6 + 4, 2, 3); 2147 ut_expected_io_set_iov(expected_io, 0, iov[j + 2].iov_base + 512, 256); 2148 ut_expected_io_set_iov(expected_io, 1, iov[j + 3].iov_base, 640); 2149 ut_expected_io_set_iov(expected_io, 2, iov[j + 3].iov_base + 640, 128); 2150 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2151 } 2152 2153 /* Child iov position at 27 for the 10th child IO: 2154 * its parent iov entry index is 3 * 4 = 12 and its block offset is 3 * 6 = 18 2155 */ 2156 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 18, 2, 3); 2157 ut_expected_io_set_iov(expected_io, 0, iov[12].iov_base, 640); 2158 ut_expected_io_set_iov(expected_io, 1, iov[12].iov_base + 640, 128); 2159 ut_expected_io_set_iov(expected_io, 2, iov[13].iov_base, 256); 2160 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2161 2162 /* Child iov position at 30, the 11th child IO */ 2163 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 20, 2, 2); 2164 ut_expected_io_set_iov(expected_io, 0, iov[13].iov_base + 256, 512); 2165 ut_expected_io_set_iov(expected_io, 1, iov[14].iov_base, 512); 2166 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2167 2168 /* The 2nd split round begins with iovpos 0, the 12th child IO */ 2169 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 22, 2, 3); 2170 ut_expected_io_set_iov(expected_io, 0, iov[14].iov_base + 512, 256); 2171 ut_expected_io_set_iov(expected_io, 1, iov[15].iov_base, 640); 2172 ut_expected_io_set_iov(expected_io, 2, iov[15].iov_base + 640, 128); 2173 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2174 2175 /* Generate 9 more child IOs using 27 child iov entries. Each for() round consumes 2176 * 4 parent iov entries and covers 6 blocks; the parent iov index starts from 16 2177 * and the block offset starts from 24 2178 */ 2179 for (i = 0; i < 3; i++) { 2180 uint32_t j = i * 4 + 16; 2181 uint32_t offset = i * 6 + 24; 2182 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, offset, 2, 3); 2183 ut_expected_io_set_iov(expected_io, 0, iov[j].iov_base, 640); 2184 ut_expected_io_set_iov(expected_io, 1, iov[j].iov_base + 640, 128); 2185 ut_expected_io_set_iov(expected_io, 2, iov[j + 1].iov_base, 256); 2186 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2187 2188 /* A child IO must be a multiple of the block length, so iov[j + 2] must be split. 2189 * If its third entry were also added, the multiple-of-blocklen requirement could 2190 * not be guaranteed; the trimmed entry nevertheless 2191 * occupies one iov entry of the parent's child iov array. 2192 */
2193 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, offset + 2, 2, 2); 2194 ut_expected_io_set_iov(expected_io, 0, iov[j + 1].iov_base + 256, 512); 2195 ut_expected_io_set_iov(expected_io, 1, iov[j + 2].iov_base, 512); 2196 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2197 2198 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, offset + 4, 2, 3); 2199 ut_expected_io_set_iov(expected_io, 0, iov[j + 2].iov_base + 512, 256); 2200 ut_expected_io_set_iov(expected_io, 1, iov[j + 3].iov_base, 640); 2201 ut_expected_io_set_iov(expected_io, 2, iov[j + 3].iov_base + 640, 128); 2202 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2203 } 2204 2205 /* The 22nd child IO, child iov position at 30 */ 2206 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 42, 1, 1); 2207 ut_expected_io_set_iov(expected_io, 0, iov[28].iov_base, 512); 2208 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2209 2210 /* The third round */ 2211 /* Here is the 23rd child IO and child iovpos is 0 */ 2212 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 43, 2, 3); 2213 ut_expected_io_set_iov(expected_io, 0, iov[28].iov_base + 512, 256); 2214 ut_expected_io_set_iov(expected_io, 1, iov[29].iov_base, 640); 2215 ut_expected_io_set_iov(expected_io, 2, iov[29].iov_base + 640, 128); 2216 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2217 2218 /* The 24th child IO */ 2219 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 45, 3, 3); 2220 ut_expected_io_set_iov(expected_io, 0, iov[30].iov_base, 640); 2221 ut_expected_io_set_iov(expected_io, 1, iov[31].iov_base, 640); 2222 ut_expected_io_set_iov(expected_io, 2, iov[32].iov_base, 256); 2223 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2224 2225 /* The 25th child IO */ 2226 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 48, 2, 2); 2227 ut_expected_io_set_iov(expected_io, 0, iov[32].iov_base + 256, 384); 2228 ut_expected_io_set_iov(expected_io, 1, iov[33].iov_base, 640); 2229 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2230 2231 rc = spdk_bdev_readv_blocks(desc, io_ch, iov, BDEV_IO_NUM_CHILD_IOV + 2, 0, 2232 50, io_done, NULL); 2233 CU_ASSERT(rc == 0); 2234 CU_ASSERT(g_io_done == false); 2235 2236 /* The parent IO supports up to 32 child iovs, so at most 11 child IOs can be split 2237 * at a time (3 rounds * 9 entries + 3 + 2 = 32 slots); splitting 2238 * continues after the first batch is over. 2239 */ 2240 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 11); 2241 stub_complete_io(11); 2242 CU_ASSERT(g_io_done == false); 2243 2244 /* The 2nd round */ 2245 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 11); 2246 stub_complete_io(11); 2247 CU_ASSERT(g_io_done == false); 2248 2249 /* The last round */ 2250 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 3); 2251 stub_complete_io(3); 2252 CU_ASSERT(g_io_done == true); 2253 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 2254 2255 /* Test a WRITE_ZEROES. This should also not be split. */
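/* WRITE_ZEROES, UNMAP and FLUSH carry no data iovs, so the segment limits set below should not apply to them; each is expected to reach the stub as a single unsplit request. */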
2256 bdev->max_segment_size = 512; 2257 bdev->max_num_segments = 1; 2258 g_io_done = false; 2259 2260 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, 9, 36, 0); 2261 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2262 2263 rc = spdk_bdev_write_zeroes_blocks(desc, io_ch, 9, 36, io_done, NULL); 2264 CU_ASSERT(rc == 0); 2265 CU_ASSERT(g_io_done == false); 2266 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 2267 stub_complete_io(1); 2268 CU_ASSERT(g_io_done == true); 2269 2270 /* Test an UNMAP. This should also not be split. */ 2271 g_io_done = false; 2272 2273 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_UNMAP, 15, 4, 0); 2274 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2275 2276 rc = spdk_bdev_unmap_blocks(desc, io_ch, 15, 4, io_done, NULL); 2277 CU_ASSERT(rc == 0); 2278 CU_ASSERT(g_io_done == false); 2279 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 2280 stub_complete_io(1); 2281 CU_ASSERT(g_io_done == true); 2282 2283 /* Test a FLUSH. This should also not be split. */ 2284 g_io_done = false; 2285 2286 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_FLUSH, 15, 2, 0); 2287 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2288 2289 rc = spdk_bdev_flush_blocks(desc, io_ch, 15, 2, io_done, NULL); 2290 CU_ASSERT(rc == 0); 2291 CU_ASSERT(g_io_done == false); 2292 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 2293 stub_complete_io(1); 2294 CU_ASSERT(g_io_done == true); 2295 2296 spdk_put_io_channel(io_ch); 2297 spdk_bdev_close(desc); 2298 free_bdev(bdev); 2299 spdk_bdev_finish(bdev_fini_cb, NULL); 2300 poll_threads(); 2301 } 2302 2303 static void 2304 bdev_io_mix_split_test(void) 2305 { 2306 struct spdk_bdev *bdev; 2307 struct spdk_bdev_desc *desc = NULL; 2308 struct spdk_io_channel *io_ch; 2309 struct spdk_bdev_opts bdev_opts = {}; 2310 struct iovec iov[BDEV_IO_NUM_CHILD_IOV * 2]; 2311 struct ut_expected_io *expected_io; 2312 uint64_t i; 2313 int rc; 2314 2315 spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts)); 2316 bdev_opts.bdev_io_pool_size = 512; 2317 bdev_opts.bdev_io_cache_size = 64; 2318 2319 rc = spdk_bdev_set_opts(&bdev_opts); 2320 CU_ASSERT(rc == 0); 2321 spdk_bdev_initialize(bdev_init_cb, NULL); 2322 2323 bdev = allocate_bdev("bdev0"); 2324 2325 rc = spdk_bdev_open_ext(bdev->name, true, bdev_ut_event_cb, NULL, &desc); 2326 CU_ASSERT(rc == 0); 2327 SPDK_CU_ASSERT_FATAL(desc != NULL); 2328 io_ch = spdk_bdev_get_io_channel(desc); 2329 CU_ASSERT(io_ch != NULL); 2330 2331 /* First case: optimal_io_boundary == max_segment_size * max_num_segments */ 2332 bdev->split_on_optimal_io_boundary = true; 2333 bdev->optimal_io_boundary = 16; 2334 2335 bdev->max_segment_size = 512; 2336 bdev->max_num_segments = 16; 2337 g_io_done = false; 2338 2339 /* An IO crossing the IO boundary requires a split. 2340 * 2 child IOs in total. 2341 */
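/* First case numbers: max_segment_size * max_num_segments = 512 * 16 = 8192 bytes = 16 blocks = one optimal_io_boundary, so the boundary split and the segment limits coincide: the 4-block read at offset 14 becomes the two 2-block children below. */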
2342 2343 /* The 1st child IO is split into multiple segment entries by max_segment_size */ 2344 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 14, 2, 2); 2345 ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 512); 2346 ut_expected_io_set_iov(expected_io, 1, (void *)(0xF000 + 512), 512); 2347 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2348 2349 /* The 2nd child IO is split into multiple segment entries by max_segment_size */ 2350 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 16, 2, 2); 2351 ut_expected_io_set_iov(expected_io, 0, (void *)(0xF000 + 2 * 512), 512); 2352 ut_expected_io_set_iov(expected_io, 1, (void *)(0xF000 + 3 * 512), 512); 2353 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2354 2355 rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 4, io_done, NULL); 2356 CU_ASSERT(rc == 0); 2357 CU_ASSERT(g_io_done == false); 2358 2359 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 2360 stub_complete_io(2); 2361 CU_ASSERT(g_io_done == true); 2362 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 2363 2364 /* Second case: optimal_io_boundary > max_segment_size * max_num_segments */ 2365 bdev->max_segment_size = 15 * 512; 2366 bdev->max_num_segments = 1; 2367 g_io_done = false; 2368 2369 /* An IO crossing the IO boundary requires a split. 2370 * The 1st child IO's segment exceeds max_segment_size, so it is split into multiple 2371 * segment entries, 2372 * and then into 2 child IOs because of max_num_segments. 2373 * 3 child IOs in total. 2374 */ 2375 2376 /* The first 2 child IOs are within one IO boundary: because 2377 * optimal_io_boundary > max_segment_size * max_num_segments, 2378 * the first boundary's worth of data is split into these 2 IOs. 2379 */ 2380 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0, 15, 1); 2381 ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 512 * 15); 2382 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2383 2384 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 15, 1, 1); 2385 ut_expected_io_set_iov(expected_io, 0, (void *)(0xF000 + 512 * 15), 512); 2386 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2387 2388 /* The 3rd child IO exists because of the IO boundary */ 2389 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 16, 2, 1); 2390 ut_expected_io_set_iov(expected_io, 0, (void *)(0xF000 + 512 * 16), 512 * 2); 2391 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2392 2393 rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 0, 18, io_done, NULL); 2394 CU_ASSERT(rc == 0); 2395 CU_ASSERT(g_io_done == false); 2396 2397 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 3); 2398 stub_complete_io(3); 2399 CU_ASSERT(g_io_done == true); 2400 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 2401 2402 /* Third case: optimal_io_boundary < max_segment_size * max_num_segments */ 2403 bdev->max_segment_size = 17 * 512; 2404 bdev->max_num_segments = 1; 2405 g_io_done = false; 2406 2407 /* An IO crossing the IO boundary requires a split, 2408 * but the child IOs do not split further. 2409 * 2 child IOs in total. 2410 */
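/* Third case numbers: max_segment_size = 17 * 512 exceeds the 16-block boundary, so each boundary-sized child fits in a single segment and the 18-block read becomes a 16-block child plus a 2-block child. */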
2411 2412 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0, 16, 1); 2413 ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 512 * 16); 2414 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2415 2416 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 16, 2, 1); 2417 ut_expected_io_set_iov(expected_io, 0, (void *)(0xF000 + 512 * 16), 512 * 2); 2418 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2419 2420 rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 0, 18, io_done, NULL); 2421 CU_ASSERT(rc == 0); 2422 CU_ASSERT(g_io_done == false); 2423 2424 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 2425 stub_complete_io(2); 2426 CU_ASSERT(g_io_done == true); 2427 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 2428 2429 /* Now set up a more complex, multi-vector command that needs to be split, 2430 * including splitting iovecs. 2431 * optimal_io_boundary < max_segment_size * max_num_segments 2432 */ 2433 bdev->max_segment_size = 3 * 512; 2434 bdev->max_num_segments = 6; 2435 g_io_done = false; 2436 2437 iov[0].iov_base = (void *)0x10000; 2438 iov[0].iov_len = 4 * 512; 2439 iov[1].iov_base = (void *)0x20000; 2440 iov[1].iov_len = 4 * 512; 2441 iov[2].iov_base = (void *)0x30000; 2442 iov[2].iov_len = 10 * 512; 2443 2444 /* An IO crossing the IO boundary requires a split. 2445 * Each iov's segment size exceeds max_segment_size and, after 2446 * splitting by segment size, the number of segments exceeds max_num_segments, 2447 * so the 1st child IO is split into 2 child IOs. 2448 * 3 child IOs in total. 2449 */ 2450 2451 /* The first 2 child IOs are within one IO boundary. 2452 * After splitting by segment size, the segment count exceeds max_num_segments, 2453 * so the boundary's data is split into 2 child IOs. 2454 */ 2455 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 0, 14, 6); 2456 ut_expected_io_set_iov(expected_io, 0, iov[0].iov_base, 512 * 3); 2457 ut_expected_io_set_iov(expected_io, 1, iov[0].iov_base + 512 * 3, 512); 2458 ut_expected_io_set_iov(expected_io, 2, iov[1].iov_base, 512 * 3); 2459 ut_expected_io_set_iov(expected_io, 3, iov[1].iov_base + 512 * 3, 512); 2460 ut_expected_io_set_iov(expected_io, 4, iov[2].iov_base, 512 * 3); 2461 ut_expected_io_set_iov(expected_io, 5, iov[2].iov_base + 512 * 3, 512 * 3); 2462 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2463 2464 /* The 2nd child IO carries the leftover segment entry */ 2465 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 14, 2, 1); 2466 ut_expected_io_set_iov(expected_io, 0, iov[2].iov_base + 512 * 6, 512 * 2); 2467 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2468 2469 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 16, 2, 1); 2470 ut_expected_io_set_iov(expected_io, 0, iov[2].iov_base + 512 * 8, 512 * 2); 2471 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2472 2473 rc = spdk_bdev_writev_blocks(desc, io_ch, iov, 3, 0, 18, io_done, NULL); 2474 CU_ASSERT(rc == 0); 2475 CU_ASSERT(g_io_done == false); 2476 2477 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 3); 2478 stub_complete_io(3); 2479 CU_ASSERT(g_io_done == true); 2480 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 2481 2482 /* A very complicated case. Each sg entry exceeds max_segment_size 2483 * and the IO is split on the IO boundary; 2484 * optimal_io_boundary < max_segment_size * max_num_segments. 2485 */
2486 bdev->max_segment_size = 3 * 512; 2487 bdev->max_num_segments = BDEV_IO_NUM_CHILD_IOV; 2488 g_io_done = false; 2489 2490 for (i = 0; i < 20; i++) { 2491 iov[i].iov_base = (void *)((i + 1) * 0x10000); 2492 iov[i].iov_len = 512 * 4; 2493 } 2494 2495 /* An IO crossing the IO boundary requires a split. 2496 * The 80-block length splits into 5 child IOs based on the offset and the IO boundary, 2497 * and each iov entry is split into 2 entries because of max_segment_size. 2498 * 5 child IOs in total. 2499 */ 2500 2501 /* 4 iov entries fit in one IO boundary and each iov entry splits into 2, 2502 * so each child IO occupies 8 child iov entries. 2503 */ 2504 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 0, 16, 8); 2505 for (i = 0; i < 4; i++) { 2506 int iovcnt = i * 2; 2507 ut_expected_io_set_iov(expected_io, iovcnt, iov[i].iov_base, 512 * 3); 2508 ut_expected_io_set_iov(expected_io, iovcnt + 1, iov[i].iov_base + 512 * 3, 512); 2509 } 2510 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2511 2512 /* 2nd child IO; 16 child iov entries of the parent IO used in total */ 2513 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 16, 16, 8); 2514 for (i = 4; i < 8; i++) { 2515 int iovcnt = (i - 4) * 2; 2516 ut_expected_io_set_iov(expected_io, iovcnt, iov[i].iov_base, 512 * 3); 2517 ut_expected_io_set_iov(expected_io, iovcnt + 1, iov[i].iov_base + 512 * 3, 512); 2518 } 2519 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2520 2521 /* 3rd child IO; 24 child iov entries of the parent IO used in total */ 2522 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 32, 16, 8); 2523 for (i = 8; i < 12; i++) { 2524 int iovcnt = (i - 8) * 2; 2525 ut_expected_io_set_iov(expected_io, iovcnt, iov[i].iov_base, 512 * 3); 2526 ut_expected_io_set_iov(expected_io, iovcnt + 1, iov[i].iov_base + 512 * 3, 512); 2527 } 2528 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2529 2530 /* 4th child IO; all 32 child iov entries of the parent IO used in total */ 2531 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 48, 16, 8); 2532 for (i = 12; i < 16; i++) { 2533 int iovcnt = (i - 12) * 2; 2534 ut_expected_io_set_iov(expected_io, iovcnt, iov[i].iov_base, 512 * 3); 2535 ut_expected_io_set_iov(expected_io, iovcnt + 1, iov[i].iov_base + 512 * 3, 512); 2536 } 2537 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2538 2539 /* The 5th child IO: the parent's child iov entries are exhausted, so it must be 2540 * split in the next round.
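* (The first 4 child IOs consume 4 * 8 = 32 child iov entries, the whole array, which is why only 4 IOs are outstanding in the first round below and the 5th follows in a second round.)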
2541 */ 2542 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 64, 16, 8); 2543 for (i = 16; i < 20; i++) { 2544 int iovcnt = (i - 16) * 2; 2545 ut_expected_io_set_iov(expected_io, iovcnt, iov[i].iov_base, 512 * 3); 2546 ut_expected_io_set_iov(expected_io, iovcnt + 1, iov[i].iov_base + 512 * 3, 512); 2547 } 2548 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2549 2550 rc = spdk_bdev_writev_blocks(desc, io_ch, iov, 20, 0, 80, io_done, NULL); 2551 CU_ASSERT(rc == 0); 2552 CU_ASSERT(g_io_done == false); 2553 2554 /* First split round */ 2555 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 4); 2556 stub_complete_io(4); 2557 CU_ASSERT(g_io_done == false); 2558 2559 /* Second split round */ 2560 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 2561 stub_complete_io(1); 2562 CU_ASSERT(g_io_done == true); 2563 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 2564 2565 spdk_put_io_channel(io_ch); 2566 spdk_bdev_close(desc); 2567 free_bdev(bdev); 2568 spdk_bdev_finish(bdev_fini_cb, NULL); 2569 poll_threads(); 2570 } 2571 2572 static void 2573 bdev_io_split_with_io_wait(void) 2574 { 2575 struct spdk_bdev *bdev; 2576 struct spdk_bdev_desc *desc = NULL; 2577 struct spdk_io_channel *io_ch; 2578 struct spdk_bdev_channel *channel; 2579 struct spdk_bdev_mgmt_channel *mgmt_ch; 2580 struct spdk_bdev_opts bdev_opts = {}; 2581 struct iovec iov[3]; 2582 struct ut_expected_io *expected_io; 2583 int rc; 2584 2585 spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts)); 2586 bdev_opts.bdev_io_pool_size = 2; 2587 bdev_opts.bdev_io_cache_size = 1; 2588 2589 rc = spdk_bdev_set_opts(&bdev_opts); 2590 CU_ASSERT(rc == 0); 2591 spdk_bdev_initialize(bdev_init_cb, NULL); 2592 2593 bdev = allocate_bdev("bdev0"); 2594 2595 rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc); 2596 CU_ASSERT(rc == 0); 2597 CU_ASSERT(desc != NULL); 2598 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 2599 io_ch = spdk_bdev_get_io_channel(desc); 2600 CU_ASSERT(io_ch != NULL); 2601 channel = spdk_io_channel_get_ctx(io_ch); 2602 mgmt_ch = channel->shared_resource->mgmt_ch; 2603 2604 bdev->optimal_io_boundary = 16; 2605 bdev->split_on_optimal_io_boundary = true; 2606 2607 rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, NULL); 2608 CU_ASSERT(rc == 0); 2609 2610 /* Now test that a single-vector command is split correctly. 2611 * Offset 14, length 8, payload 0xF000 2612 * Child - Offset 14, length 2, payload 0xF000 2613 * Child - Offset 16, length 6, payload 0xF000 + 2 * 512 2614 * 2615 * Set up the expected values before calling spdk_bdev_read_blocks 2616 */ 2617 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 14, 2, 1); 2618 ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 2 * 512); 2619 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2620 2621 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 16, 6, 1); 2622 ut_expected_io_set_iov(expected_io, 0, (void *)(0xF000 + 2 * 512), 6 * 512); 2623 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2624 2625 /* The following children will be submitted sequentially due to the capacity of 2626 * spdk_bdev_io. 
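* (bdev_io_pool_size was set to 2 for this test: the parent split IO holds one spdk_bdev_io and each child needs another, so only one child can be in flight at a time.)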
2627 */ 2628 2629 /* The first child I/O will be queued to wait until an spdk_bdev_io becomes available */ 2630 rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 8, io_done, NULL); 2631 CU_ASSERT(rc == 0); 2632 CU_ASSERT(!TAILQ_EMPTY(&mgmt_ch->io_wait_queue)); 2633 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 2634 2635 /* Completing the first read I/O will submit the first child */ 2636 stub_complete_io(1); 2637 CU_ASSERT(TAILQ_EMPTY(&mgmt_ch->io_wait_queue)); 2638 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 2639 2640 /* Completing the first child will submit the second child */ 2641 stub_complete_io(1); 2642 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 2643 2644 /* Complete the second child I/O. This should result in our callback getting 2645 * invoked since the parent I/O is now complete. 2646 */ 2647 stub_complete_io(1); 2648 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 2649 2650 /* Now set up a more complex, multi-vector command that needs to be split, 2651 * including splitting iovecs. 2652 */ 2653 iov[0].iov_base = (void *)0x10000; 2654 iov[0].iov_len = 512; 2655 iov[1].iov_base = (void *)0x20000; 2656 iov[1].iov_len = 20 * 512; 2657 iov[2].iov_base = (void *)0x30000; 2658 iov[2].iov_len = 11 * 512; 2659 2660 g_io_done = false; 2661 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 14, 2, 2); 2662 ut_expected_io_set_iov(expected_io, 0, (void *)0x10000, 512); 2663 ut_expected_io_set_iov(expected_io, 1, (void *)0x20000, 512); 2664 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2665 2666 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 16, 16, 1); 2667 ut_expected_io_set_iov(expected_io, 0, (void *)(0x20000 + 512), 16 * 512); 2668 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2669 2670 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 32, 14, 2); 2671 ut_expected_io_set_iov(expected_io, 0, (void *)(0x20000 + 17 * 512), 3 * 512); 2672 ut_expected_io_set_iov(expected_io, 1, (void *)0x30000, 11 * 512); 2673 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2674 2675 rc = spdk_bdev_writev_blocks(desc, io_ch, iov, 3, 14, 32, io_done, NULL); 2676 CU_ASSERT(rc == 0); 2677 CU_ASSERT(g_io_done == false); 2678 2679 /* The following children will be submitted sequentially due to the capacity of 2680 * spdk_bdev_io. 2681 */ 2682 2683 /* Completing the first child will submit the second child */ 2684 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 2685 stub_complete_io(1); 2686 CU_ASSERT(g_io_done == false); 2687 2688 /* Completing the second child will submit the third child */ 2689 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 2690 stub_complete_io(1); 2691 CU_ASSERT(g_io_done == false); 2692 2693 /* Completing the third child will result in our callback getting invoked 2694 * since the parent I/O is now complete. 
2695 */ 2696 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 2697 stub_complete_io(1); 2698 CU_ASSERT(g_io_done == true); 2699 2700 CU_ASSERT(TAILQ_EMPTY(&g_bdev_ut_channel->expected_io)); 2701 2702 spdk_put_io_channel(io_ch); 2703 spdk_bdev_close(desc); 2704 free_bdev(bdev); 2705 spdk_bdev_finish(bdev_fini_cb, NULL); 2706 poll_threads(); 2707 } 2708 2709 static void 2710 bdev_io_alignment(void) 2711 { 2712 struct spdk_bdev *bdev; 2713 struct spdk_bdev_desc *desc = NULL; 2714 struct spdk_io_channel *io_ch; 2715 struct spdk_bdev_opts bdev_opts = {}; 2716 int rc; 2717 void *buf = NULL; 2718 struct iovec iovs[2]; 2719 int iovcnt; 2720 uint64_t alignment; 2721 2722 spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts)); 2723 bdev_opts.bdev_io_pool_size = 20; 2724 bdev_opts.bdev_io_cache_size = 2; 2725 2726 rc = spdk_bdev_set_opts(&bdev_opts); 2727 CU_ASSERT(rc == 0); 2728 spdk_bdev_initialize(bdev_init_cb, NULL); 2729 2730 fn_table.submit_request = stub_submit_request_get_buf; 2731 bdev = allocate_bdev("bdev0"); 2732 2733 rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc); 2734 CU_ASSERT(rc == 0); 2735 CU_ASSERT(desc != NULL); 2736 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 2737 io_ch = spdk_bdev_get_io_channel(desc); 2738 CU_ASSERT(io_ch != NULL); 2739 2740 /* Create aligned buffer */ 2741 rc = posix_memalign(&buf, 4096, 8192); 2742 SPDK_CU_ASSERT_FATAL(rc == 0); 2743 2744 /* Pass aligned single buffer with no alignment required */ 2745 alignment = 1; 2746 bdev->required_alignment = spdk_u32log2(alignment); 2747 2748 rc = spdk_bdev_write_blocks(desc, io_ch, buf, 0, 1, io_done, NULL); 2749 CU_ASSERT(rc == 0); 2750 stub_complete_io(1); 2751 CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt, 2752 alignment)); 2753 2754 rc = spdk_bdev_read_blocks(desc, io_ch, buf, 0, 1, io_done, NULL); 2755 CU_ASSERT(rc == 0); 2756 stub_complete_io(1); 2757 CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt, 2758 alignment)); 2759 2760 /* Pass unaligned single buffer with no alignment required */ 2761 alignment = 1; 2762 bdev->required_alignment = spdk_u32log2(alignment); 2763 2764 rc = spdk_bdev_write_blocks(desc, io_ch, buf + 4, 0, 1, io_done, NULL); 2765 CU_ASSERT(rc == 0); 2766 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0); 2767 CU_ASSERT(g_bdev_io->u.bdev.iovs[0].iov_base == buf + 4); 2768 stub_complete_io(1); 2769 2770 rc = spdk_bdev_read_blocks(desc, io_ch, buf + 4, 0, 1, io_done, NULL); 2771 CU_ASSERT(rc == 0); 2772 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0); 2773 CU_ASSERT(g_bdev_io->u.bdev.iovs[0].iov_base == buf + 4); 2774 stub_complete_io(1); 2775 2776 /* Pass unaligned single buffer with 512 alignment required */ 2777 alignment = 512; 2778 bdev->required_alignment = spdk_u32log2(alignment); 2779 2780 rc = spdk_bdev_write_blocks(desc, io_ch, buf + 4, 0, 1, io_done, NULL); 2781 CU_ASSERT(rc == 0); 2782 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 1); 2783 CU_ASSERT(g_bdev_io->u.bdev.iovs == &g_bdev_io->internal.bounce_iov); 2784 CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt, 2785 alignment)); 2786 stub_complete_io(1); 2787 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0); 2788 2789 rc = spdk_bdev_read_blocks(desc, io_ch, buf + 4, 0, 1, io_done, NULL); 2790 CU_ASSERT(rc == 0); 2791 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 1); 2792 CU_ASSERT(g_bdev_io->u.bdev.iovs == &g_bdev_io->internal.bounce_iov); 2793 CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt, 
2794 alignment)); 2795 stub_complete_io(1); 2796 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0); 2797 2798 /* Pass unaligned single buffer with 4096 alignment required */ 2799 alignment = 4096; 2800 bdev->required_alignment = spdk_u32log2(alignment); 2801 2802 rc = spdk_bdev_write_blocks(desc, io_ch, buf + 8, 0, 1, io_done, NULL); 2803 CU_ASSERT(rc == 0); 2804 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 1); 2805 CU_ASSERT(g_bdev_io->u.bdev.iovs == &g_bdev_io->internal.bounce_iov); 2806 CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt, 2807 alignment)); 2808 stub_complete_io(1); 2809 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0); 2810 2811 rc = spdk_bdev_read_blocks(desc, io_ch, buf + 8, 0, 1, io_done, NULL); 2812 CU_ASSERT(rc == 0); 2813 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 1); 2814 CU_ASSERT(g_bdev_io->u.bdev.iovs == &g_bdev_io->internal.bounce_iov); 2815 CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt, 2816 alignment)); 2817 stub_complete_io(1); 2818 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0); 2819 2820 /* Pass aligned iovs with no alignment required */ 2821 alignment = 1; 2822 bdev->required_alignment = spdk_u32log2(alignment); 2823 2824 iovcnt = 1; 2825 iovs[0].iov_base = buf; 2826 iovs[0].iov_len = 512; 2827 2828 rc = spdk_bdev_writev(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL); 2829 CU_ASSERT(rc == 0); 2830 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0); 2831 stub_complete_io(1); 2832 CU_ASSERT(g_bdev_io->u.bdev.iovs[0].iov_base == iovs[0].iov_base); 2833 2834 rc = spdk_bdev_readv(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL); 2835 CU_ASSERT(rc == 0); 2836 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0); 2837 stub_complete_io(1); 2838 CU_ASSERT(g_bdev_io->u.bdev.iovs[0].iov_base == iovs[0].iov_base); 2839 2840 /* Pass unaligned iovs with no alignment required */ 2841 alignment = 1; 2842 bdev->required_alignment = spdk_u32log2(alignment); 2843 2844 iovcnt = 2; 2845 iovs[0].iov_base = buf + 16; 2846 iovs[0].iov_len = 256; 2847 iovs[1].iov_base = buf + 16 + 256 + 32; 2848 iovs[1].iov_len = 256; 2849 2850 rc = spdk_bdev_writev(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL); 2851 CU_ASSERT(rc == 0); 2852 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0); 2853 stub_complete_io(1); 2854 CU_ASSERT(g_bdev_io->u.bdev.iovs[0].iov_base == iovs[0].iov_base); 2855 2856 rc = spdk_bdev_readv(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL); 2857 CU_ASSERT(rc == 0); 2858 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0); 2859 stub_complete_io(1); 2860 CU_ASSERT(g_bdev_io->u.bdev.iovs[0].iov_base == iovs[0].iov_base); 2861 2862 /* Pass unaligned iov with 2048 alignment required */ 2863 alignment = 2048; 2864 bdev->required_alignment = spdk_u32log2(alignment); 2865 2866 iovcnt = 2; 2867 iovs[0].iov_base = buf + 16; 2868 iovs[0].iov_len = 256; 2869 iovs[1].iov_base = buf + 16 + 256 + 32; 2870 iovs[1].iov_len = 256; 2871 2872 rc = spdk_bdev_writev(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL); 2873 CU_ASSERT(rc == 0); 2874 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == iovcnt); 2875 CU_ASSERT(g_bdev_io->u.bdev.iovs == &g_bdev_io->internal.bounce_iov); 2876 CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt, 2877 alignment)); 2878 stub_complete_io(1); 2879 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0); 2880 2881 rc = spdk_bdev_readv(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL); 2882 CU_ASSERT(rc == 0); 2883 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == iovcnt); 2884 
CU_ASSERT(g_bdev_io->u.bdev.iovs == &g_bdev_io->internal.bounce_iov); 2885 CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt, 2886 alignment)); 2887 stub_complete_io(1); 2888 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0); 2889 2890 /* Pass an iov without an allocated buffer, no alignment required */ 2891 alignment = 1; 2892 bdev->required_alignment = spdk_u32log2(alignment); 2893 2894 iovcnt = 1; 2895 iovs[0].iov_base = NULL; 2896 iovs[0].iov_len = 0; 2897 2898 rc = spdk_bdev_readv(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL); 2899 CU_ASSERT(rc == 0); 2900 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0); 2901 CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt, 2902 alignment)); 2903 stub_complete_io(1); 2904 2905 /* Pass an iov without an allocated buffer, 1024 alignment required */ 2906 alignment = 1024; 2907 bdev->required_alignment = spdk_u32log2(alignment); 2908 2909 iovcnt = 1; 2910 iovs[0].iov_base = NULL; 2911 iovs[0].iov_len = 0; 2912 2913 rc = spdk_bdev_readv(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL); 2914 CU_ASSERT(rc == 0); 2915 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0); 2916 CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt, 2917 alignment)); 2918 stub_complete_io(1); 2919 2920 spdk_put_io_channel(io_ch); 2921 spdk_bdev_close(desc); 2922 free_bdev(bdev); 2923 fn_table.submit_request = stub_submit_request; 2924 spdk_bdev_finish(bdev_fini_cb, NULL); 2925 poll_threads(); 2926 2927 free(buf); 2928 } 2929 2930 static void 2931 bdev_io_alignment_with_boundary(void) 2932 { 2933 struct spdk_bdev *bdev; 2934 struct spdk_bdev_desc *desc = NULL; 2935 struct spdk_io_channel *io_ch; 2936 struct spdk_bdev_opts bdev_opts = {}; 2937 int rc; 2938 void *buf = NULL; 2939 struct iovec iovs[2]; 2940 int iovcnt; 2941 uint64_t alignment; 2942 2943 spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts)); 2944 bdev_opts.bdev_io_pool_size = 20; 2945 bdev_opts.bdev_io_cache_size = 2; 2946 2947 bdev_opts.opts_size = sizeof(bdev_opts); 2948 rc = spdk_bdev_set_opts(&bdev_opts); 2949 CU_ASSERT(rc == 0); 2950 spdk_bdev_initialize(bdev_init_cb, NULL); 2951 2952 fn_table.submit_request = stub_submit_request_get_buf; 2953 bdev = allocate_bdev("bdev0"); 2954 2955 rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc); 2956 CU_ASSERT(rc == 0); 2957 CU_ASSERT(desc != NULL); 2958 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 2959 io_ch = spdk_bdev_get_io_channel(desc); 2960 CU_ASSERT(io_ch != NULL); 2961 2962 /* Create aligned buffer */ 2963 rc = posix_memalign(&buf, 4096, 131072); 2964 SPDK_CU_ASSERT_FATAL(rc == 0); 2965 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 2966 2967 /* 512 * 3 with 2 IO boundary, allocate a small data buffer from the bdev layer */ 2968 alignment = 512; 2969 bdev->required_alignment = spdk_u32log2(alignment); 2970 bdev->optimal_io_boundary = 2; 2971 bdev->split_on_optimal_io_boundary = true; 2972 2973 iovcnt = 1; 2974 iovs[0].iov_base = NULL; 2975 iovs[0].iov_len = 512 * 3; 2976 2977 rc = spdk_bdev_readv_blocks(desc, io_ch, iovs, iovcnt, 1, 3, io_done, NULL); 2978 CU_ASSERT(rc == 0); 2979 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 2980 stub_complete_io(2); 2981 2982 /* 8KiB with 16 IO boundary, allocate a large data buffer from the bdev layer */ 2983 alignment = 512; 2984 bdev->required_alignment = spdk_u32log2(alignment); 2985 bdev->optimal_io_boundary = 16; 2986 bdev->split_on_optimal_io_boundary = true; 2987 2988 iovcnt = 1; 2989 iovs[0].iov_base = NULL; 2990 iovs[0].iov_len = 512 * 16; 2991
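/* With iov_base == NULL the bdev layer allocates the data buffer itself via the spdk_bdev_io_get_buf path (exercised here by stub_submit_request_get_buf), drawing on its small or large internal buffer pool depending on the IO size; the split children then point into that buffer. */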
2992 rc = spdk_bdev_readv_blocks(desc, io_ch, iovs, iovcnt, 1, 16, io_done, NULL); 2993 CU_ASSERT(rc == 0); 2994 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 2995 stub_complete_io(2); 2996 2997 /* 512 * 160 with 128 IO boundary, 63.5KiB + 16.5KiB for the two child requests */ 2998 alignment = 512; 2999 bdev->required_alignment = spdk_u32log2(alignment); 3000 bdev->optimal_io_boundary = 128; 3001 bdev->split_on_optimal_io_boundary = true; 3002 3003 iovcnt = 1; 3004 iovs[0].iov_base = buf + 16; 3005 iovs[0].iov_len = 512 * 160; 3006 rc = spdk_bdev_readv_blocks(desc, io_ch, iovs, iovcnt, 1, 160, io_done, NULL); 3007 CU_ASSERT(rc == 0); 3008 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 3009 stub_complete_io(2); 3010 3011 /* 512 * 3 with 2 IO boundary */ 3012 alignment = 512; 3013 bdev->required_alignment = spdk_u32log2(alignment); 3014 bdev->optimal_io_boundary = 2; 3015 bdev->split_on_optimal_io_boundary = true; 3016 3017 iovcnt = 2; 3018 iovs[0].iov_base = buf + 16; 3019 iovs[0].iov_len = 512; 3020 iovs[1].iov_base = buf + 16 + 512 + 32; 3021 iovs[1].iov_len = 1024; 3022 3023 rc = spdk_bdev_writev_blocks(desc, io_ch, iovs, iovcnt, 1, 3, io_done, NULL); 3024 CU_ASSERT(rc == 0); 3025 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 3026 stub_complete_io(2); 3027 3028 rc = spdk_bdev_readv_blocks(desc, io_ch, iovs, iovcnt, 1, 3, io_done, NULL); 3029 CU_ASSERT(rc == 0); 3030 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 3031 stub_complete_io(2); 3032 3033 /* 512 * 64 with 32 IO boundary */ 3034 bdev->optimal_io_boundary = 32; 3035 iovcnt = 2; 3036 iovs[0].iov_base = buf + 16; 3037 iovs[0].iov_len = 16384; 3038 iovs[1].iov_base = buf + 16 + 16384 + 32; 3039 iovs[1].iov_len = 16384; 3040 3041 rc = spdk_bdev_writev_blocks(desc, io_ch, iovs, iovcnt, 1, 64, io_done, NULL); 3042 CU_ASSERT(rc == 0); 3043 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 3); 3044 stub_complete_io(3); 3045 3046 rc = spdk_bdev_readv_blocks(desc, io_ch, iovs, iovcnt, 1, 64, io_done, NULL); 3047 CU_ASSERT(rc == 0); 3048 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 3); 3049 stub_complete_io(3); 3050 3051 /* 512 * 160 with 32 IO boundary */ 3052 iovcnt = 1; 3053 iovs[0].iov_base = buf + 16; 3054 iovs[0].iov_len = 16384 + 65536; 3055 3056 rc = spdk_bdev_writev_blocks(desc, io_ch, iovs, iovcnt, 1, 160, io_done, NULL); 3057 CU_ASSERT(rc == 0); 3058 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 6); 3059 stub_complete_io(6); 3060 3061 spdk_put_io_channel(io_ch); 3062 spdk_bdev_close(desc); 3063 free_bdev(bdev); 3064 fn_table.submit_request = stub_submit_request; 3065 spdk_bdev_finish(bdev_fini_cb, NULL); 3066 poll_threads(); 3067 3068 free(buf); 3069 } 3070 3071 static void 3072 histogram_status_cb(void *cb_arg, int status) 3073 { 3074 g_status = status; 3075 } 3076 3077 static void 3078 histogram_data_cb(void *cb_arg, int status, struct spdk_histogram_data *histogram) 3079 { 3080 g_status = status; 3081 g_histogram = histogram; 3082 } 3083 3084 static void 3085 histogram_io_count(void *ctx, uint64_t start, uint64_t end, uint64_t count, 3086 uint64_t total, uint64_t so_far) 3087 { 3088 g_count += count; 3089 } 3090 3091 static void 3092 bdev_histograms(void) 3093 { 3094 struct spdk_bdev *bdev; 3095 struct spdk_bdev_desc *desc = NULL; 3096 struct spdk_io_channel *ch; 3097 struct spdk_histogram_data *histogram; 3098 uint8_t buf[4096]; 3099 int rc; 3100 3101 spdk_bdev_initialize(bdev_init_cb, NULL); 3102 3103 bdev = allocate_bdev("bdev"); 3104 3105 rc =
static void
bdev_histograms(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc = NULL;
	struct spdk_io_channel *ch;
	struct spdk_histogram_data *histogram;
	uint8_t buf[4096];
	int rc;

	spdk_bdev_initialize(bdev_init_cb, NULL);

	bdev = allocate_bdev("bdev");

	rc = spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc);
	CU_ASSERT(rc == 0);
	CU_ASSERT(desc != NULL);
	CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc));

	ch = spdk_bdev_get_io_channel(desc);
	CU_ASSERT(ch != NULL);

	/* Enable histogram */
	g_status = -1;
	spdk_bdev_histogram_enable(bdev, histogram_status_cb, NULL, true);
	poll_threads();
	CU_ASSERT(g_status == 0);
	CU_ASSERT(bdev->internal.histogram_enabled == true);

	/* Allocate histogram */
	histogram = spdk_histogram_data_alloc();
	SPDK_CU_ASSERT_FATAL(histogram != NULL);

	/* Check if histogram is zeroed */
	spdk_bdev_histogram_get(bdev, histogram, histogram_data_cb, NULL);
	poll_threads();
	CU_ASSERT(g_status == 0);
	SPDK_CU_ASSERT_FATAL(g_histogram != NULL);

	g_count = 0;
	spdk_histogram_data_iterate(g_histogram, histogram_io_count, NULL);

	CU_ASSERT(g_count == 0);

	rc = spdk_bdev_write_blocks(desc, ch, buf, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);

	spdk_delay_us(10);
	stub_complete_io(1);
	poll_threads();

	rc = spdk_bdev_read_blocks(desc, ch, buf, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);

	spdk_delay_us(10);
	stub_complete_io(1);
	poll_threads();

	/* Check if histogram gathered data from all I/O channels */
	g_histogram = NULL;
	spdk_bdev_histogram_get(bdev, histogram, histogram_data_cb, NULL);
	poll_threads();
	CU_ASSERT(g_status == 0);
	CU_ASSERT(bdev->internal.histogram_enabled == true);
	SPDK_CU_ASSERT_FATAL(g_histogram != NULL);

	g_count = 0;
	spdk_histogram_data_iterate(g_histogram, histogram_io_count, NULL);
	CU_ASSERT(g_count == 2);

	/* Disable histogram */
	spdk_bdev_histogram_enable(bdev, histogram_status_cb, NULL, false);
	poll_threads();
	CU_ASSERT(g_status == 0);
	CU_ASSERT(bdev->internal.histogram_enabled == false);

	/* Try to run histogram commands on disabled bdev */
	spdk_bdev_histogram_get(bdev, histogram, histogram_data_cb, NULL);
	poll_threads();
	CU_ASSERT(g_status == -EFAULT);

	spdk_histogram_data_free(histogram);
	spdk_put_io_channel(ch);
	spdk_bdev_close(desc);
	free_bdev(bdev);
	spdk_bdev_finish(bdev_fini_cb, NULL);
	poll_threads();
}
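/*
 * Exercise the compare path twice: once with the backing bdev claiming native
 * COMPARE support and once with support disabled, in which case the bdev layer
 * emulates compare with a READ. A matching buffer must complete with SUCCESS
 * and a mismatching one with MISCOMPARE in both modes.
 */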
static void
_bdev_compare(bool emulated)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc = NULL;
	struct spdk_io_channel *ioch;
	struct ut_expected_io *expected_io;
	uint64_t offset, num_blocks;
	uint32_t num_completed;
	char aa_buf[512];
	char bb_buf[512];
	struct iovec compare_iov;
	uint8_t io_type;
	int rc;

	if (emulated) {
		io_type = SPDK_BDEV_IO_TYPE_READ;
	} else {
		io_type = SPDK_BDEV_IO_TYPE_COMPARE;
	}

	memset(aa_buf, 0xaa, sizeof(aa_buf));
	memset(bb_buf, 0xbb, sizeof(bb_buf));

	g_io_types_supported[SPDK_BDEV_IO_TYPE_COMPARE] = !emulated;

	spdk_bdev_initialize(bdev_init_cb, NULL);
	fn_table.submit_request = stub_submit_request_get_buf;
	bdev = allocate_bdev("bdev");

	rc = spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc);
	CU_ASSERT_EQUAL(rc, 0);
	SPDK_CU_ASSERT_FATAL(desc != NULL);
	CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc));
	ioch = spdk_bdev_get_io_channel(desc);
	SPDK_CU_ASSERT_FATAL(ioch != NULL);

	fn_table.submit_request = stub_submit_request_get_buf;
	g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS;

	offset = 50;
	num_blocks = 1;
	compare_iov.iov_base = aa_buf;
	compare_iov.iov_len = sizeof(aa_buf);

	expected_io = ut_alloc_expected_io(io_type, offset, num_blocks, 0);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	g_io_done = false;
	g_compare_read_buf = aa_buf;
	g_compare_read_buf_len = sizeof(aa_buf);
	rc = spdk_bdev_comparev_blocks(desc, ioch, &compare_iov, 1, offset, num_blocks, io_done, NULL);
	CU_ASSERT_EQUAL(rc, 0);
	num_completed = stub_complete_io(1);
	CU_ASSERT_EQUAL(num_completed, 1);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS);

	expected_io = ut_alloc_expected_io(io_type, offset, num_blocks, 0);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	g_io_done = false;
	g_compare_read_buf = bb_buf;
	g_compare_read_buf_len = sizeof(bb_buf);
	rc = spdk_bdev_comparev_blocks(desc, ioch, &compare_iov, 1, offset, num_blocks, io_done, NULL);
	CU_ASSERT_EQUAL(rc, 0);
	num_completed = stub_complete_io(1);
	CU_ASSERT_EQUAL(num_completed, 1);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_MISCOMPARE);

	spdk_put_io_channel(ioch);
	spdk_bdev_close(desc);
	free_bdev(bdev);
	fn_table.submit_request = stub_submit_request;
	spdk_bdev_finish(bdev_fini_cb, NULL);
	poll_threads();

	g_io_types_supported[SPDK_BDEV_IO_TYPE_COMPARE] = true;

	g_compare_read_buf = NULL;
}

static void
bdev_compare(void)
{
	_bdev_compare(true);
	_bdev_compare(false);
}
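/*
 * Compare-and-write is emulated here (native COMPARE support is turned off),
 * so a single call turns into a range lock, a READ for the compare, a WRITE on
 * match, and a range unlock. The second pass feeds mismatching data and must
 * fail with MISCOMPARE without ever submitting the write.
 */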
static void
bdev_compare_and_write(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc = NULL;
	struct spdk_io_channel *ioch;
	struct ut_expected_io *expected_io;
	uint64_t offset, num_blocks;
	uint32_t num_completed;
	char aa_buf[512];
	char bb_buf[512];
	char cc_buf[512];
	char write_buf[512];
	struct iovec compare_iov;
	struct iovec write_iov;
	int rc;

	memset(aa_buf, 0xaa, sizeof(aa_buf));
	memset(bb_buf, 0xbb, sizeof(bb_buf));
	memset(cc_buf, 0xcc, sizeof(cc_buf));

	g_io_types_supported[SPDK_BDEV_IO_TYPE_COMPARE] = false;

	spdk_bdev_initialize(bdev_init_cb, NULL);
	fn_table.submit_request = stub_submit_request_get_buf;
	bdev = allocate_bdev("bdev");

	rc = spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc);
	CU_ASSERT_EQUAL(rc, 0);
	SPDK_CU_ASSERT_FATAL(desc != NULL);
	CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc));
	ioch = spdk_bdev_get_io_channel(desc);
	SPDK_CU_ASSERT_FATAL(ioch != NULL);

	fn_table.submit_request = stub_submit_request_get_buf;
	g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS;

	offset = 50;
	num_blocks = 1;
	compare_iov.iov_base = aa_buf;
	compare_iov.iov_len = sizeof(aa_buf);
	write_iov.iov_base = bb_buf;
	write_iov.iov_len = sizeof(bb_buf);

	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, offset, num_blocks, 0);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, offset, num_blocks, 0);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	g_io_done = false;
	g_compare_read_buf = aa_buf;
	g_compare_read_buf_len = sizeof(aa_buf);
	memset(write_buf, 0, sizeof(write_buf));
	g_compare_write_buf = write_buf;
	g_compare_write_buf_len = sizeof(write_buf);
	rc = spdk_bdev_comparev_and_writev_blocks(desc, ioch, &compare_iov, 1, &write_iov, 1,
			offset, num_blocks, io_done, NULL);
	/* Trigger range locking */
	poll_threads();
	CU_ASSERT_EQUAL(rc, 0);
	num_completed = stub_complete_io(1);
	CU_ASSERT_EQUAL(num_completed, 1);
	CU_ASSERT(g_io_done == false);
	num_completed = stub_complete_io(1);
	/* Trigger range unlocking */
	poll_threads();
	CU_ASSERT_EQUAL(num_completed, 1);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS);
	CU_ASSERT(memcmp(write_buf, bb_buf, sizeof(write_buf)) == 0);

	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, offset, num_blocks, 0);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	g_io_done = false;
	g_compare_read_buf = cc_buf;
	g_compare_read_buf_len = sizeof(cc_buf);
	memset(write_buf, 0, sizeof(write_buf));
	g_compare_write_buf = write_buf;
	g_compare_write_buf_len = sizeof(write_buf);
	rc = spdk_bdev_comparev_and_writev_blocks(desc, ioch, &compare_iov, 1, &write_iov, 1,
			offset, num_blocks, io_done, NULL);
	/* Trigger range locking */
	poll_threads();
	CU_ASSERT_EQUAL(rc, 0);
	num_completed = stub_complete_io(1);
	/* Trigger range unlocking earlier because we expect an error here */
	poll_threads();
	CU_ASSERT_EQUAL(num_completed, 1);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_MISCOMPARE);
	num_completed = stub_complete_io(1);
	CU_ASSERT_EQUAL(num_completed, 0);

	spdk_put_io_channel(ioch);
	spdk_bdev_close(desc);
	free_bdev(bdev);
	fn_table.submit_request = stub_submit_request;
	spdk_bdev_finish(bdev_fini_cb, NULL);
	poll_threads();

	g_io_types_supported[SPDK_BDEV_IO_TYPE_COMPARE] = true;

	g_compare_read_buf = NULL;
	g_compare_write_buf = NULL;
}
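/*
 * Write-zeroes splitting: a natively supported request must not be split, while
 * an unsupported one is emulated with regular WRITEs of at most ZERO_BUFFER_SIZE
 * bytes each. The block length (interleaved vs. separate metadata) determines
 * how many blocks fit into each emulated write.
 */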
static void
bdev_write_zeroes(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc = NULL;
	struct spdk_io_channel *ioch;
	struct ut_expected_io *expected_io;
	uint64_t offset, num_io_blocks, num_blocks;
	uint32_t num_completed, num_requests;
	int rc;

	spdk_bdev_initialize(bdev_init_cb, NULL);
	bdev = allocate_bdev("bdev");

	rc = spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc);
	CU_ASSERT_EQUAL(rc, 0);
	SPDK_CU_ASSERT_FATAL(desc != NULL);
	CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc));
	ioch = spdk_bdev_get_io_channel(desc);
	SPDK_CU_ASSERT_FATAL(ioch != NULL);

	fn_table.submit_request = stub_submit_request;
	g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS;

	/* First test that if the bdev supports write_zeroes, the request won't be split */
	bdev->md_len = 0;
	bdev->blocklen = 4096;
	num_blocks = (ZERO_BUFFER_SIZE / bdev->blocklen) * 2;

	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, 0, num_blocks, 0);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
	rc = spdk_bdev_write_zeroes_blocks(desc, ioch, 0, num_blocks, io_done, NULL);
	CU_ASSERT_EQUAL(rc, 0);
	num_completed = stub_complete_io(1);
	CU_ASSERT_EQUAL(num_completed, 1);

	/* Check that if write zeroes is not supported, it'll be replaced by regular writes */
	ut_enable_io_type(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, false);
	num_io_blocks = ZERO_BUFFER_SIZE / bdev->blocklen;
	num_requests = 2;
	num_blocks = (ZERO_BUFFER_SIZE / bdev->blocklen) * num_requests;

	for (offset = 0; offset < num_requests; ++offset) {
		expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE,
						   offset * num_io_blocks, num_io_blocks, 0);
		TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
	}

	rc = spdk_bdev_write_zeroes_blocks(desc, ioch, 0, num_blocks, io_done, NULL);
	CU_ASSERT_EQUAL(rc, 0);
	num_completed = stub_complete_io(num_requests);
	CU_ASSERT_EQUAL(num_completed, num_requests);

	/* Check that the splitting is correct if bdev has interleaved metadata */
	bdev->md_interleave = true;
	bdev->md_len = 64;
	bdev->blocklen = 4096 + 64;
	num_blocks = (ZERO_BUFFER_SIZE / bdev->blocklen) * 2;

	num_requests = offset = 0;
	while (offset < num_blocks) {
		num_io_blocks = spdk_min(ZERO_BUFFER_SIZE / bdev->blocklen, num_blocks - offset);
		expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE,
						   offset, num_io_blocks, 0);
		TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
		offset += num_io_blocks;
		num_requests++;
	}

	rc = spdk_bdev_write_zeroes_blocks(desc, ioch, 0, num_blocks, io_done, NULL);
	CU_ASSERT_EQUAL(rc, 0);
	num_completed = stub_complete_io(num_requests);
	CU_ASSERT_EQUAL(num_completed, num_requests);
	num_completed = stub_complete_io(num_requests);
	CU_ASSERT(num_completed == 0);

	/* Check the same for a separate metadata buffer */
	bdev->md_interleave = false;
	bdev->md_len = 64;
	bdev->blocklen = 4096;

	num_requests = offset = 0;
	while (offset < num_blocks) {
		num_io_blocks = spdk_min(ZERO_BUFFER_SIZE / (bdev->blocklen + bdev->md_len), num_blocks);
		expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE,
						   offset, num_io_blocks, 0);
		expected_io->md_buf = (char *)g_bdev_mgr.zero_buffer + num_io_blocks * bdev->blocklen;
		TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
		offset += num_io_blocks;
		num_requests++;
	}

	rc = spdk_bdev_write_zeroes_blocks(desc, ioch, 0, num_blocks, io_done, NULL);
	CU_ASSERT_EQUAL(rc, 0);
	num_completed = stub_complete_io(num_requests);
	CU_ASSERT_EQUAL(num_completed, num_requests);

	ut_enable_io_type(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, true);
	spdk_put_io_channel(ioch);
	spdk_bdev_close(desc);
	free_bdev(bdev);
	spdk_bdev_finish(bdev_fini_cb, NULL);
	poll_threads();
}
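/*
 * Zero-copy write: zcopy start with populate=false only hands out the bdev
 * module's buffer, and zcopy end with commit=true writes it out. The read-side
 * globals are poisoned with sentinel values to prove the write path never
 * touches them.
 */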
static void
bdev_zcopy_write(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc = NULL;
	struct spdk_io_channel *ioch;
	struct ut_expected_io *expected_io;
	uint64_t offset, num_blocks;
	uint32_t num_completed;
	char aa_buf[512];
	struct iovec iov;
	int rc;
	const bool populate = false;
	const bool commit = true;

	memset(aa_buf, 0xaa, sizeof(aa_buf));

	spdk_bdev_initialize(bdev_init_cb, NULL);
	bdev = allocate_bdev("bdev");

	rc = spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc);
	CU_ASSERT_EQUAL(rc, 0);
	SPDK_CU_ASSERT_FATAL(desc != NULL);
	CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc));
	ioch = spdk_bdev_get_io_channel(desc);
	SPDK_CU_ASSERT_FATAL(ioch != NULL);

	g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS;

	offset = 50;
	num_blocks = 1;
	iov.iov_base = NULL;
	iov.iov_len = 0;

	g_zcopy_read_buf = (void *) 0x1122334455667788UL;
	g_zcopy_read_buf_len = (uint32_t) -1;
	/* Do a zcopy start for a write (populate=false) */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_ZCOPY, offset, num_blocks, 0);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
	g_io_done = false;
	g_zcopy_write_buf = aa_buf;
	g_zcopy_write_buf_len = sizeof(aa_buf);
	g_zcopy_bdev_io = NULL;
	rc = spdk_bdev_zcopy_start(desc, ioch, &iov, 1, offset, num_blocks, populate, io_done, NULL);
	CU_ASSERT_EQUAL(rc, 0);
	num_completed = stub_complete_io(1);
	CU_ASSERT_EQUAL(num_completed, 1);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS);
	/* Check that the iov has been set up */
	CU_ASSERT(iov.iov_base == g_zcopy_write_buf);
	CU_ASSERT(iov.iov_len == g_zcopy_write_buf_len);
	/* Check that the bdev_io has been saved */
	CU_ASSERT(g_zcopy_bdev_io != NULL);
	/* Now do the zcopy end for a write (commit=true) */
	g_io_done = false;
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_ZCOPY, offset, num_blocks, 0);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
	rc = spdk_bdev_zcopy_end(g_zcopy_bdev_io, commit, io_done, NULL);
	CU_ASSERT_EQUAL(rc, 0);
	num_completed = stub_complete_io(1);
	CU_ASSERT_EQUAL(num_completed, 1);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS);
	/* Check that the g_zcopy globals are reset by io_done */
	CU_ASSERT(g_zcopy_write_buf == NULL);
	CU_ASSERT(g_zcopy_write_buf_len == 0);
	/* Check that io_done has freed the g_zcopy_bdev_io */
	CU_ASSERT(g_zcopy_bdev_io == NULL);

	/* Check that the zcopy read buffer has not been touched, which
	 * ensures that the correct buffers were used.
	 */
	CU_ASSERT(g_zcopy_read_buf == (void *) 0x1122334455667788UL);
	CU_ASSERT(g_zcopy_read_buf_len == (uint32_t) -1);

	spdk_put_io_channel(ioch);
	spdk_bdev_close(desc);
	free_bdev(bdev);
	spdk_bdev_finish(bdev_fini_cb, NULL);
	poll_threads();
}
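/*
 * Zero-copy read: the mirror image of bdev_zcopy_write. Start with
 * populate=true fills the iov from the module's buffer; end with commit=false
 * releases it without writing anything back.
 */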
static void
bdev_zcopy_read(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc = NULL;
	struct spdk_io_channel *ioch;
	struct ut_expected_io *expected_io;
	uint64_t offset, num_blocks;
	uint32_t num_completed;
	char aa_buf[512];
	struct iovec iov;
	int rc;
	const bool populate = true;
	const bool commit = false;

	memset(aa_buf, 0xaa, sizeof(aa_buf));

	spdk_bdev_initialize(bdev_init_cb, NULL);
	bdev = allocate_bdev("bdev");

	rc = spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc);
	CU_ASSERT_EQUAL(rc, 0);
	SPDK_CU_ASSERT_FATAL(desc != NULL);
	CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc));
	ioch = spdk_bdev_get_io_channel(desc);
	SPDK_CU_ASSERT_FATAL(ioch != NULL);

	g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS;

	offset = 50;
	num_blocks = 1;
	iov.iov_base = NULL;
	iov.iov_len = 0;

	g_zcopy_write_buf = (void *) 0x1122334455667788UL;
	g_zcopy_write_buf_len = (uint32_t) -1;

	/* Do a zcopy start for a read (populate=true) */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_ZCOPY, offset, num_blocks, 0);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
	g_io_done = false;
	g_zcopy_read_buf = aa_buf;
	g_zcopy_read_buf_len = sizeof(aa_buf);
	g_zcopy_bdev_io = NULL;
	rc = spdk_bdev_zcopy_start(desc, ioch, &iov, 1, offset, num_blocks, populate, io_done, NULL);
	CU_ASSERT_EQUAL(rc, 0);
	num_completed = stub_complete_io(1);
	CU_ASSERT_EQUAL(num_completed, 1);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS);
	/* Check that the iov has been set up */
	CU_ASSERT(iov.iov_base == g_zcopy_read_buf);
	CU_ASSERT(iov.iov_len == g_zcopy_read_buf_len);
	/* Check that the bdev_io has been saved */
	CU_ASSERT(g_zcopy_bdev_io != NULL);

	/* Now do the zcopy end for a read (commit=false) */
	g_io_done = false;
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_ZCOPY, offset, num_blocks, 0);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
	rc = spdk_bdev_zcopy_end(g_zcopy_bdev_io, commit, io_done, NULL);
	CU_ASSERT_EQUAL(rc, 0);
	num_completed = stub_complete_io(1);
	CU_ASSERT_EQUAL(num_completed, 1);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS);
	/* Check that the g_zcopy globals are reset by io_done */
	CU_ASSERT(g_zcopy_read_buf == NULL);
	CU_ASSERT(g_zcopy_read_buf_len == 0);
	/* Check that io_done has freed the g_zcopy_bdev_io */
	CU_ASSERT(g_zcopy_bdev_io == NULL);

	/* Check that the zcopy write buffer has not been touched, which
	 * ensures that the correct buffers were used.
	 */
	CU_ASSERT(g_zcopy_write_buf == (void *) 0x1122334455667788UL);
	CU_ASSERT(g_zcopy_write_buf_len == (uint32_t) -1);

	spdk_put_io_channel(ioch);
	spdk_bdev_close(desc);
	free_bdev(bdev);
	spdk_bdev_finish(bdev_fini_cb, NULL);
	poll_threads();
}
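/*
 * Hot-remove handling: opening a bdev that is being unregistered must fail
 * with -ENODEV, and closing a descriptor while the remove event is still in
 * flight must suppress the event callback yet still run the deferred
 * unregister callback.
 */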
static void
bdev_open_while_hotremove(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc[2] = {};
	int rc;

	bdev = allocate_bdev("bdev");

	rc = spdk_bdev_open_ext("bdev", false, bdev_ut_event_cb, NULL, &desc[0]);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(desc[0] != NULL);
	CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc[0]));

	spdk_bdev_unregister(bdev, NULL, NULL);

	rc = spdk_bdev_open_ext("bdev", false, bdev_ut_event_cb, NULL, &desc[1]);
	CU_ASSERT(rc == -ENODEV);
	SPDK_CU_ASSERT_FATAL(desc[1] == NULL);

	spdk_bdev_close(desc[0]);
	free_bdev(bdev);
}

static void
bdev_close_while_hotremove(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc = NULL;
	int rc = 0;

	bdev = allocate_bdev("bdev");

	rc = spdk_bdev_open_ext("bdev", true, bdev_open_cb1, &desc, &desc);
	CU_ASSERT_EQUAL(rc, 0);
	SPDK_CU_ASSERT_FATAL(desc != NULL);
	CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc));

	/* Simulate hot-unplug by unregistering bdev */
	g_event_type1 = 0xFF;
	g_unregister_arg = NULL;
	g_unregister_rc = -1;
	spdk_bdev_unregister(bdev, bdev_unregister_cb, (void *)0x12345678);
	/* Close device while remove event is in flight */
	spdk_bdev_close(desc);

	/* Ensure that the unregister callback is delayed */
	CU_ASSERT_EQUAL(g_unregister_arg, NULL);
	CU_ASSERT_EQUAL(g_unregister_rc, -1);

	poll_threads();

	/* Event callback shall not be issued because device was closed */
	CU_ASSERT_EQUAL(g_event_type1, 0xFF);
	/* Unregister callback is issued */
	CU_ASSERT_EQUAL(g_unregister_arg, (void *)0x12345678);
	CU_ASSERT_EQUAL(g_unregister_rc, 0);

	free_bdev(bdev);
}

static void
bdev_open_ext(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc1 = NULL;
	struct spdk_bdev_desc *desc2 = NULL;
	int rc = 0;

	bdev = allocate_bdev("bdev");

	rc = spdk_bdev_open_ext("bdev", true, NULL, NULL, &desc1);
	CU_ASSERT_EQUAL(rc, -EINVAL);

	rc = spdk_bdev_open_ext("bdev", true, bdev_open_cb1, &desc1, &desc1);
	CU_ASSERT_EQUAL(rc, 0);

	rc = spdk_bdev_open_ext("bdev", true, bdev_open_cb2, &desc2, &desc2);
	CU_ASSERT_EQUAL(rc, 0);

	g_event_type1 = 0xFF;
	g_event_type2 = 0xFF;

	/* Simulate hot-unplug by unregistering bdev */
	spdk_bdev_unregister(bdev, NULL, NULL);
	poll_threads();

	/* Check if correct events have been triggered in event callback fn */
	CU_ASSERT_EQUAL(g_event_type1, SPDK_BDEV_EVENT_REMOVE);
	CU_ASSERT_EQUAL(g_event_type2, SPDK_BDEV_EVENT_REMOVE);

	free_bdev(bdev);
	poll_threads();
}

struct timeout_io_cb_arg {
	struct iovec iov;
	uint8_t type;
};

static int
bdev_channel_count_submitted_io(struct spdk_bdev_channel *ch)
{
	struct spdk_bdev_io *bdev_io;
	int n = 0;

	if (!ch) {
		return -1;
	}

	TAILQ_FOREACH(bdev_io, &ch->io_submitted, internal.ch_link) {
		n++;
	}

	return n;
}
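/*
 * bdev_set_io_timeout() coverage, in three parts: (1) bdev_ch->io_submitted
 * links every user-submitted I/O, including children created by splitting and
 * resets; (2) spdk_bdev_set_timeout() registers, re-arms and disables the
 * per-descriptor timeout poller; (3) when the limit elapses, the callback
 * below must report exactly the I/O (type and payload) that timed out.
 */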
static void
bdev_channel_io_timeout_cb(void *cb_arg, struct spdk_bdev_io *bdev_io)
{
	struct timeout_io_cb_arg *ctx = cb_arg;

	ctx->type = bdev_io->type;
	ctx->iov.iov_base = bdev_io->iov.iov_base;
	ctx->iov.iov_len = bdev_io->iov.iov_len;
}

static void
bdev_set_io_timeout(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc = NULL;
	struct spdk_io_channel *io_ch = NULL;
	struct spdk_bdev_channel *bdev_ch = NULL;
	struct timeout_io_cb_arg cb_arg;

	spdk_bdev_initialize(bdev_init_cb, NULL);

	bdev = allocate_bdev("bdev");

	CU_ASSERT(spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc) == 0);
	SPDK_CU_ASSERT_FATAL(desc != NULL);
	CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc));

	io_ch = spdk_bdev_get_io_channel(desc);
	CU_ASSERT(io_ch != NULL);

	bdev_ch = spdk_io_channel_get_ctx(io_ch);
	CU_ASSERT(TAILQ_EMPTY(&bdev_ch->io_submitted));

	/* Part 1.
	 * Check the bdev_ch->io_submitted list to make sure that it links
	 * the user-submitted I/Os, and only those.
	 */
	CU_ASSERT(spdk_bdev_read(desc, io_ch, (void *)0x1000, 0, 4096, io_done, NULL) == 0);
	CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 1);
	CU_ASSERT(spdk_bdev_write(desc, io_ch, (void *)0x2000, 0, 4096, io_done, NULL) == 0);
	CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 2);
	stub_complete_io(1);
	CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 1);
	stub_complete_io(1);
	CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 0);

	/* Split IO */
	bdev->optimal_io_boundary = 16;
	bdev->split_on_optimal_io_boundary = true;

	/* Now test that a single-vector command is split correctly.
	 * Offset 14, length 8, payload 0xF000
	 *  Child - Offset 14, length 2, payload 0xF000
	 *  Child - Offset 16, length 6, payload 0xF000 + 2 * 512
	 *
	 * Set up the expected values before calling spdk_bdev_read_blocks
	 */
	CU_ASSERT(spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 8, io_done, NULL) == 0);
	/* We count all submitted I/Os, including those generated by splitting. */
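	/* 3 entries: the split parent stays linked alongside its two children. */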
	CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 3);
	stub_complete_io(1);
	CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 2);
	stub_complete_io(1);
	CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 0);

	/* Also include the reset IO */
	CU_ASSERT(spdk_bdev_reset(desc, io_ch, io_done, NULL) == 0);
	CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 1);
	poll_threads();
	stub_complete_io(1);
	poll_threads();
	CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 0);

	/* Part 2.
	 * Test registering the descriptor timeout poller.
	 */

	/* Successfully set the timeout */
	CU_ASSERT(spdk_bdev_set_timeout(desc, 30, bdev_channel_io_timeout_cb, &cb_arg) == 0);
	CU_ASSERT(desc->io_timeout_poller != NULL);
	CU_ASSERT(desc->timeout_in_sec == 30);
	CU_ASSERT(desc->cb_fn == bdev_channel_io_timeout_cb);
	CU_ASSERT(desc->cb_arg == &cb_arg);

	/* Change the timeout limit */
	CU_ASSERT(spdk_bdev_set_timeout(desc, 20, bdev_channel_io_timeout_cb, &cb_arg) == 0);
	CU_ASSERT(desc->io_timeout_poller != NULL);
	CU_ASSERT(desc->timeout_in_sec == 20);
	CU_ASSERT(desc->cb_fn == bdev_channel_io_timeout_cb);
	CU_ASSERT(desc->cb_arg == &cb_arg);

	/* Disable the timeout */
	CU_ASSERT(spdk_bdev_set_timeout(desc, 0, NULL, NULL) == 0);
	CU_ASSERT(desc->io_timeout_poller == NULL);

	/* Part 3.
	 * Catch a timed-out I/O and check whether it is the submitted one.
	 */
	memset(&cb_arg, 0, sizeof(cb_arg));
	CU_ASSERT(spdk_bdev_set_timeout(desc, 30, bdev_channel_io_timeout_cb, &cb_arg) == 0);
	CU_ASSERT(spdk_bdev_write_blocks(desc, io_ch, (void *)0x1000, 0, 1, io_done, NULL) == 0);

	/* Don't reach the limit */
	spdk_delay_us(15 * spdk_get_ticks_hz());
	poll_threads();
	CU_ASSERT(cb_arg.type == 0);
	CU_ASSERT(cb_arg.iov.iov_base == (void *)0x0);
	CU_ASSERT(cb_arg.iov.iov_len == 0);

	/* 15 + 15 = 30: reach the limit */
	spdk_delay_us(15 * spdk_get_ticks_hz());
	poll_threads();
	CU_ASSERT(cb_arg.type == SPDK_BDEV_IO_TYPE_WRITE);
	CU_ASSERT(cb_arg.iov.iov_base == (void *)0x1000);
	CU_ASSERT(cb_arg.iov.iov_len == 1 * bdev->blocklen);
	stub_complete_io(1);

	/* Use the same split IO as above and check the timed-out IO */
	memset(&cb_arg, 0, sizeof(cb_arg));
	CU_ASSERT(spdk_bdev_write_blocks(desc, io_ch, (void *)0xF000, 14, 8, io_done, NULL) == 0);

	/* The first child completes in time */
	spdk_delay_us(15 * spdk_get_ticks_hz());
	poll_threads();
	stub_complete_io(1);
	CU_ASSERT(cb_arg.type == 0);
	CU_ASSERT(cb_arg.iov.iov_base == (void *)0x0);
	CU_ASSERT(cb_arg.iov.iov_len == 0);

	/* The second child reaches the limit */
	spdk_delay_us(15 * spdk_get_ticks_hz());
	poll_threads();
	CU_ASSERT(cb_arg.type == SPDK_BDEV_IO_TYPE_WRITE);
	CU_ASSERT(cb_arg.iov.iov_base == (void *)0xF000);
	CU_ASSERT(cb_arg.iov.iov_len == 8 * bdev->blocklen);
	stub_complete_io(1);

	/* Also include the reset IO */
	memset(&cb_arg, 0, sizeof(cb_arg));
	CU_ASSERT(spdk_bdev_reset(desc, io_ch, io_done, NULL) == 0);
	spdk_delay_us(30 * spdk_get_ticks_hz());
	poll_threads();
	CU_ASSERT(cb_arg.type == SPDK_BDEV_IO_TYPE_RESET);
	stub_complete_io(1);
	poll_threads();

	spdk_put_io_channel(io_ch);
	spdk_bdev_close(desc);
	free_bdev(bdev);
	spdk_bdev_finish(bdev_fini_cb, NULL);
	poll_threads();
}
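/*
 * The next four tests cover LBA range locking: the pure overlap predicate,
 * exact-match unlock semantics, interaction with outstanding write I/O, and
 * queueing of overlapping lock requests on the pending list.
 */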
static void
lba_range_overlap(void)
{
	struct lba_range r1, r2;

	r1.offset = 100;
	r1.length = 50;

	r2.offset = 0;
	r2.length = 1;
	CU_ASSERT(!bdev_lba_range_overlapped(&r1, &r2));

	r2.offset = 0;
	r2.length = 100;
	CU_ASSERT(!bdev_lba_range_overlapped(&r1, &r2));

	r2.offset = 0;
	r2.length = 110;
	CU_ASSERT(bdev_lba_range_overlapped(&r1, &r2));

	r2.offset = 100;
	r2.length = 10;
	CU_ASSERT(bdev_lba_range_overlapped(&r1, &r2));

	r2.offset = 110;
	r2.length = 20;
	CU_ASSERT(bdev_lba_range_overlapped(&r1, &r2));

	r2.offset = 140;
	r2.length = 150;
	CU_ASSERT(bdev_lba_range_overlapped(&r1, &r2));

	r2.offset = 130;
	r2.length = 200;
	CU_ASSERT(bdev_lba_range_overlapped(&r1, &r2));

	r2.offset = 150;
	r2.length = 100;
	CU_ASSERT(!bdev_lba_range_overlapped(&r1, &r2));

	r2.offset = 110;
	r2.length = 0;
	CU_ASSERT(!bdev_lba_range_overlapped(&r1, &r2));
}

static bool g_lock_lba_range_done;
static bool g_unlock_lba_range_done;

static void
lock_lba_range_done(void *ctx, int status)
{
	g_lock_lba_range_done = true;
}

static void
unlock_lba_range_done(void *ctx, int status)
{
	g_unlock_lba_range_done = true;
}

static void
lock_lba_range_check_ranges(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc = NULL;
	struct spdk_io_channel *io_ch;
	struct spdk_bdev_channel *channel;
	struct lba_range *range;
	int ctx1;
	int rc;

	spdk_bdev_initialize(bdev_init_cb, NULL);

	bdev = allocate_bdev("bdev0");

	rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc);
	CU_ASSERT(rc == 0);
	CU_ASSERT(desc != NULL);
	CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc));
	io_ch = spdk_bdev_get_io_channel(desc);
	CU_ASSERT(io_ch != NULL);
	channel = spdk_io_channel_get_ctx(io_ch);

	g_lock_lba_range_done = false;
	rc = bdev_lock_lba_range(desc, io_ch, 20, 10, lock_lba_range_done, &ctx1);
	CU_ASSERT(rc == 0);
	poll_threads();

	CU_ASSERT(g_lock_lba_range_done == true);
	range = TAILQ_FIRST(&channel->locked_ranges);
	SPDK_CU_ASSERT_FATAL(range != NULL);
	CU_ASSERT(range->offset == 20);
	CU_ASSERT(range->length == 10);
	CU_ASSERT(range->owner_ch == channel);

	/* Unlocks must exactly match a lock. */
	g_unlock_lba_range_done = false;
	rc = bdev_unlock_lba_range(desc, io_ch, 20, 1, unlock_lba_range_done, &ctx1);
	CU_ASSERT(rc == -EINVAL);
	CU_ASSERT(g_unlock_lba_range_done == false);

	rc = bdev_unlock_lba_range(desc, io_ch, 20, 10, unlock_lba_range_done, &ctx1);
	CU_ASSERT(rc == 0);
	spdk_delay_us(100);
	poll_threads();

	CU_ASSERT(g_unlock_lba_range_done == true);
	CU_ASSERT(TAILQ_EMPTY(&channel->locked_ranges));

	spdk_put_io_channel(io_ch);
	spdk_bdev_close(desc);
	free_bdev(bdev);
	spdk_bdev_finish(bdev_fini_cb, NULL);
	poll_threads();
}

static void
lock_lba_range_with_io_outstanding(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc = NULL;
	struct spdk_io_channel *io_ch;
	struct spdk_bdev_channel *channel;
	struct lba_range *range;
	char buf[4096];
	int ctx1;
	int rc;

	spdk_bdev_initialize(bdev_init_cb, NULL);

	bdev = allocate_bdev("bdev0");

	rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc);
	CU_ASSERT(rc == 0);
	CU_ASSERT(desc != NULL);
	CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc));
	io_ch = spdk_bdev_get_io_channel(desc);
	CU_ASSERT(io_ch != NULL);
	channel = spdk_io_channel_get_ctx(io_ch);

	g_io_done = false;
	rc = spdk_bdev_read_blocks(desc, io_ch, buf, 20, 1, io_done, &ctx1);
	CU_ASSERT(rc == 0);

	g_lock_lba_range_done = false;
	rc = bdev_lock_lba_range(desc, io_ch, 20, 10, lock_lba_range_done, &ctx1);
	CU_ASSERT(rc == 0);
	poll_threads();

	/* The lock should immediately become valid, since there are no outstanding
	 * write I/O.
	 */
	CU_ASSERT(g_io_done == false);
	CU_ASSERT(g_lock_lba_range_done == true);
	range = TAILQ_FIRST(&channel->locked_ranges);
	SPDK_CU_ASSERT_FATAL(range != NULL);
	CU_ASSERT(range->offset == 20);
	CU_ASSERT(range->length == 10);
	CU_ASSERT(range->owner_ch == channel);
	CU_ASSERT(range->locked_ctx == &ctx1);

	rc = bdev_unlock_lba_range(desc, io_ch, 20, 10, lock_lba_range_done, &ctx1);
	CU_ASSERT(rc == 0);
	stub_complete_io(1);
	spdk_delay_us(100);
	poll_threads();

	CU_ASSERT(TAILQ_EMPTY(&channel->locked_ranges));

	/* Now try again, but with a write I/O. */
	g_io_done = false;
	rc = spdk_bdev_write_blocks(desc, io_ch, buf, 20, 1, io_done, &ctx1);
	CU_ASSERT(rc == 0);

	g_lock_lba_range_done = false;
	rc = bdev_lock_lba_range(desc, io_ch, 20, 10, lock_lba_range_done, &ctx1);
	CU_ASSERT(rc == 0);
	poll_threads();

	/* The lock should not be fully valid yet, since a write I/O is outstanding.
	 * But note that the range should be on the channel's locked_list, to make sure
	 * no new write I/Os are started.
	 */
	CU_ASSERT(g_io_done == false);
	CU_ASSERT(g_lock_lba_range_done == false);
	range = TAILQ_FIRST(&channel->locked_ranges);
	SPDK_CU_ASSERT_FATAL(range != NULL);
	CU_ASSERT(range->offset == 20);
	CU_ASSERT(range->length == 10);

	/* Complete the write I/O. This should make the lock valid (checked by confirming
	 * our callback was invoked).
	 */
	stub_complete_io(1);
	spdk_delay_us(100);
	poll_threads();
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_lock_lba_range_done == true);

	rc = bdev_unlock_lba_range(desc, io_ch, 20, 10, unlock_lba_range_done, &ctx1);
	CU_ASSERT(rc == 0);
	poll_threads();

	CU_ASSERT(TAILQ_EMPTY(&channel->locked_ranges));

	spdk_put_io_channel(io_ch);
	spdk_bdev_close(desc);
	free_bdev(bdev);
	spdk_bdev_finish(bdev_fini_cb, NULL);
	poll_threads();
}

static void
lock_lba_range_overlapped(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc = NULL;
	struct spdk_io_channel *io_ch;
	struct spdk_bdev_channel *channel;
	struct lba_range *range;
	int ctx1;
	int rc;

	spdk_bdev_initialize(bdev_init_cb, NULL);

	bdev = allocate_bdev("bdev0");

	rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc);
	CU_ASSERT(rc == 0);
	CU_ASSERT(desc != NULL);
	CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc));
	io_ch = spdk_bdev_get_io_channel(desc);
	CU_ASSERT(io_ch != NULL);
	channel = spdk_io_channel_get_ctx(io_ch);

	/* Lock range 20-29. */
	g_lock_lba_range_done = false;
	rc = bdev_lock_lba_range(desc, io_ch, 20, 10, lock_lba_range_done, &ctx1);
	CU_ASSERT(rc == 0);
	poll_threads();

	CU_ASSERT(g_lock_lba_range_done == true);
	range = TAILQ_FIRST(&channel->locked_ranges);
	SPDK_CU_ASSERT_FATAL(range != NULL);
	CU_ASSERT(range->offset == 20);
	CU_ASSERT(range->length == 10);

	/* Try to lock range 25-39. It should not lock immediately, since it overlaps with
	 * 20-29.
	 */
	g_lock_lba_range_done = false;
	rc = bdev_lock_lba_range(desc, io_ch, 25, 15, lock_lba_range_done, &ctx1);
	CU_ASSERT(rc == 0);
	poll_threads();

	CU_ASSERT(g_lock_lba_range_done == false);
	range = TAILQ_FIRST(&bdev->internal.pending_locked_ranges);
	SPDK_CU_ASSERT_FATAL(range != NULL);
	CU_ASSERT(range->offset == 25);
	CU_ASSERT(range->length == 15);

	/* Unlock 20-29. This should result in range 25-39 now getting locked since it
	 * no longer overlaps with an active lock.
	 */
	g_unlock_lba_range_done = false;
	rc = bdev_unlock_lba_range(desc, io_ch, 20, 10, unlock_lba_range_done, &ctx1);
	CU_ASSERT(rc == 0);
	poll_threads();

	CU_ASSERT(g_unlock_lba_range_done == true);
	CU_ASSERT(TAILQ_EMPTY(&bdev->internal.pending_locked_ranges));
	range = TAILQ_FIRST(&channel->locked_ranges);
	SPDK_CU_ASSERT_FATAL(range != NULL);
	CU_ASSERT(range->offset == 25);
	CU_ASSERT(range->length == 15);

	/* Lock 40-59. This should immediately lock since it does not overlap with the
	 * currently active 25-39 lock.
	 */
	g_lock_lba_range_done = false;
	rc = bdev_lock_lba_range(desc, io_ch, 40, 20, lock_lba_range_done, &ctx1);
	CU_ASSERT(rc == 0);
	poll_threads();

	CU_ASSERT(g_lock_lba_range_done == true);
	range = TAILQ_FIRST(&bdev->internal.locked_ranges);
	SPDK_CU_ASSERT_FATAL(range != NULL);
	range = TAILQ_NEXT(range, tailq);
	SPDK_CU_ASSERT_FATAL(range != NULL);
	CU_ASSERT(range->offset == 40);
	CU_ASSERT(range->length == 20);

	/* Try to lock 35-44. Note that this overlaps with both 25-39 and 40-59. */
	g_lock_lba_range_done = false;
	rc = bdev_lock_lba_range(desc, io_ch, 35, 10, lock_lba_range_done, &ctx1);
	CU_ASSERT(rc == 0);
	poll_threads();

	CU_ASSERT(g_lock_lba_range_done == false);
	range = TAILQ_FIRST(&bdev->internal.pending_locked_ranges);
	SPDK_CU_ASSERT_FATAL(range != NULL);
	CU_ASSERT(range->offset == 35);
	CU_ASSERT(range->length == 10);

	/* Unlock 25-39. Make sure that 35-44 is still in the pending list, since
	 * the 40-59 lock is still active.
	 */
	g_unlock_lba_range_done = false;
	rc = bdev_unlock_lba_range(desc, io_ch, 25, 15, unlock_lba_range_done, &ctx1);
	CU_ASSERT(rc == 0);
	poll_threads();

	CU_ASSERT(g_unlock_lba_range_done == true);
	CU_ASSERT(g_lock_lba_range_done == false);
	range = TAILQ_FIRST(&bdev->internal.pending_locked_ranges);
	SPDK_CU_ASSERT_FATAL(range != NULL);
	CU_ASSERT(range->offset == 35);
	CU_ASSERT(range->length == 10);

	/* Unlock 40-59. This should result in 35-44 now getting locked, since there are
	 * no longer any active overlapping locks.
	 */
	g_unlock_lba_range_done = false;
	rc = bdev_unlock_lba_range(desc, io_ch, 40, 20, unlock_lba_range_done, &ctx1);
	CU_ASSERT(rc == 0);
	poll_threads();

	CU_ASSERT(g_unlock_lba_range_done == true);
	CU_ASSERT(g_lock_lba_range_done == true);
	CU_ASSERT(TAILQ_EMPTY(&bdev->internal.pending_locked_ranges));
	range = TAILQ_FIRST(&bdev->internal.locked_ranges);
	SPDK_CU_ASSERT_FATAL(range != NULL);
	CU_ASSERT(range->offset == 35);
	CU_ASSERT(range->length == 10);

	/* Finally, unlock 35-44. */
	g_unlock_lba_range_done = false;
	rc = bdev_unlock_lba_range(desc, io_ch, 35, 10, unlock_lba_range_done, &ctx1);
	CU_ASSERT(rc == 0);
	poll_threads();

	CU_ASSERT(g_unlock_lba_range_done == true);
	CU_ASSERT(TAILQ_EMPTY(&bdev->internal.locked_ranges));

	spdk_put_io_channel(io_ch);
	spdk_bdev_close(desc);
	free_bdev(bdev);
	spdk_bdev_finish(bdev_fini_cb, NULL);
	poll_threads();
}
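/*
 * spdk_bdev_abort() coverage: unsupported module (-ENOTSUP), aborting an I/O
 * that was never submitted, successfully aborting an outstanding I/O, racing
 * with its completion, and aborting split parents, where every child must be
 * aborted and a deliberately small bdev_io pool forces the child aborts to
 * queue on the mgmt channel's io_wait_queue.
 */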
static void
abort_done(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
{
	g_abort_done = true;
	g_abort_status = bdev_io->internal.status;
	spdk_bdev_free_io(bdev_io);
}

static void
bdev_io_abort(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc = NULL;
	struct spdk_io_channel *io_ch;
	struct spdk_bdev_channel *channel;
	struct spdk_bdev_mgmt_channel *mgmt_ch;
	struct spdk_bdev_opts bdev_opts = {};
	struct iovec iov[BDEV_IO_NUM_CHILD_IOV * 2];
	uint64_t io_ctx1 = 0, io_ctx2 = 0, i;
	int rc;

	spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts));
	bdev_opts.bdev_io_pool_size = 7;
	bdev_opts.bdev_io_cache_size = 2;

	rc = spdk_bdev_set_opts(&bdev_opts);
	CU_ASSERT(rc == 0);
	spdk_bdev_initialize(bdev_init_cb, NULL);

	bdev = allocate_bdev("bdev0");

	rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc);
	CU_ASSERT(rc == 0);
	CU_ASSERT(desc != NULL);
	CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc));
	io_ch = spdk_bdev_get_io_channel(desc);
	CU_ASSERT(io_ch != NULL);
	channel = spdk_io_channel_get_ctx(io_ch);
	mgmt_ch = channel->shared_resource->mgmt_ch;

	g_abort_done = false;

	ut_enable_io_type(SPDK_BDEV_IO_TYPE_ABORT, false);

	rc = spdk_bdev_abort(desc, io_ch, &io_ctx1, abort_done, NULL);
	CU_ASSERT(rc == -ENOTSUP);

	ut_enable_io_type(SPDK_BDEV_IO_TYPE_ABORT, true);

	rc = spdk_bdev_abort(desc, io_ch, &io_ctx2, abort_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_abort_done == true);
	CU_ASSERT(g_abort_status == SPDK_BDEV_IO_STATUS_FAILED);

	/* Test the case that the target I/O was successfully aborted. */
	g_io_done = false;

	rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, &io_ctx1);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	g_abort_done = false;
	g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS;

	rc = spdk_bdev_abort(desc, io_ch, &io_ctx1, abort_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED);
	stub_complete_io(1);
	CU_ASSERT(g_abort_done == true);
	CU_ASSERT(g_abort_status == SPDK_BDEV_IO_STATUS_SUCCESS);

	/* Test the case that the target I/O was not aborted because it completed
	 * in the middle of execution of the abort.
	 */
	g_io_done = false;

	rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, &io_ctx1);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	g_abort_done = false;
	g_io_exp_status = SPDK_BDEV_IO_STATUS_FAILED;

	rc = spdk_bdev_abort(desc, io_ch, &io_ctx1, abort_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS;
	stub_complete_io(1);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS);

	g_io_exp_status = SPDK_BDEV_IO_STATUS_FAILED;
	stub_complete_io(1);
	CU_ASSERT(g_abort_done == true);
	CU_ASSERT(g_abort_status == SPDK_BDEV_IO_STATUS_SUCCESS);

	g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS;

	bdev->optimal_io_boundary = 16;
	bdev->split_on_optimal_io_boundary = true;

	/* Test that a single-vector command which is split is aborted correctly.
	 * Offset 14, length 8, payload 0xF000
	 *  Child - Offset 14, length 2, payload 0xF000
	 *  Child - Offset 16, length 6, payload 0xF000 + 2 * 512
	 */
	g_io_done = false;

	rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 8, io_done, &io_ctx1);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2);

	g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS;

	rc = spdk_bdev_abort(desc, io_ch, &io_ctx1, abort_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED);
	stub_complete_io(2);
	CU_ASSERT(g_abort_done == true);
	CU_ASSERT(g_abort_status == SPDK_BDEV_IO_STATUS_SUCCESS);

	/* Test that a multi-vector command that needs to be split into multiple child
	 * I/Os is aborted correctly. The abort is requested before the second child I/O
	 * is submitted. The parent I/O should complete with failure without
	 * submitting the second child I/O.
	 */
	for (i = 0; i < BDEV_IO_NUM_CHILD_IOV * 2; i++) {
		iov[i].iov_base = (void *)((i + 1) * 0x10000);
		iov[i].iov_len = 512;
	}

	bdev->optimal_io_boundary = BDEV_IO_NUM_CHILD_IOV;
	g_io_done = false;
	rc = spdk_bdev_readv_blocks(desc, io_ch, iov, BDEV_IO_NUM_CHILD_IOV * 2, 0,
				    BDEV_IO_NUM_CHILD_IOV * 2, io_done, &io_ctx1);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);

	g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS;

	rc = spdk_bdev_abort(desc, io_ch, &io_ctx1, abort_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED);
	stub_complete_io(1);
	CU_ASSERT(g_abort_done == true);
	CU_ASSERT(g_abort_status == SPDK_BDEV_IO_STATUS_SUCCESS);

	g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS;

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);

	bdev->optimal_io_boundary = 16;
	g_io_done = false;

	/* Test that a single-vector command which is split is aborted correctly.
	 * Unlike the cases above, the child abort requests are submitted
	 * sequentially, because the spdk_bdev_io pool is too small to allocate
	 * them all at once.
	 */
	rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 50, io_done, &io_ctx1);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 4);

	g_abort_done = false;
	g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS;

	rc = spdk_bdev_abort(desc, io_ch, &io_ctx1, abort_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(!TAILQ_EMPTY(&mgmt_ch->io_wait_queue));
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 4);

	stub_complete_io(1);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED);
	stub_complete_io(3);
	CU_ASSERT(g_abort_done == true);
	CU_ASSERT(g_abort_status == SPDK_BDEV_IO_STATUS_SUCCESS);

	g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS;

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);

	spdk_put_io_channel(io_ch);
	spdk_bdev_close(desc);
	free_bdev(bdev);
	spdk_bdev_finish(bdev_fini_cb, NULL);
	poll_threads();
}
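/*
 * Unmap splitting is driven by max_unmap * max_unmap_segments: requests within
 * that limit go down whole, while larger ones are split into equally sized
 * children, with at most SPDK_BDEV_MAX_CHILDREN_UNMAP_WRITE_ZEROES_REQS of
 * them outstanding at a time.
 */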
static void
bdev_unmap(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc = NULL;
	struct spdk_io_channel *ioch;
	struct spdk_bdev_channel *bdev_ch;
	struct ut_expected_io *expected_io;
	struct spdk_bdev_opts bdev_opts = {};
	uint32_t i, num_outstanding;
	uint64_t offset, num_blocks, max_unmap_blocks, num_children;
	int rc;

	spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts));
	bdev_opts.bdev_io_pool_size = 512;
	bdev_opts.bdev_io_cache_size = 64;
	rc = spdk_bdev_set_opts(&bdev_opts);
	CU_ASSERT(rc == 0);

	spdk_bdev_initialize(bdev_init_cb, NULL);
	bdev = allocate_bdev("bdev");

	rc = spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc);
	CU_ASSERT_EQUAL(rc, 0);
	SPDK_CU_ASSERT_FATAL(desc != NULL);
	CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc));
	ioch = spdk_bdev_get_io_channel(desc);
	SPDK_CU_ASSERT_FATAL(ioch != NULL);
	bdev_ch = spdk_io_channel_get_ctx(ioch);
	CU_ASSERT(TAILQ_EMPTY(&bdev_ch->io_submitted));

	fn_table.submit_request = stub_submit_request;
	g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS;

	/* Case 1: First test that the request won't be split */
	num_blocks = 32;

	g_io_done = false;
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_UNMAP, 0, num_blocks, 0);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
	rc = spdk_bdev_unmap_blocks(desc, ioch, 0, num_blocks, io_done, NULL);
	CU_ASSERT_EQUAL(rc, 0);
	CU_ASSERT(g_io_done == false);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);

	/* Case 2: Test the split with 2 child requests */
	bdev->max_unmap = 8;
	bdev->max_unmap_segments = 2;
	max_unmap_blocks = bdev->max_unmap * bdev->max_unmap_segments;
	num_blocks = max_unmap_blocks * 2;
	offset = 0;

	g_io_done = false;
	for (i = 0; i < 2; i++) {
		expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_UNMAP, offset, max_unmap_blocks, 0);
		TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
		offset += max_unmap_blocks;
	}

	rc = spdk_bdev_unmap_blocks(desc, ioch, 0, num_blocks, io_done, NULL);
	CU_ASSERT_EQUAL(rc, 0);
	CU_ASSERT(g_io_done == false);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2);
	stub_complete_io(2);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);

	/* Case 3: Test the split with 15 child requests; the first 8 must finish first */
	num_children = 15;
	num_blocks = max_unmap_blocks * num_children;
	g_io_done = false;
	offset = 0;
	for (i = 0; i < num_children; i++) {
		expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_UNMAP, offset, max_unmap_blocks, 0);
		TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
		offset += max_unmap_blocks;
	}

	rc = spdk_bdev_unmap_blocks(desc, ioch, 0, num_blocks, io_done, NULL);
	CU_ASSERT_EQUAL(rc, 0);
	CU_ASSERT(g_io_done == false);

	while (num_children > 0) {
		num_outstanding = spdk_min(num_children, SPDK_BDEV_MAX_CHILDREN_UNMAP_WRITE_ZEROES_REQS);
		CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == num_outstanding);
		stub_complete_io(num_outstanding);
		num_children -= num_outstanding;
	}
	CU_ASSERT(g_io_done == true);

	spdk_put_io_channel(ioch);
	spdk_bdev_close(desc);
	free_bdev(bdev);
	spdk_bdev_finish(bdev_fini_cb, NULL);
	poll_threads();
}
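/*
 * Same splitting scheme as bdev_unmap, but for WRITE_ZEROES, with the limit
 * taken from bdev->max_write_zeroes.
 */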
static void
bdev_write_zeroes_split_test(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc = NULL;
	struct spdk_io_channel *ioch;
	struct spdk_bdev_channel *bdev_ch;
	struct ut_expected_io *expected_io;
	struct spdk_bdev_opts bdev_opts = {};
	uint32_t i, num_outstanding;
	uint64_t offset, num_blocks, max_write_zeroes_blocks, num_children;
	int rc;

	spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts));
	bdev_opts.bdev_io_pool_size = 512;
	bdev_opts.bdev_io_cache_size = 64;
	rc = spdk_bdev_set_opts(&bdev_opts);
	CU_ASSERT(rc == 0);

	spdk_bdev_initialize(bdev_init_cb, NULL);
	bdev = allocate_bdev("bdev");

	rc = spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc);
	CU_ASSERT_EQUAL(rc, 0);
	SPDK_CU_ASSERT_FATAL(desc != NULL);
	CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc));
	ioch = spdk_bdev_get_io_channel(desc);
	SPDK_CU_ASSERT_FATAL(ioch != NULL);
	bdev_ch = spdk_io_channel_get_ctx(ioch);
	CU_ASSERT(TAILQ_EMPTY(&bdev_ch->io_submitted));

	fn_table.submit_request = stub_submit_request;
	g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS;

	/* Case 1: First test that the request won't be split */
	num_blocks = 32;

	g_io_done = false;
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, 0, num_blocks, 0);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
	rc = spdk_bdev_write_zeroes_blocks(desc, ioch, 0, num_blocks, io_done, NULL);
	CU_ASSERT_EQUAL(rc, 0);
	CU_ASSERT(g_io_done == false);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);

	/* Case 2: Test the split with 2 child requests */
	max_write_zeroes_blocks = 8;
	bdev->max_write_zeroes = max_write_zeroes_blocks;
	num_blocks = max_write_zeroes_blocks * 2;
	offset = 0;

	g_io_done = false;
	for (i = 0; i < 2; i++) {
		expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, offset,
						   max_write_zeroes_blocks, 0);
		TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
		offset += max_write_zeroes_blocks;
	}

	rc = spdk_bdev_write_zeroes_blocks(desc, ioch, 0, num_blocks, io_done, NULL);
	CU_ASSERT_EQUAL(rc, 0);
	CU_ASSERT(g_io_done == false);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2);
	stub_complete_io(2);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);

	/* Case 3: Test the split with 15 child requests; the first 8 must finish first */
	num_children = 15;
	num_blocks = max_write_zeroes_blocks * num_children;
	g_io_done = false;
	offset = 0;
	for (i = 0; i < num_children; i++) {
		expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, offset,
						   max_write_zeroes_blocks, 0);
		TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
		offset += max_write_zeroes_blocks;
	}

	rc = spdk_bdev_write_zeroes_blocks(desc, ioch, 0, num_blocks, io_done, NULL);
	CU_ASSERT_EQUAL(rc, 0);
	CU_ASSERT(g_io_done == false);

	while (num_children > 0) {
		num_outstanding = spdk_min(num_children, SPDK_BDEV_MAX_CHILDREN_UNMAP_WRITE_ZEROES_REQS);
		CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == num_outstanding);
		stub_complete_io(num_outstanding);
		num_children -= num_outstanding;
	}
	CU_ASSERT(g_io_done == true);

	spdk_put_io_channel(ioch);
	spdk_bdev_close(desc);
	free_bdev(bdev);
	spdk_bdev_finish(bdev_fini_cb, NULL);
	poll_threads();
}
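/*
 * spdk_bdev_set_opts() must reject a zero opts_size and buffer pool sizes
 * below the BUF_SMALL_POOL_SIZE / BUF_LARGE_POOL_SIZE minimums, and accept
 * any values at or above them.
 */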
static void
bdev_set_options_test(void)
{
	struct spdk_bdev_opts bdev_opts = {};
	int rc;

	/* Case 1: Do not set opts_size */
	rc = spdk_bdev_set_opts(&bdev_opts);
	CU_ASSERT(rc == -1);

	spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts));
	bdev_opts.bdev_io_pool_size = 4;
	bdev_opts.bdev_io_cache_size = 2;
	bdev_opts.small_buf_pool_size = 4;

	/* Case 2: Do not set a valid small_buf_pool_size or large_buf_pool_size */
	rc = spdk_bdev_set_opts(&bdev_opts);
	CU_ASSERT(rc == -1);

	/* Case 3: Do not set a valid large_buf_pool_size */
	bdev_opts.small_buf_pool_size = BUF_SMALL_POOL_SIZE;
	bdev_opts.large_buf_pool_size = BUF_LARGE_POOL_SIZE - 1;
	rc = spdk_bdev_set_opts(&bdev_opts);
	CU_ASSERT(rc == -1);

	/* Case 4: Set a valid large_buf_pool_size */
	bdev_opts.large_buf_pool_size = BUF_LARGE_POOL_SIZE;
	rc = spdk_bdev_set_opts(&bdev_opts);
	CU_ASSERT(rc == 0);

	/* Case 5: Set different valid values for the small and large buf pools */
	bdev_opts.small_buf_pool_size = BUF_SMALL_POOL_SIZE + 3;
	bdev_opts.large_buf_pool_size = BUF_LARGE_POOL_SIZE + 3;
	rc = spdk_bdev_set_opts(&bdev_opts);
	CU_ASSERT(rc == 0);
}

static uint64_t
get_ns_time(void)
{
	int rc;
	struct timespec ts;

	rc = clock_gettime(CLOCK_MONOTONIC, &ts);
	CU_ASSERT(rc == 0);
	return ts.tv_sec * 1000 * 1000 * 1000 + ts.tv_nsec;
}

static int
rb_tree_get_height(struct spdk_bdev_name *bdev_name)
{
	int h1, h2;

	if (bdev_name == NULL) {
		return -1;
	} else {
		h1 = rb_tree_get_height(RB_LEFT(bdev_name, node));
		h2 = rb_tree_get_height(RB_RIGHT(bdev_name, node));

		return spdk_max(h1, h2) + 1;
	}
}

static void
bdev_multi_allocation(void)
{
	const int max_bdev_num = 1024 * 16;
	char name[max_bdev_num][10];
	char noexist_name[] = "invalid_bdev";
	struct spdk_bdev *bdev[max_bdev_num];
	int i, j;
	uint64_t last_time;
	int bdev_num;
	int height;

	for (j = 0; j < max_bdev_num; j++) {
		snprintf(name[j], sizeof(name[j]), "bdev%d", j);
	}

	for (i = 0; i < 16; i++) {
		last_time = get_ns_time();
		bdev_num = 1024 * (i + 1);
		for (j = 0; j < bdev_num; j++) {
			bdev[j] = allocate_bdev(name[j]);
			height = rb_tree_get_height(&bdev[j]->internal.bdev_name);
			CU_ASSERT(height <= (int)(spdk_u32log2(j + 1)));
		}
		SPDK_NOTICELOG("alloc bdev num %d takes %" PRIu64 " ms\n", bdev_num,
			       (get_ns_time() - last_time) / 1000 / 1000);
		for (j = 0; j < bdev_num; j++) {
			CU_ASSERT(spdk_bdev_get_by_name(name[j]) != NULL);
		}
		CU_ASSERT(spdk_bdev_get_by_name(noexist_name) == NULL);

		for (j = 0; j < bdev_num; j++) {
			free_bdev(bdev[j]);
		}
		for (j = 0; j < bdev_num; j++) {
			CU_ASSERT(spdk_bdev_get_by_name(name[j]) == NULL);
		}
	}
}
int
main(int argc, char **argv)
{
	CU_pSuite suite = NULL;
	unsigned int num_failures;

	CU_set_error_action(CUEA_ABORT);
	CU_initialize_registry();

	suite = CU_add_suite("bdev", null_init, null_clean);

	CU_ADD_TEST(suite, bytes_to_blocks_test);
	CU_ADD_TEST(suite, num_blocks_test);
	CU_ADD_TEST(suite, io_valid_test);
	CU_ADD_TEST(suite, open_write_test);
	CU_ADD_TEST(suite, alias_add_del_test);
	CU_ADD_TEST(suite, get_device_stat_test);
	CU_ADD_TEST(suite, bdev_io_types_test);
	CU_ADD_TEST(suite, bdev_io_wait_test);
	CU_ADD_TEST(suite, bdev_io_spans_split_test);
	CU_ADD_TEST(suite, bdev_io_boundary_split_test);
	CU_ADD_TEST(suite, bdev_io_max_size_and_segment_split_test);
	CU_ADD_TEST(suite, bdev_io_mix_split_test);
	CU_ADD_TEST(suite, bdev_io_split_with_io_wait);
	CU_ADD_TEST(suite, bdev_io_alignment_with_boundary);
	CU_ADD_TEST(suite, bdev_io_alignment);
	CU_ADD_TEST(suite, bdev_histograms);
	CU_ADD_TEST(suite, bdev_write_zeroes);
	CU_ADD_TEST(suite, bdev_compare_and_write);
	CU_ADD_TEST(suite, bdev_compare);
	CU_ADD_TEST(suite, bdev_zcopy_write);
	CU_ADD_TEST(suite, bdev_zcopy_read);
	CU_ADD_TEST(suite, bdev_open_while_hotremove);
	CU_ADD_TEST(suite, bdev_close_while_hotremove);
	CU_ADD_TEST(suite, bdev_open_ext);
	CU_ADD_TEST(suite, bdev_set_io_timeout);
	CU_ADD_TEST(suite, lba_range_overlap);
	CU_ADD_TEST(suite, lock_lba_range_check_ranges);
	CU_ADD_TEST(suite, lock_lba_range_with_io_outstanding);
	CU_ADD_TEST(suite, lock_lba_range_overlapped);
	CU_ADD_TEST(suite, bdev_io_abort);
	CU_ADD_TEST(suite, bdev_unmap);
	CU_ADD_TEST(suite, bdev_write_zeroes_split_test);
	CU_ADD_TEST(suite, bdev_set_options_test);
	CU_ADD_TEST(suite, bdev_multi_allocation);

	allocate_cores(1);
	allocate_threads(1);
	set_thread(0);

	CU_basic_set_mode(CU_BRM_VERBOSE);
	CU_basic_run_tests();
	num_failures = CU_get_number_of_failures();
	CU_cleanup_registry();

	free_threads();
	free_cores();

	return num_failures;
}