/*-
 *   BSD LICENSE
 *
 *   Copyright (c) Intel Corporation. All rights reserved.
 *   Copyright (c) 2019 Mellanox Technologies LTD. All rights reserved.
 *   Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "spdk_cunit.h"

#include "common/lib/ut_multithread.c"
#include "unit/lib/json_mock.c"

#include "spdk/config.h"
/* HACK: disable VTune integration so the unit test doesn't need VTune headers and libs to build */
#undef SPDK_CONFIG_VTUNE

#include "bdev/bdev.c"

DEFINE_STUB(spdk_notify_send, uint64_t, (const char *type, const char *ctx), 0);
DEFINE_STUB(spdk_notify_type_register, struct spdk_notify_type *, (const char *type), NULL);

int g_status;
int g_count;
enum spdk_bdev_event_type g_event_type1;
enum spdk_bdev_event_type g_event_type2;
struct spdk_histogram_data *g_histogram;
void *g_unregister_arg;
int g_unregister_rc;

void
spdk_scsi_nvme_translate(const struct spdk_bdev_io *bdev_io,
			 int *sc, int *sk, int *asc, int *ascq)
{
}

static int
null_init(void)
{
	return 0;
}

static int
null_clean(void)
{
	return 0;
}

static int
stub_destruct(void *ctx)
{
	return 0;
}

struct ut_expected_io {
	uint8_t type;
	uint64_t offset;
	uint64_t length;
	int iovcnt;
	struct iovec iov[BDEV_IO_NUM_CHILD_IOV];
	void *md_buf;
	struct spdk_bdev_ext_io_opts *ext_io_opts;
	TAILQ_ENTRY(ut_expected_io) link;
};

struct bdev_ut_channel {
	TAILQ_HEAD(, spdk_bdev_io) outstanding_io;
	uint32_t outstanding_io_count;
	TAILQ_HEAD(, ut_expected_io) expected_io;
};

static bool g_io_done;
static struct spdk_bdev_io *g_bdev_io;
static enum spdk_bdev_io_status g_io_status;
static enum spdk_bdev_io_status g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS;
static uint32_t g_bdev_ut_io_device;
static struct bdev_ut_channel *g_bdev_ut_channel;
static void *g_compare_read_buf;
static uint32_t g_compare_read_buf_len;
static void *g_compare_write_buf;
static uint32_t g_compare_write_buf_len;
static bool g_abort_done;
static enum spdk_bdev_io_status g_abort_status;
static void *g_zcopy_read_buf;
static uint32_t g_zcopy_read_buf_len;
static void *g_zcopy_write_buf;
static uint32_t g_zcopy_write_buf_len;
static struct spdk_bdev_io *g_zcopy_bdev_io;
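/*
 * Expected-I/O bookkeeping: a test queues one ut_expected_io entry per child
 * I/O it expects the bdev layer to submit, e.g.:
 *
 *   expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 14, 2, 1);
 *   ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 2 * 512);
 *   TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
 *
 * stub_submit_request() below pops the head of that queue on every submission
 * and CU_ASSERTs that the type, offset, length, metadata buffer and iovecs of
 * the actual spdk_bdev_io match what the test predicted.
 */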
static struct ut_expected_io *
ut_alloc_expected_io(uint8_t type, uint64_t offset, uint64_t length, int iovcnt)
{
	struct ut_expected_io *expected_io;

	expected_io = calloc(1, sizeof(*expected_io));
	SPDK_CU_ASSERT_FATAL(expected_io != NULL);

	expected_io->type = type;
	expected_io->offset = offset;
	expected_io->length = length;
	expected_io->iovcnt = iovcnt;

	return expected_io;
}

static void
ut_expected_io_set_iov(struct ut_expected_io *expected_io, int pos, void *base, size_t len)
{
	expected_io->iov[pos].iov_base = base;
	expected_io->iov[pos].iov_len = len;
}

static void
stub_submit_request(struct spdk_io_channel *_ch, struct spdk_bdev_io *bdev_io)
{
	struct bdev_ut_channel *ch = spdk_io_channel_get_ctx(_ch);
	struct ut_expected_io *expected_io;
	struct iovec *iov, *expected_iov;
	struct spdk_bdev_io *bio_to_abort;
	int i;

	g_bdev_io = bdev_io;

	if (g_compare_read_buf && bdev_io->type == SPDK_BDEV_IO_TYPE_READ) {
		uint32_t len = bdev_io->u.bdev.iovs[0].iov_len;

		CU_ASSERT(bdev_io->u.bdev.iovcnt == 1);
		CU_ASSERT(g_compare_read_buf_len == len);
		memcpy(bdev_io->u.bdev.iovs[0].iov_base, g_compare_read_buf, len);
	}

	if (g_compare_write_buf && bdev_io->type == SPDK_BDEV_IO_TYPE_WRITE) {
		uint32_t len = bdev_io->u.bdev.iovs[0].iov_len;

		CU_ASSERT(bdev_io->u.bdev.iovcnt == 1);
		CU_ASSERT(g_compare_write_buf_len == len);
		memcpy(g_compare_write_buf, bdev_io->u.bdev.iovs[0].iov_base, len);
	}

	if (g_compare_read_buf && bdev_io->type == SPDK_BDEV_IO_TYPE_COMPARE) {
		uint32_t len = bdev_io->u.bdev.iovs[0].iov_len;

		CU_ASSERT(bdev_io->u.bdev.iovcnt == 1);
		CU_ASSERT(g_compare_read_buf_len == len);
		if (memcmp(bdev_io->u.bdev.iovs[0].iov_base, g_compare_read_buf, len)) {
			g_io_exp_status = SPDK_BDEV_IO_STATUS_MISCOMPARE;
		}
	}

	if (bdev_io->type == SPDK_BDEV_IO_TYPE_ABORT) {
		if (g_io_exp_status == SPDK_BDEV_IO_STATUS_SUCCESS) {
			TAILQ_FOREACH(bio_to_abort, &ch->outstanding_io, module_link) {
				if (bio_to_abort == bdev_io->u.abort.bio_to_abort) {
					TAILQ_REMOVE(&ch->outstanding_io, bio_to_abort, module_link);
					ch->outstanding_io_count--;
					spdk_bdev_io_complete(bio_to_abort, SPDK_BDEV_IO_STATUS_FAILED);
					break;
				}
			}
		}
	}

	if (bdev_io->type == SPDK_BDEV_IO_TYPE_ZCOPY) {
		if (bdev_io->u.bdev.zcopy.start) {
			g_zcopy_bdev_io = bdev_io;
			if (bdev_io->u.bdev.zcopy.populate) {
				/* Start of a read */
				CU_ASSERT(g_zcopy_read_buf != NULL);
				CU_ASSERT(g_zcopy_read_buf_len > 0);
				bdev_io->u.bdev.iovs[0].iov_base = g_zcopy_read_buf;
				bdev_io->u.bdev.iovs[0].iov_len = g_zcopy_read_buf_len;
				bdev_io->u.bdev.iovcnt = 1;
			} else {
				/* Start of a write */
				CU_ASSERT(g_zcopy_write_buf != NULL);
				CU_ASSERT(g_zcopy_write_buf_len > 0);
				bdev_io->u.bdev.iovs[0].iov_base = g_zcopy_write_buf;
				bdev_io->u.bdev.iovs[0].iov_len = g_zcopy_write_buf_len;
				bdev_io->u.bdev.iovcnt = 1;
			}
		} else {
			if (bdev_io->u.bdev.zcopy.commit) {
				/* End of write */
				CU_ASSERT(bdev_io->u.bdev.iovs[0].iov_base == g_zcopy_write_buf);
				CU_ASSERT(bdev_io->u.bdev.iovs[0].iov_len == g_zcopy_write_buf_len);
				CU_ASSERT(bdev_io->u.bdev.iovcnt == 1);
				g_zcopy_write_buf = NULL;
				g_zcopy_write_buf_len = 0;
			} else {
				/* End of read */
				CU_ASSERT(bdev_io->u.bdev.iovs[0].iov_base == g_zcopy_read_buf);
				CU_ASSERT(bdev_io->u.bdev.iovs[0].iov_len == g_zcopy_read_buf_len);
				CU_ASSERT(bdev_io->u.bdev.iovcnt == 1);
				g_zcopy_read_buf = NULL;
				g_zcopy_read_buf_len = 0;
			}
		}
	}

	TAILQ_INSERT_TAIL(&ch->outstanding_io, bdev_io, module_link);
	ch->outstanding_io_count++;

	expected_io = TAILQ_FIRST(&ch->expected_io);
	if (expected_io == NULL) {
		return;
	}
	TAILQ_REMOVE(&ch->expected_io, expected_io, link);

	if (expected_io->type != SPDK_BDEV_IO_TYPE_INVALID) {
		CU_ASSERT(bdev_io->type == expected_io->type);
	}

	if (expected_io->md_buf != NULL) {
		CU_ASSERT(expected_io->md_buf == bdev_io->u.bdev.md_buf);
	}

	if (expected_io->length == 0) {
		free(expected_io);
		return;
	}

	CU_ASSERT(expected_io->offset == bdev_io->u.bdev.offset_blocks);
	CU_ASSERT(expected_io->length == bdev_io->u.bdev.num_blocks);

	if (expected_io->iovcnt == 0) {
		free(expected_io);
		/* UNMAP, WRITE_ZEROES and FLUSH don't have iovs, so we can just return now. */
		return;
	}

	CU_ASSERT(expected_io->iovcnt == bdev_io->u.bdev.iovcnt);
	for (i = 0; i < expected_io->iovcnt; i++) {
		iov = &bdev_io->u.bdev.iovs[i];
		expected_iov = &expected_io->iov[i];
		CU_ASSERT(iov->iov_len == expected_iov->iov_len);
		CU_ASSERT(iov->iov_base == expected_iov->iov_base);
	}

	if (expected_io->ext_io_opts) {
		CU_ASSERT(expected_io->ext_io_opts == bdev_io->internal.ext_opts);
	}

	free(expected_io);
}

static void
stub_submit_request_get_buf_cb(struct spdk_io_channel *_ch,
			       struct spdk_bdev_io *bdev_io, bool success)
{
	CU_ASSERT(success == true);

	stub_submit_request(_ch, bdev_io);
}

static void
stub_submit_request_get_buf(struct spdk_io_channel *_ch, struct spdk_bdev_io *bdev_io)
{
	spdk_bdev_io_get_buf(bdev_io, stub_submit_request_get_buf_cb,
			     bdev_io->u.bdev.num_blocks * bdev_io->bdev->blocklen);
}
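/*
 * Complete up to num_to_complete outstanding I/Os in FIFO order, each with
 * status g_io_exp_status.  Returns the number actually completed so tests
 * can assert how many children were in flight.
 */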
static uint32_t
stub_complete_io(uint32_t num_to_complete)
{
	struct bdev_ut_channel *ch = g_bdev_ut_channel;
	struct spdk_bdev_io *bdev_io;
	static enum spdk_bdev_io_status io_status;
	uint32_t num_completed = 0;

	while (num_completed < num_to_complete) {
		if (TAILQ_EMPTY(&ch->outstanding_io)) {
			break;
		}
		bdev_io = TAILQ_FIRST(&ch->outstanding_io);
		TAILQ_REMOVE(&ch->outstanding_io, bdev_io, module_link);
		ch->outstanding_io_count--;
		io_status = g_io_exp_status == SPDK_BDEV_IO_STATUS_SUCCESS ? SPDK_BDEV_IO_STATUS_SUCCESS :
			    g_io_exp_status;
		spdk_bdev_io_complete(bdev_io, io_status);
		num_completed++;
	}

	return num_completed;
}

static struct spdk_io_channel *
bdev_ut_get_io_channel(void *ctx)
{
	return spdk_get_io_channel(&g_bdev_ut_io_device);
}

static bool g_io_types_supported[SPDK_BDEV_NUM_IO_TYPES] = {
	[SPDK_BDEV_IO_TYPE_READ] = true,
	[SPDK_BDEV_IO_TYPE_WRITE] = true,
	[SPDK_BDEV_IO_TYPE_COMPARE] = true,
	[SPDK_BDEV_IO_TYPE_UNMAP] = true,
	[SPDK_BDEV_IO_TYPE_FLUSH] = true,
	[SPDK_BDEV_IO_TYPE_RESET] = true,
	[SPDK_BDEV_IO_TYPE_NVME_ADMIN] = true,
	[SPDK_BDEV_IO_TYPE_NVME_IO] = true,
	[SPDK_BDEV_IO_TYPE_NVME_IO_MD] = true,
	[SPDK_BDEV_IO_TYPE_WRITE_ZEROES] = true,
	[SPDK_BDEV_IO_TYPE_ZCOPY] = true,
	[SPDK_BDEV_IO_TYPE_ABORT] = true,
};

static void
ut_enable_io_type(enum spdk_bdev_io_type io_type, bool enable)
{
	g_io_types_supported[io_type] = enable;
}

static bool
stub_io_type_supported(void *_bdev, enum spdk_bdev_io_type io_type)
{
	return g_io_types_supported[io_type];
}

static struct spdk_bdev_fn_table fn_table = {
	.destruct = stub_destruct,
	.submit_request = stub_submit_request,
	.get_io_channel = bdev_ut_get_io_channel,
	.io_type_supported = stub_io_type_supported,
};

static int
bdev_ut_create_ch(void *io_device, void *ctx_buf)
{
	struct bdev_ut_channel *ch = ctx_buf;

	CU_ASSERT(g_bdev_ut_channel == NULL);
	g_bdev_ut_channel = ch;

	TAILQ_INIT(&ch->outstanding_io);
	ch->outstanding_io_count = 0;
	TAILQ_INIT(&ch->expected_io);
	return 0;
}

static void
bdev_ut_destroy_ch(void *io_device, void *ctx_buf)
{
	CU_ASSERT(g_bdev_ut_channel != NULL);
	g_bdev_ut_channel = NULL;
}

struct spdk_bdev_module bdev_ut_if;

static int
bdev_ut_module_init(void)
{
	spdk_io_device_register(&g_bdev_ut_io_device, bdev_ut_create_ch, bdev_ut_destroy_ch,
				sizeof(struct bdev_ut_channel), NULL);
	spdk_bdev_module_init_done(&bdev_ut_if);
	return 0;
}

static void
bdev_ut_module_fini(void)
{
	spdk_io_device_unregister(&g_bdev_ut_io_device, NULL);
}

struct spdk_bdev_module bdev_ut_if = {
	.name = "bdev_ut",
	.module_init = bdev_ut_module_init,
	.module_fini = bdev_ut_module_fini,
	.async_init = true,
};

static void vbdev_ut_examine(struct spdk_bdev *bdev);

static int
vbdev_ut_module_init(void)
{
	return 0;
}

static void
vbdev_ut_module_fini(void)
{
}

struct spdk_bdev_module vbdev_ut_if = {
	.name = "vbdev_ut",
	.module_init = vbdev_ut_module_init,
	.module_fini = vbdev_ut_module_fini,
	.examine_config = vbdev_ut_examine,
};

SPDK_BDEV_MODULE_REGISTER(bdev_ut, &bdev_ut_if)
SPDK_BDEV_MODULE_REGISTER(vbdev_ut, &vbdev_ut_if)

static void
vbdev_ut_examine(struct spdk_bdev *bdev)
{
	spdk_bdev_module_examine_done(&vbdev_ut_if);
}

static struct spdk_bdev *
allocate_bdev(char *name)
{
	struct spdk_bdev *bdev;
	int rc;

	bdev = calloc(1, sizeof(*bdev));
	SPDK_CU_ASSERT_FATAL(bdev != NULL);

	bdev->name = name;
	bdev->fn_table = &fn_table;
	bdev->module = &bdev_ut_if;
	bdev->blockcnt = 1024;
	bdev->blocklen = 512;

	rc = spdk_bdev_register(bdev);
	CU_ASSERT(rc == 0);

	return bdev;
}
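/*
 * allocate_vbdev() mirrors allocate_bdev() but registers under vbdev_ut_if
 * and leaves blockcnt/blocklen at zero, since the open/claim tests never
 * issue I/O to the virtual bdevs.
 */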
static struct spdk_bdev *
allocate_vbdev(char *name)
{
	struct spdk_bdev *bdev;
	int rc;

	bdev = calloc(1, sizeof(*bdev));
	SPDK_CU_ASSERT_FATAL(bdev != NULL);

	bdev->name = name;
	bdev->fn_table = &fn_table;
	bdev->module = &vbdev_ut_if;

	rc = spdk_bdev_register(bdev);
	CU_ASSERT(rc == 0);

	return bdev;
}

static void
free_bdev(struct spdk_bdev *bdev)
{
	spdk_bdev_unregister(bdev, NULL, NULL);
	poll_threads();
	memset(bdev, 0xFF, sizeof(*bdev));
	free(bdev);
}

static void
free_vbdev(struct spdk_bdev *bdev)
{
	spdk_bdev_unregister(bdev, NULL, NULL);
	poll_threads();
	memset(bdev, 0xFF, sizeof(*bdev));
	free(bdev);
}

static void
get_device_stat_cb(struct spdk_bdev *bdev, struct spdk_bdev_io_stat *stat, void *cb_arg, int rc)
{
	const char *bdev_name;

	CU_ASSERT(bdev != NULL);
	CU_ASSERT(rc == 0);
	bdev_name = spdk_bdev_get_name(bdev);
	CU_ASSERT_STRING_EQUAL(bdev_name, "bdev0");

	free(stat);

	*(bool *)cb_arg = true;
}

static void
bdev_unregister_cb(void *cb_arg, int rc)
{
	g_unregister_arg = cb_arg;
	g_unregister_rc = rc;
}

static void
bdev_ut_event_cb(enum spdk_bdev_event_type type, struct spdk_bdev *bdev, void *event_ctx)
{
}

static void
bdev_open_cb1(enum spdk_bdev_event_type type, struct spdk_bdev *bdev, void *event_ctx)
{
	struct spdk_bdev_desc *desc = *(struct spdk_bdev_desc **)event_ctx;

	g_event_type1 = type;
	if (SPDK_BDEV_EVENT_REMOVE == type) {
		spdk_bdev_close(desc);
	}
}

static void
bdev_open_cb2(enum spdk_bdev_event_type type, struct spdk_bdev *bdev, void *event_ctx)
{
	struct spdk_bdev_desc *desc = *(struct spdk_bdev_desc **)event_ctx;

	g_event_type2 = type;
	if (SPDK_BDEV_EVENT_REMOVE == type) {
		spdk_bdev_close(desc);
	}
}

static void
get_device_stat_test(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_io_stat *stat;
	bool done;

	bdev = allocate_bdev("bdev0");
	stat = calloc(1, sizeof(struct spdk_bdev_io_stat));
	if (stat == NULL) {
		free_bdev(bdev);
		return;
	}

	done = false;
	spdk_bdev_get_device_stat(bdev, stat, get_device_stat_cb, &done);
	while (!done) {
		poll_threads();
	}

	free_bdev(bdev);
}
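/*
 * open_write_test() builds the claim tree drawn in the comment below.  The
 * invariant under test: once a module claims a bdev via
 * spdk_bdev_module_claim_bdev(), read-only opens still succeed but
 * read/write opens fail with -EPERM.
 */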
static void
open_write_test(void)
{
	struct spdk_bdev *bdev[9];
	struct spdk_bdev_desc *desc[9] = {};
	int rc;

	/*
	 * Create a tree of bdevs to test various open w/ write cases.
	 *
	 * bdev0 through bdev3 are physical block devices, such as NVMe
	 * namespaces or Ceph block devices.
	 *
	 * bdev4 is a virtual bdev with multiple base bdevs.  This models
	 * caching or RAID use cases.
	 *
	 * bdev5 and bdev6 are virtual bdevs sharing the same base bdev
	 * (bdev2).  This models partitioning or logical volume use cases.
	 *
	 * bdev7 is a virtual bdev with multiple base bdevs.  One of its base
	 * bdevs (bdev2) is shared with other virtual bdevs: bdev5 and bdev6.
	 * This models caching, RAID, partitioning or logical volume use cases.
	 *
	 * bdev8 is a virtual bdev with multiple base bdevs, but these
	 * base bdevs are themselves virtual bdevs.
	 *
	 *                bdev8
	 *                  |
	 *            +----------+
	 *            |          |
	 *          bdev4      bdev5   bdev6   bdev7
	 *            |          |       |       |
	 *        +---+---+   +---+      +    +---+---+
	 *        |       |        \     |   /         \
	 *      bdev0   bdev1       bdev2             bdev3
	 */

	bdev[0] = allocate_bdev("bdev0");
	rc = spdk_bdev_module_claim_bdev(bdev[0], NULL, &bdev_ut_if);
	CU_ASSERT(rc == 0);

	bdev[1] = allocate_bdev("bdev1");
	rc = spdk_bdev_module_claim_bdev(bdev[1], NULL, &bdev_ut_if);
	CU_ASSERT(rc == 0);

	bdev[2] = allocate_bdev("bdev2");
	rc = spdk_bdev_module_claim_bdev(bdev[2], NULL, &bdev_ut_if);
	CU_ASSERT(rc == 0);

	bdev[3] = allocate_bdev("bdev3");
	rc = spdk_bdev_module_claim_bdev(bdev[3], NULL, &bdev_ut_if);
	CU_ASSERT(rc == 0);

	bdev[4] = allocate_vbdev("bdev4");
	rc = spdk_bdev_module_claim_bdev(bdev[4], NULL, &bdev_ut_if);
	CU_ASSERT(rc == 0);

	bdev[5] = allocate_vbdev("bdev5");
	rc = spdk_bdev_module_claim_bdev(bdev[5], NULL, &bdev_ut_if);
	CU_ASSERT(rc == 0);

	bdev[6] = allocate_vbdev("bdev6");

	bdev[7] = allocate_vbdev("bdev7");

	bdev[8] = allocate_vbdev("bdev8");

	/* Open bdev0 read-only.  This should succeed. */
	rc = spdk_bdev_open_ext("bdev0", false, bdev_ut_event_cb, NULL, &desc[0]);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(desc[0] != NULL);
	CU_ASSERT(bdev[0] == spdk_bdev_desc_get_bdev(desc[0]));
	spdk_bdev_close(desc[0]);

	/*
	 * Open bdev1 read/write.  This should fail since bdev1 has been claimed
	 * by a vbdev module.
	 */
	rc = spdk_bdev_open_ext("bdev1", true, bdev_ut_event_cb, NULL, &desc[1]);
	CU_ASSERT(rc == -EPERM);

	/*
	 * Open bdev4 read/write.  This should fail since bdev4 has been claimed
	 * by a vbdev module.
	 */
	rc = spdk_bdev_open_ext("bdev4", true, bdev_ut_event_cb, NULL, &desc[4]);
	CU_ASSERT(rc == -EPERM);

	/* Open bdev4 read-only.  This should succeed. */
	rc = spdk_bdev_open_ext("bdev4", false, bdev_ut_event_cb, NULL, &desc[4]);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(desc[4] != NULL);
	CU_ASSERT(bdev[4] == spdk_bdev_desc_get_bdev(desc[4]));
	spdk_bdev_close(desc[4]);

	/*
	 * Open bdev8 read/write.  This should succeed since it has not been
	 * claimed by any vbdev module.
	 */
	rc = spdk_bdev_open_ext("bdev8", true, bdev_ut_event_cb, NULL, &desc[8]);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(desc[8] != NULL);
	CU_ASSERT(bdev[8] == spdk_bdev_desc_get_bdev(desc[8]));
	spdk_bdev_close(desc[8]);

	/*
	 * Open bdev5 read/write.  This should fail since bdev5 has been claimed
	 * by a vbdev module.
	 */
	rc = spdk_bdev_open_ext("bdev5", true, bdev_ut_event_cb, NULL, &desc[5]);
	CU_ASSERT(rc == -EPERM);

	/* Open bdev5 read-only.  This should succeed. */
	rc = spdk_bdev_open_ext("bdev5", false, bdev_ut_event_cb, NULL, &desc[5]);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(desc[5] != NULL);
	CU_ASSERT(bdev[5] == spdk_bdev_desc_get_bdev(desc[5]));
	spdk_bdev_close(desc[5]);

	free_vbdev(bdev[8]);

	free_vbdev(bdev[5]);
	free_vbdev(bdev[6]);
	free_vbdev(bdev[7]);

	free_vbdev(bdev[4]);

	free_bdev(bdev[0]);
	free_bdev(bdev[1]);
	free_bdev(bdev[2]);
	free_bdev(bdev[3]);
}

static void
bytes_to_blocks_test(void)
{
	struct spdk_bdev bdev;
	uint64_t offset_blocks, num_blocks;

	memset(&bdev, 0, sizeof(bdev));

	bdev.blocklen = 512;

	/* All parameters valid */
	offset_blocks = 0;
	num_blocks = 0;
	CU_ASSERT(bdev_bytes_to_blocks(&bdev, 512, &offset_blocks, 1024, &num_blocks) == 0);
	CU_ASSERT(offset_blocks == 1);
	CU_ASSERT(num_blocks == 2);

	/* Offset not a block multiple */
	CU_ASSERT(bdev_bytes_to_blocks(&bdev, 3, &offset_blocks, 512, &num_blocks) != 0);

	/* Length not a block multiple */
	CU_ASSERT(bdev_bytes_to_blocks(&bdev, 512, &offset_blocks, 3, &num_blocks) != 0);

	/* Test the case where blocklen is not a power of two */
	bdev.blocklen = 100;
	CU_ASSERT(bdev_bytes_to_blocks(&bdev, 100, &offset_blocks, 200, &num_blocks) == 0);
	CU_ASSERT(offset_blocks == 1);
	CU_ASSERT(num_blocks == 2);

	/* Offset not a block multiple */
	CU_ASSERT(bdev_bytes_to_blocks(&bdev, 3, &offset_blocks, 100, &num_blocks) != 0);

	/* Length not a block multiple */
	CU_ASSERT(bdev_bytes_to_blocks(&bdev, 100, &offset_blocks, 3, &num_blocks) != 0);
}

static void
num_blocks_test(void)
{
	struct spdk_bdev bdev;
	struct spdk_bdev_desc *desc = NULL;
	int rc;

	memset(&bdev, 0, sizeof(bdev));
	bdev.name = "num_blocks";
	bdev.fn_table = &fn_table;
	bdev.module = &bdev_ut_if;
	spdk_bdev_register(&bdev);
	spdk_bdev_notify_blockcnt_change(&bdev, 50);

	/* Growing block number */
	CU_ASSERT(spdk_bdev_notify_blockcnt_change(&bdev, 70) == 0);
	/* Shrinking block number */
	CU_ASSERT(spdk_bdev_notify_blockcnt_change(&bdev, 30) == 0);

	rc = spdk_bdev_open_ext("num_blocks", false, bdev_open_cb1, &desc, &desc);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(desc != NULL);
	CU_ASSERT(&bdev == spdk_bdev_desc_get_bdev(desc));

	/* Growing block number */
	CU_ASSERT(spdk_bdev_notify_blockcnt_change(&bdev, 80) == 0);
	/* Shrinking block number is not allowed while the bdev is open */
	CU_ASSERT(spdk_bdev_notify_blockcnt_change(&bdev, 20) != 0);

	g_event_type1 = 0xFF;
	/* Growing block number */
	CU_ASSERT(spdk_bdev_notify_blockcnt_change(&bdev, 90) == 0);

	poll_threads();
	CU_ASSERT_EQUAL(g_event_type1, SPDK_BDEV_EVENT_RESIZE);

	g_event_type1 = 0xFF;
	/* Growing block number and closing */
	CU_ASSERT(spdk_bdev_notify_blockcnt_change(&bdev, 100) == 0);

	spdk_bdev_close(desc);
	spdk_bdev_unregister(&bdev, NULL, NULL);

	poll_threads();

	/* Callback is not called for a closed device */
	CU_ASSERT_EQUAL(g_event_type1, 0xFF);
}
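/*
 * io_valid_test() drives bdev_io_valid_blocks() directly: with 100 blocks,
 * offsets 0..99 are addressable and offset + length must not run past the
 * end of the bdev or overflow uint64_t.
 */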
static void
io_valid_test(void)
{
	struct spdk_bdev bdev;

	memset(&bdev, 0, sizeof(bdev));

	bdev.blocklen = 512;
	CU_ASSERT(pthread_mutex_init(&bdev.internal.mutex, NULL) == 0);

	spdk_bdev_notify_blockcnt_change(&bdev, 100);

	/* All parameters valid */
	CU_ASSERT(bdev_io_valid_blocks(&bdev, 1, 2) == true);

	/* Last valid block */
	CU_ASSERT(bdev_io_valid_blocks(&bdev, 99, 1) == true);

	/* Offset past end of bdev */
	CU_ASSERT(bdev_io_valid_blocks(&bdev, 100, 1) == false);

	/* Offset + length past end of bdev */
	CU_ASSERT(bdev_io_valid_blocks(&bdev, 99, 2) == false);

	/* Offset near end of uint64_t range (2^64 - 1) */
	CU_ASSERT(bdev_io_valid_blocks(&bdev, 18446744073709551615ULL, 1) == false);

	CU_ASSERT(pthread_mutex_destroy(&bdev.internal.mutex) == 0);
}

static void
alias_add_del_test(void)
{
	struct spdk_bdev *bdev[3];
	int rc;

	/* Create and register bdevs */
	bdev[0] = allocate_bdev("bdev0");
	SPDK_CU_ASSERT_FATAL(bdev[0] != NULL);

	bdev[1] = allocate_bdev("bdev1");
	SPDK_CU_ASSERT_FATAL(bdev[1] != NULL);

	bdev[2] = allocate_bdev("bdev2");
	SPDK_CU_ASSERT_FATAL(bdev[2] != NULL);

	poll_threads();

	/*
	 * Try adding an alias identical to the bdev's name.  The alias is
	 * identical to the name, so it cannot be added to the aliases list.
	 */
	rc = spdk_bdev_alias_add(bdev[0], bdev[0]->name);
	CU_ASSERT(rc == -EEXIST);

	/* Trying to add an empty alias should fail */
	rc = spdk_bdev_alias_add(bdev[0], NULL);
	CU_ASSERT(rc == -EINVAL);

	/* Try adding the same alias to two different registered bdevs */

	/* Alias is used for the first time, so this one should pass */
	rc = spdk_bdev_alias_add(bdev[0], "proper alias 0");
	CU_ASSERT(rc == 0);

	/* Alias was added to another bdev, so this one should fail */
	rc = spdk_bdev_alias_add(bdev[1], "proper alias 0");
	CU_ASSERT(rc == -EEXIST);

	/* Alias is used for the first time, so this one should pass */
	rc = spdk_bdev_alias_add(bdev[1], "proper alias 1");
	CU_ASSERT(rc == 0);

	/* Try removing aliases from registered bdevs */

	/* Alias is not on the bdev's aliases list, so this one should fail */
	rc = spdk_bdev_alias_del(bdev[0], "not existing");
	CU_ASSERT(rc == -ENOENT);

	/* Alias is present on the bdev's aliases list, so this one should pass */
	rc = spdk_bdev_alias_del(bdev[0], "proper alias 0");
	CU_ASSERT(rc == 0);

	/* Alias is present on the bdev's aliases list, so this one should pass */
	rc = spdk_bdev_alias_del(bdev[1], "proper alias 1");
	CU_ASSERT(rc == 0);

	/*
	 * Try removing the name instead of an alias.  This should fail; the
	 * name cannot be changed or removed.
	 */
	rc = spdk_bdev_alias_del(bdev[0], bdev[0]->name);
	CU_ASSERT(rc != 0);

	/* Try deleting all aliases from an empty alias list */
	spdk_bdev_alias_del_all(bdev[2]);
	SPDK_CU_ASSERT_FATAL(TAILQ_EMPTY(&bdev[2]->aliases));

	/* Try deleting all aliases from a non-empty alias list */
	rc = spdk_bdev_alias_add(bdev[2], "alias0");
	CU_ASSERT(rc == 0);
	rc = spdk_bdev_alias_add(bdev[2], "alias1");
	CU_ASSERT(rc == 0);
	spdk_bdev_alias_del_all(bdev[2]);
	CU_ASSERT(TAILQ_EMPTY(&bdev[2]->aliases));

	/* Unregister and free bdevs */
	spdk_bdev_unregister(bdev[0], NULL, NULL);
	spdk_bdev_unregister(bdev[1], NULL, NULL);
	spdk_bdev_unregister(bdev[2], NULL, NULL);

	poll_threads();

	free(bdev[0]);
	free(bdev[1]);
	free(bdev[2]);
}
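/*
 * Generic completion callback used by the I/O tests.  For a zcopy start the
 * bdev_io must stay alive until the matching commit/unpopulate, so it is
 * stashed in g_zcopy_bdev_io instead of being freed here.
 */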
static void
io_done(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
{
	g_io_done = true;
	g_io_status = bdev_io->internal.status;
	if ((bdev_io->type == SPDK_BDEV_IO_TYPE_ZCOPY) &&
	    (bdev_io->u.bdev.zcopy.start)) {
		g_zcopy_bdev_io = bdev_io;
	} else {
		spdk_bdev_free_io(bdev_io);
		g_zcopy_bdev_io = NULL;
	}
}

static void
bdev_init_cb(void *arg, int rc)
{
	CU_ASSERT(rc == 0);
}

static void
bdev_fini_cb(void *arg)
{
}

struct bdev_ut_io_wait_entry {
	struct spdk_bdev_io_wait_entry entry;
	struct spdk_io_channel *io_ch;
	struct spdk_bdev_desc *desc;
	bool submitted;
};

static void
io_wait_cb(void *arg)
{
	struct bdev_ut_io_wait_entry *entry = arg;
	int rc;

	rc = spdk_bdev_read_blocks(entry->desc, entry->io_ch, NULL, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	entry->submitted = true;
}

static void
bdev_io_types_test(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc = NULL;
	struct spdk_io_channel *io_ch;
	struct spdk_bdev_opts bdev_opts = {};
	int rc;

	spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts));
	bdev_opts.bdev_io_pool_size = 4;
	bdev_opts.bdev_io_cache_size = 2;

	rc = spdk_bdev_set_opts(&bdev_opts);
	CU_ASSERT(rc == 0);
	spdk_bdev_initialize(bdev_init_cb, NULL);
	poll_threads();

	bdev = allocate_bdev("bdev0");

	rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc);
	CU_ASSERT(rc == 0);
	poll_threads();
	SPDK_CU_ASSERT_FATAL(desc != NULL);
	CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc));
	io_ch = spdk_bdev_get_io_channel(desc);
	CU_ASSERT(io_ch != NULL);

	/* WRITE and WRITE ZEROES are not supported */
	ut_enable_io_type(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, false);
	ut_enable_io_type(SPDK_BDEV_IO_TYPE_WRITE, false);
	rc = spdk_bdev_write_zeroes_blocks(desc, io_ch, 0, 128, io_done, NULL);
	CU_ASSERT(rc == -ENOTSUP);
	ut_enable_io_type(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, true);
	ut_enable_io_type(SPDK_BDEV_IO_TYPE_WRITE, true);

	/* NVME_IO, NVME_IO_MD and NVME_ADMIN are not supported */
	ut_enable_io_type(SPDK_BDEV_IO_TYPE_NVME_IO, false);
	ut_enable_io_type(SPDK_BDEV_IO_TYPE_NVME_IO_MD, false);
	ut_enable_io_type(SPDK_BDEV_IO_TYPE_NVME_ADMIN, false);
	rc = spdk_bdev_nvme_io_passthru(desc, io_ch, NULL, NULL, 0, NULL, NULL);
	CU_ASSERT(rc == -ENOTSUP);
	rc = spdk_bdev_nvme_io_passthru_md(desc, io_ch, NULL, NULL, 0, NULL, 0, NULL, NULL);
	CU_ASSERT(rc == -ENOTSUP);
	rc = spdk_bdev_nvme_admin_passthru(desc, io_ch, NULL, NULL, 0, NULL, NULL);
	CU_ASSERT(rc == -ENOTSUP);
	ut_enable_io_type(SPDK_BDEV_IO_TYPE_NVME_IO, true);
	ut_enable_io_type(SPDK_BDEV_IO_TYPE_NVME_IO_MD, true);
	ut_enable_io_type(SPDK_BDEV_IO_TYPE_NVME_ADMIN, true);

	spdk_put_io_channel(io_ch);
	spdk_bdev_close(desc);
	free_bdev(bdev);
	spdk_bdev_finish(bdev_fini_cb, NULL);
	poll_threads();
}
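/*
 * bdev_io_wait_test(): with bdev_io_pool_size forced down to 4, a fifth read
 * fails with -ENOMEM and must be retried via spdk_bdev_queue_io_wait().  Each
 * completion returns one bdev_io to the pool, so queued waiters are
 * resubmitted one at a time, in order.
 */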
static void
bdev_io_wait_test(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc = NULL;
	struct spdk_io_channel *io_ch;
	struct spdk_bdev_opts bdev_opts = {};
	struct bdev_ut_io_wait_entry io_wait_entry;
	struct bdev_ut_io_wait_entry io_wait_entry2;
	int rc;

	spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts));
	bdev_opts.bdev_io_pool_size = 4;
	bdev_opts.bdev_io_cache_size = 2;

	rc = spdk_bdev_set_opts(&bdev_opts);
	CU_ASSERT(rc == 0);
	spdk_bdev_initialize(bdev_init_cb, NULL);
	poll_threads();

	bdev = allocate_bdev("bdev0");

	rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc);
	CU_ASSERT(rc == 0);
	poll_threads();
	SPDK_CU_ASSERT_FATAL(desc != NULL);
	CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc));
	io_ch = spdk_bdev_get_io_channel(desc);
	CU_ASSERT(io_ch != NULL);

	rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 4);

	rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, NULL);
	CU_ASSERT(rc == -ENOMEM);

	io_wait_entry.entry.bdev = bdev;
	io_wait_entry.entry.cb_fn = io_wait_cb;
	io_wait_entry.entry.cb_arg = &io_wait_entry;
	io_wait_entry.io_ch = io_ch;
	io_wait_entry.desc = desc;
	io_wait_entry.submitted = false;
	/* Cannot use the same io_wait_entry for two different calls. */
	memcpy(&io_wait_entry2, &io_wait_entry, sizeof(io_wait_entry));
	io_wait_entry2.entry.cb_arg = &io_wait_entry2;

	/* Queue two I/O waits. */
	rc = spdk_bdev_queue_io_wait(bdev, io_ch, &io_wait_entry.entry);
	CU_ASSERT(rc == 0);
	CU_ASSERT(io_wait_entry.submitted == false);
	rc = spdk_bdev_queue_io_wait(bdev, io_ch, &io_wait_entry2.entry);
	CU_ASSERT(rc == 0);
	CU_ASSERT(io_wait_entry2.submitted == false);

	stub_complete_io(1);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 4);
	CU_ASSERT(io_wait_entry.submitted == true);
	CU_ASSERT(io_wait_entry2.submitted == false);

	stub_complete_io(1);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 4);
	CU_ASSERT(io_wait_entry2.submitted == true);

	stub_complete_io(4);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);

	spdk_put_io_channel(io_ch);
	spdk_bdev_close(desc);
	free_bdev(bdev);
	spdk_bdev_finish(bdev_fini_cb, NULL);
	poll_threads();
}
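/*
 * bdev_io_spans_split_test() calls bdev_io_should_split() directly on a
 * stack-allocated spdk_bdev_io to check the split decision for each of the
 * three triggers: optimal_io_boundary, max_segment_size and max_num_segments.
 */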
static void
bdev_io_spans_split_test(void)
{
	struct spdk_bdev bdev;
	struct spdk_bdev_io bdev_io;
	struct iovec iov[BDEV_IO_NUM_CHILD_IOV];

	memset(&bdev, 0, sizeof(bdev));
	bdev_io.u.bdev.iovs = iov;

	bdev_io.type = SPDK_BDEV_IO_TYPE_READ;
	bdev.optimal_io_boundary = 0;
	bdev.max_segment_size = 0;
	bdev.max_num_segments = 0;
	bdev_io.bdev = &bdev;

	/* bdev has no optimal_io_boundary and no max sizes set - so this should return false. */
	CU_ASSERT(bdev_io_should_split(&bdev_io) == false);

	bdev.split_on_optimal_io_boundary = true;
	bdev.optimal_io_boundary = 32;
	bdev_io.type = SPDK_BDEV_IO_TYPE_RESET;

	/* RESETs are not based on LBAs - so this should return false. */
	CU_ASSERT(bdev_io_should_split(&bdev_io) == false);

	bdev_io.type = SPDK_BDEV_IO_TYPE_READ;
	bdev_io.u.bdev.offset_blocks = 0;
	bdev_io.u.bdev.num_blocks = 32;

	/* This I/O runs right up to, but does not cross, the boundary - so this should return false. */
	CU_ASSERT(bdev_io_should_split(&bdev_io) == false);

	bdev_io.u.bdev.num_blocks = 33;

	/* This I/O spans a boundary. */
	CU_ASSERT(bdev_io_should_split(&bdev_io) == true);

	bdev_io.u.bdev.num_blocks = 32;
	bdev.max_segment_size = 512 * 32;
	bdev.max_num_segments = 1;
	bdev_io.u.bdev.iovcnt = 1;
	iov[0].iov_len = 512;

	/* Does not cross a boundary and does not exceed max_segment_size or max_num_segments */
	CU_ASSERT(bdev_io_should_split(&bdev_io) == false);

	bdev.split_on_optimal_io_boundary = false;
	bdev.max_segment_size = 512;
	bdev.max_num_segments = 1;
	bdev_io.u.bdev.iovcnt = 2;

	/* Exceeds max_num_segments */
	CU_ASSERT(bdev_io_should_split(&bdev_io) == true);

	bdev.max_num_segments = 2;
	iov[0].iov_len = 513;
	iov[1].iov_len = 512;

	/* Exceeds max_segment_size */
	CU_ASSERT(bdev_io_should_split(&bdev_io) == true);
}

static void
bdev_io_boundary_split_test(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc = NULL;
	struct spdk_io_channel *io_ch;
	struct spdk_bdev_opts bdev_opts = {};
	struct iovec iov[BDEV_IO_NUM_CHILD_IOV * 2];
	struct ut_expected_io *expected_io;
	void *md_buf = (void *)0xFF000000;
	uint64_t i;
	int rc;

	spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts));
	bdev_opts.bdev_io_pool_size = 512;
	bdev_opts.bdev_io_cache_size = 64;

	rc = spdk_bdev_set_opts(&bdev_opts);
	CU_ASSERT(rc == 0);
	spdk_bdev_initialize(bdev_init_cb, NULL);

	bdev = allocate_bdev("bdev0");

	rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(desc != NULL);
	CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc));
	io_ch = spdk_bdev_get_io_channel(desc);
	CU_ASSERT(io_ch != NULL);

	bdev->optimal_io_boundary = 16;
	bdev->split_on_optimal_io_boundary = false;

	g_io_done = false;

	/* First test that the I/O does not get split if split_on_optimal_io_boundary == false. */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 14, 8, 1);
	ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 8 * 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 8, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);

	bdev->split_on_optimal_io_boundary = true;
	bdev->md_interleave = false;
	bdev->md_len = 8;
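	/*
	 * With separate metadata (md_interleave == false, md_len == 8), each
	 * child I/O carries its slice of the parent metadata buffer:
	 * child md_buf == parent md_buf + child_offset_in_blocks * md_len,
	 * which is what the expected_io->md_buf values below encode.
	 */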
	/* Now test that a single-vector command is split correctly.
	 * Offset 14, length 8, payload 0xF000
	 *  Child - Offset 14, length 2, payload 0xF000
	 *  Child - Offset 16, length 6, payload 0xF000 + 2 * 512
	 *
	 * Set up the expected values before calling spdk_bdev_read_blocks
	 */
	g_io_done = false;
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 14, 2, 1);
	expected_io->md_buf = md_buf;
	ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 2 * 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 16, 6, 1);
	expected_io->md_buf = md_buf + 2 * 8;
	ut_expected_io_set_iov(expected_io, 0, (void *)(0xF000 + 2 * 512), 6 * 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	/* spdk_bdev_read_blocks will submit the first child immediately. */
	rc = spdk_bdev_read_blocks_with_md(desc, io_ch, (void *)0xF000, md_buf,
					   14, 8, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2);
	stub_complete_io(2);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);

	/* Now set up a more complex, multi-vector command that needs to be split,
	 * including splitting iovecs.
	 */
	iov[0].iov_base = (void *)0x10000;
	iov[0].iov_len = 512;
	iov[1].iov_base = (void *)0x20000;
	iov[1].iov_len = 20 * 512;
	iov[2].iov_base = (void *)0x30000;
	iov[2].iov_len = 11 * 512;

	g_io_done = false;
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 14, 2, 2);
	expected_io->md_buf = md_buf;
	ut_expected_io_set_iov(expected_io, 0, (void *)0x10000, 512);
	ut_expected_io_set_iov(expected_io, 1, (void *)0x20000, 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 16, 16, 1);
	expected_io->md_buf = md_buf + 2 * 8;
	ut_expected_io_set_iov(expected_io, 0, (void *)(0x20000 + 512), 16 * 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 32, 14, 2);
	expected_io->md_buf = md_buf + 18 * 8;
	ut_expected_io_set_iov(expected_io, 0, (void *)(0x20000 + 17 * 512), 3 * 512);
	ut_expected_io_set_iov(expected_io, 1, (void *)0x30000, 11 * 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	rc = spdk_bdev_writev_blocks_with_md(desc, io_ch, iov, 3, md_buf,
					     14, 32, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 3);
	stub_complete_io(3);
	CU_ASSERT(g_io_done == true);
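	/*
	 * The following cases stress the BDEV_IO_NUM_CHILD_IOV limit (32 in
	 * the bdev layer at the time of writing): a child I/O can carry at
	 * most that many iovecs, so a parent spanning more gets additional
	 * children.
	 */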
1255 */ 1256 for (i = 0; i < BDEV_IO_NUM_CHILD_IOV * 2; i++) { 1257 iov[i].iov_base = (void *)((i + 1) * 0x10000); 1258 iov[i].iov_len = 512; 1259 } 1260 1261 bdev->optimal_io_boundary = BDEV_IO_NUM_CHILD_IOV; 1262 g_io_done = false; 1263 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0, BDEV_IO_NUM_CHILD_IOV, 1264 BDEV_IO_NUM_CHILD_IOV); 1265 expected_io->md_buf = md_buf; 1266 for (i = 0; i < BDEV_IO_NUM_CHILD_IOV; i++) { 1267 ut_expected_io_set_iov(expected_io, i, (void *)((i + 1) * 0x10000), 512); 1268 } 1269 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1270 1271 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, BDEV_IO_NUM_CHILD_IOV, 1272 BDEV_IO_NUM_CHILD_IOV, BDEV_IO_NUM_CHILD_IOV); 1273 expected_io->md_buf = md_buf + BDEV_IO_NUM_CHILD_IOV * 8; 1274 for (i = 0; i < BDEV_IO_NUM_CHILD_IOV; i++) { 1275 ut_expected_io_set_iov(expected_io, i, 1276 (void *)((i + 1 + BDEV_IO_NUM_CHILD_IOV) * 0x10000), 512); 1277 } 1278 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1279 1280 rc = spdk_bdev_readv_blocks_with_md(desc, io_ch, iov, BDEV_IO_NUM_CHILD_IOV * 2, md_buf, 1281 0, BDEV_IO_NUM_CHILD_IOV * 2, io_done, NULL); 1282 CU_ASSERT(rc == 0); 1283 CU_ASSERT(g_io_done == false); 1284 1285 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 1286 stub_complete_io(1); 1287 CU_ASSERT(g_io_done == false); 1288 1289 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 1290 stub_complete_io(1); 1291 CU_ASSERT(g_io_done == true); 1292 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 1293 1294 /* Test multi vector command that needs to be split by strip and then needs to be 1295 * split further due to the capacity of child iovs. In this case, the length of 1296 * the rest of iovec array with an I/O boundary is the multiple of block size. 1297 */ 1298 1299 /* Fill iovec array for exactly one boundary. The iovec cnt for this boundary 1300 * is BDEV_IO_NUM_CHILD_IOV + 1, which exceeds the capacity of child iovs. 
1301 */ 1302 for (i = 0; i < BDEV_IO_NUM_CHILD_IOV - 2; i++) { 1303 iov[i].iov_base = (void *)((i + 1) * 0x10000); 1304 iov[i].iov_len = 512; 1305 } 1306 for (i = BDEV_IO_NUM_CHILD_IOV - 2; i < BDEV_IO_NUM_CHILD_IOV; i++) { 1307 iov[i].iov_base = (void *)((i + 1) * 0x10000); 1308 iov[i].iov_len = 256; 1309 } 1310 iov[BDEV_IO_NUM_CHILD_IOV].iov_base = (void *)((BDEV_IO_NUM_CHILD_IOV + 1) * 0x10000); 1311 iov[BDEV_IO_NUM_CHILD_IOV].iov_len = 512; 1312 1313 /* Add an extra iovec to trigger split */ 1314 iov[BDEV_IO_NUM_CHILD_IOV + 1].iov_base = (void *)((BDEV_IO_NUM_CHILD_IOV + 2) * 0x10000); 1315 iov[BDEV_IO_NUM_CHILD_IOV + 1].iov_len = 512; 1316 1317 bdev->optimal_io_boundary = BDEV_IO_NUM_CHILD_IOV; 1318 g_io_done = false; 1319 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0, 1320 BDEV_IO_NUM_CHILD_IOV - 1, BDEV_IO_NUM_CHILD_IOV); 1321 expected_io->md_buf = md_buf; 1322 for (i = 0; i < BDEV_IO_NUM_CHILD_IOV - 2; i++) { 1323 ut_expected_io_set_iov(expected_io, i, 1324 (void *)((i + 1) * 0x10000), 512); 1325 } 1326 for (i = BDEV_IO_NUM_CHILD_IOV - 2; i < BDEV_IO_NUM_CHILD_IOV; i++) { 1327 ut_expected_io_set_iov(expected_io, i, 1328 (void *)((i + 1) * 0x10000), 256); 1329 } 1330 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1331 1332 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, BDEV_IO_NUM_CHILD_IOV - 1, 1333 1, 1); 1334 expected_io->md_buf = md_buf + (BDEV_IO_NUM_CHILD_IOV - 1) * 8; 1335 ut_expected_io_set_iov(expected_io, 0, 1336 (void *)((BDEV_IO_NUM_CHILD_IOV + 1) * 0x10000), 512); 1337 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1338 1339 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, BDEV_IO_NUM_CHILD_IOV, 1340 1, 1); 1341 expected_io->md_buf = md_buf + BDEV_IO_NUM_CHILD_IOV * 8; 1342 ut_expected_io_set_iov(expected_io, 0, 1343 (void *)((BDEV_IO_NUM_CHILD_IOV + 2) * 0x10000), 512); 1344 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1345 1346 rc = spdk_bdev_readv_blocks_with_md(desc, io_ch, iov, BDEV_IO_NUM_CHILD_IOV + 2, md_buf, 1347 0, BDEV_IO_NUM_CHILD_IOV + 1, io_done, NULL); 1348 CU_ASSERT(rc == 0); 1349 CU_ASSERT(g_io_done == false); 1350 1351 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 1352 stub_complete_io(1); 1353 CU_ASSERT(g_io_done == false); 1354 1355 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 1356 stub_complete_io(2); 1357 CU_ASSERT(g_io_done == true); 1358 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 1359 1360 /* Test multi vector command that needs to be split by strip and then needs to be 1361 * split further due to the capacity of child iovs, the child request offset should 1362 * be rewind to last aligned offset and go success without error. 
1363 */ 1364 for (i = 0; i < BDEV_IO_NUM_CHILD_IOV - 1; i++) { 1365 iov[i].iov_base = (void *)((i + 1) * 0x10000); 1366 iov[i].iov_len = 512; 1367 } 1368 iov[BDEV_IO_NUM_CHILD_IOV - 1].iov_base = (void *)(BDEV_IO_NUM_CHILD_IOV * 0x10000); 1369 iov[BDEV_IO_NUM_CHILD_IOV - 1].iov_len = 256; 1370 1371 iov[BDEV_IO_NUM_CHILD_IOV].iov_base = (void *)((BDEV_IO_NUM_CHILD_IOV + 1) * 0x10000); 1372 iov[BDEV_IO_NUM_CHILD_IOV].iov_len = 256; 1373 1374 iov[BDEV_IO_NUM_CHILD_IOV + 1].iov_base = (void *)((BDEV_IO_NUM_CHILD_IOV + 2) * 0x10000); 1375 iov[BDEV_IO_NUM_CHILD_IOV + 1].iov_len = 512; 1376 1377 bdev->optimal_io_boundary = BDEV_IO_NUM_CHILD_IOV; 1378 g_io_done = false; 1379 g_io_status = 0; 1380 /* The first expected io should be start from offset 0 to BDEV_IO_NUM_CHILD_IOV - 1 */ 1381 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0, 1382 BDEV_IO_NUM_CHILD_IOV - 1, BDEV_IO_NUM_CHILD_IOV - 1); 1383 expected_io->md_buf = md_buf; 1384 for (i = 0; i < BDEV_IO_NUM_CHILD_IOV - 1; i++) { 1385 ut_expected_io_set_iov(expected_io, i, 1386 (void *)((i + 1) * 0x10000), 512); 1387 } 1388 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1389 /* The second expected io should be start from offset BDEV_IO_NUM_CHILD_IOV - 1 to BDEV_IO_NUM_CHILD_IOV */ 1390 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, BDEV_IO_NUM_CHILD_IOV - 1, 1391 1, 2); 1392 expected_io->md_buf = md_buf + (BDEV_IO_NUM_CHILD_IOV - 1) * 8; 1393 ut_expected_io_set_iov(expected_io, 0, 1394 (void *)(BDEV_IO_NUM_CHILD_IOV * 0x10000), 256); 1395 ut_expected_io_set_iov(expected_io, 1, 1396 (void *)((BDEV_IO_NUM_CHILD_IOV + 1) * 0x10000), 256); 1397 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1398 /* The third expected io should be start from offset BDEV_IO_NUM_CHILD_IOV to BDEV_IO_NUM_CHILD_IOV + 1 */ 1399 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, BDEV_IO_NUM_CHILD_IOV, 1400 1, 1); 1401 expected_io->md_buf = md_buf + BDEV_IO_NUM_CHILD_IOV * 8; 1402 ut_expected_io_set_iov(expected_io, 0, 1403 (void *)((BDEV_IO_NUM_CHILD_IOV + 2) * 0x10000), 512); 1404 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1405 1406 rc = spdk_bdev_readv_blocks_with_md(desc, io_ch, iov, BDEV_IO_NUM_CHILD_IOV * 2, md_buf, 1407 0, BDEV_IO_NUM_CHILD_IOV + 1, io_done, NULL); 1408 CU_ASSERT(rc == 0); 1409 CU_ASSERT(g_io_done == false); 1410 1411 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 1412 stub_complete_io(1); 1413 CU_ASSERT(g_io_done == false); 1414 1415 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 1416 stub_complete_io(2); 1417 CU_ASSERT(g_io_done == true); 1418 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 1419 1420 /* Test multi vector command that needs to be split due to the IO boundary and 1421 * the capacity of child iovs. Especially test the case when the command is 1422 * split due to the capacity of child iovs, the tail address is not aligned with 1423 * block size and is rewinded to the aligned address. 1424 * 1425 * The iovecs used in read request is complex but is based on the data 1426 * collected in the real issue. We change the base addresses but keep the lengths 1427 * not to loose the credibility of the test. 
1428 */ 1429 bdev->optimal_io_boundary = 128; 1430 g_io_done = false; 1431 g_io_status = 0; 1432 1433 for (i = 0; i < 31; i++) { 1434 iov[i].iov_base = (void *)(0xFEED0000000 + (i << 20)); 1435 iov[i].iov_len = 1024; 1436 } 1437 iov[31].iov_base = (void *)0xFEED1F00000; 1438 iov[31].iov_len = 32768; 1439 iov[32].iov_base = (void *)0xFEED2000000; 1440 iov[32].iov_len = 160; 1441 iov[33].iov_base = (void *)0xFEED2100000; 1442 iov[33].iov_len = 4096; 1443 iov[34].iov_base = (void *)0xFEED2200000; 1444 iov[34].iov_len = 4096; 1445 iov[35].iov_base = (void *)0xFEED2300000; 1446 iov[35].iov_len = 4096; 1447 iov[36].iov_base = (void *)0xFEED2400000; 1448 iov[36].iov_len = 4096; 1449 iov[37].iov_base = (void *)0xFEED2500000; 1450 iov[37].iov_len = 4096; 1451 iov[38].iov_base = (void *)0xFEED2600000; 1452 iov[38].iov_len = 4096; 1453 iov[39].iov_base = (void *)0xFEED2700000; 1454 iov[39].iov_len = 4096; 1455 iov[40].iov_base = (void *)0xFEED2800000; 1456 iov[40].iov_len = 4096; 1457 iov[41].iov_base = (void *)0xFEED2900000; 1458 iov[41].iov_len = 4096; 1459 iov[42].iov_base = (void *)0xFEED2A00000; 1460 iov[42].iov_len = 4096; 1461 iov[43].iov_base = (void *)0xFEED2B00000; 1462 iov[43].iov_len = 12288; 1463 iov[44].iov_base = (void *)0xFEED2C00000; 1464 iov[44].iov_len = 8192; 1465 iov[45].iov_base = (void *)0xFEED2F00000; 1466 iov[45].iov_len = 4096; 1467 iov[46].iov_base = (void *)0xFEED3000000; 1468 iov[46].iov_len = 4096; 1469 iov[47].iov_base = (void *)0xFEED3100000; 1470 iov[47].iov_len = 4096; 1471 iov[48].iov_base = (void *)0xFEED3200000; 1472 iov[48].iov_len = 24576; 1473 iov[49].iov_base = (void *)0xFEED3300000; 1474 iov[49].iov_len = 16384; 1475 iov[50].iov_base = (void *)0xFEED3400000; 1476 iov[50].iov_len = 12288; 1477 iov[51].iov_base = (void *)0xFEED3500000; 1478 iov[51].iov_len = 4096; 1479 iov[52].iov_base = (void *)0xFEED3600000; 1480 iov[52].iov_len = 4096; 1481 iov[53].iov_base = (void *)0xFEED3700000; 1482 iov[53].iov_len = 4096; 1483 iov[54].iov_base = (void *)0xFEED3800000; 1484 iov[54].iov_len = 28672; 1485 iov[55].iov_base = (void *)0xFEED3900000; 1486 iov[55].iov_len = 20480; 1487 iov[56].iov_base = (void *)0xFEED3A00000; 1488 iov[56].iov_len = 4096; 1489 iov[57].iov_base = (void *)0xFEED3B00000; 1490 iov[57].iov_len = 12288; 1491 iov[58].iov_base = (void *)0xFEED3C00000; 1492 iov[58].iov_len = 4096; 1493 iov[59].iov_base = (void *)0xFEED3D00000; 1494 iov[59].iov_len = 4096; 1495 iov[60].iov_base = (void *)0xFEED3E00000; 1496 iov[60].iov_len = 352; 1497 1498 /* The 1st child IO must be from iov[0] to iov[31] split by the capacity 1499 * of child iovs, 1500 */ 1501 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0, 126, 32); 1502 expected_io->md_buf = md_buf; 1503 for (i = 0; i < 32; i++) { 1504 ut_expected_io_set_iov(expected_io, i, iov[i].iov_base, iov[i].iov_len); 1505 } 1506 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1507 1508 /* The 2nd child IO must be from iov[32] to the first 864 bytes of iov[33] 1509 * split by the IO boundary requirement. 
1510 */ 1511 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 126, 2, 2); 1512 expected_io->md_buf = md_buf + 126 * 8; 1513 ut_expected_io_set_iov(expected_io, 0, iov[32].iov_base, iov[32].iov_len); 1514 ut_expected_io_set_iov(expected_io, 1, iov[33].iov_base, 864); 1515 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1516 1517 /* The 3rd child IO must be from the remaining 3232 bytes of iov[33] to 1518 * the first 864 bytes of iov[46] split by the IO boundary requirement. 1519 */ 1520 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 128, 128, 14); 1521 expected_io->md_buf = md_buf + 128 * 8; 1522 ut_expected_io_set_iov(expected_io, 0, (void *)((uintptr_t)iov[33].iov_base + 864), 1523 iov[33].iov_len - 864); 1524 ut_expected_io_set_iov(expected_io, 1, iov[34].iov_base, iov[34].iov_len); 1525 ut_expected_io_set_iov(expected_io, 2, iov[35].iov_base, iov[35].iov_len); 1526 ut_expected_io_set_iov(expected_io, 3, iov[36].iov_base, iov[36].iov_len); 1527 ut_expected_io_set_iov(expected_io, 4, iov[37].iov_base, iov[37].iov_len); 1528 ut_expected_io_set_iov(expected_io, 5, iov[38].iov_base, iov[38].iov_len); 1529 ut_expected_io_set_iov(expected_io, 6, iov[39].iov_base, iov[39].iov_len); 1530 ut_expected_io_set_iov(expected_io, 7, iov[40].iov_base, iov[40].iov_len); 1531 ut_expected_io_set_iov(expected_io, 8, iov[41].iov_base, iov[41].iov_len); 1532 ut_expected_io_set_iov(expected_io, 9, iov[42].iov_base, iov[42].iov_len); 1533 ut_expected_io_set_iov(expected_io, 10, iov[43].iov_base, iov[43].iov_len); 1534 ut_expected_io_set_iov(expected_io, 11, iov[44].iov_base, iov[44].iov_len); 1535 ut_expected_io_set_iov(expected_io, 12, iov[45].iov_base, iov[45].iov_len); 1536 ut_expected_io_set_iov(expected_io, 13, iov[46].iov_base, 864); 1537 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1538 1539 /* The 4th child IO must be from the remaining 3232 bytes of iov[46] to the 1540 * first 864 bytes of iov[52] split by the IO boundary requirement. 1541 */ 1542 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 256, 128, 7); 1543 expected_io->md_buf = md_buf + 256 * 8; 1544 ut_expected_io_set_iov(expected_io, 0, (void *)((uintptr_t)iov[46].iov_base + 864), 1545 iov[46].iov_len - 864); 1546 ut_expected_io_set_iov(expected_io, 1, iov[47].iov_base, iov[47].iov_len); 1547 ut_expected_io_set_iov(expected_io, 2, iov[48].iov_base, iov[48].iov_len); 1548 ut_expected_io_set_iov(expected_io, 3, iov[49].iov_base, iov[49].iov_len); 1549 ut_expected_io_set_iov(expected_io, 4, iov[50].iov_base, iov[50].iov_len); 1550 ut_expected_io_set_iov(expected_io, 5, iov[51].iov_base, iov[51].iov_len); 1551 ut_expected_io_set_iov(expected_io, 6, iov[52].iov_base, 864); 1552 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1553 1554 /* The 5th child IO must be from the remaining 3232 bytes of iov[52] to 1555 * the first 4096 bytes of iov[57] split by the IO boundary requirement. 
1556 */ 1557 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 384, 128, 6); 1558 expected_io->md_buf = md_buf + 384 * 8; 1559 ut_expected_io_set_iov(expected_io, 0, (void *)((uintptr_t)iov[52].iov_base + 864), 1560 iov[52].iov_len - 864); 1561 ut_expected_io_set_iov(expected_io, 1, iov[53].iov_base, iov[53].iov_len); 1562 ut_expected_io_set_iov(expected_io, 2, iov[54].iov_base, iov[54].iov_len); 1563 ut_expected_io_set_iov(expected_io, 3, iov[55].iov_base, iov[55].iov_len); 1564 ut_expected_io_set_iov(expected_io, 4, iov[56].iov_base, iov[56].iov_len); 1565 ut_expected_io_set_iov(expected_io, 5, iov[57].iov_base, 4960); 1566 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1567 1568 /* The 6th child IO must be from the remaining 7328 bytes of iov[57] 1569 * to the first 3936 bytes of iov[58] split by the capacity of child iovs. 1570 */ 1571 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 512, 30, 3); 1572 expected_io->md_buf = md_buf + 512 * 8; 1573 ut_expected_io_set_iov(expected_io, 0, (void *)((uintptr_t)iov[57].iov_base + 4960), 1574 iov[57].iov_len - 4960); 1575 ut_expected_io_set_iov(expected_io, 1, iov[58].iov_base, iov[58].iov_len); 1576 ut_expected_io_set_iov(expected_io, 2, iov[59].iov_base, 3936); 1577 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1578 1579 /* The 7th child IO is from the remaining 160 bytes of iov[59] and iov[60]. */ 1580 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 542, 1, 2); 1581 expected_io->md_buf = md_buf + 542 * 8; 1582 ut_expected_io_set_iov(expected_io, 0, (void *)((uintptr_t)iov[59].iov_base + 3936), 1583 iov[59].iov_len - 3936); 1584 ut_expected_io_set_iov(expected_io, 1, iov[60].iov_base, iov[60].iov_len); 1585 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1586 1587 rc = spdk_bdev_readv_blocks_with_md(desc, io_ch, iov, 61, md_buf, 1588 0, 543, io_done, NULL); 1589 CU_ASSERT(rc == 0); 1590 CU_ASSERT(g_io_done == false); 1591 1592 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 1593 stub_complete_io(1); 1594 CU_ASSERT(g_io_done == false); 1595 1596 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 5); 1597 stub_complete_io(5); 1598 CU_ASSERT(g_io_done == false); 1599 1600 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 1601 stub_complete_io(1); 1602 CU_ASSERT(g_io_done == true); 1603 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 1604 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS); 1605 1606 /* Test a WRITE_ZEROES that would span an I/O boundary. WRITE_ZEROES should not be 1607 * split, so test that. 1608 */ 1609 bdev->optimal_io_boundary = 15; 1610 g_io_done = false; 1611 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, 9, 36, 0); 1612 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1613 1614 rc = spdk_bdev_write_zeroes_blocks(desc, io_ch, 9, 36, io_done, NULL); 1615 CU_ASSERT(rc == 0); 1616 CU_ASSERT(g_io_done == false); 1617 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 1618 stub_complete_io(1); 1619 CU_ASSERT(g_io_done == true); 1620 1621 /* Test an UNMAP. This should also not be split. 
	bdev->optimal_io_boundary = 16;
	g_io_done = false;
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_UNMAP, 15, 2, 0);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	rc = spdk_bdev_unmap_blocks(desc, io_ch, 15, 2, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == true);

	/* Test a FLUSH.  This should also not be split. */
	bdev->optimal_io_boundary = 16;
	g_io_done = false;
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_FLUSH, 15, 2, 0);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	rc = spdk_bdev_flush_blocks(desc, io_ch, 15, 2, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == true);

	CU_ASSERT(TAILQ_EMPTY(&g_bdev_ut_channel->expected_io));

	/* Children requests return an error status */
	bdev->optimal_io_boundary = 16;
	iov[0].iov_base = (void *)0x10000;
	iov[0].iov_len = 512 * 64;
	g_io_exp_status = SPDK_BDEV_IO_STATUS_FAILED;
	g_io_done = false;
	g_io_status = SPDK_BDEV_IO_STATUS_SUCCESS;

	rc = spdk_bdev_readv_blocks(desc, io_ch, iov, 1, 1, 64, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 5);
	stub_complete_io(4);
	CU_ASSERT(g_io_done == false);
	CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED);

	/* Test that a multi vector command fails and stops the splitting process when
	 * one of its child I/Os fails. The multi vector command is the same as above:
	 * it needs to be split by the optimal I/O boundary and then split further due
	 * to the capacity of child iovs.
	 */
	for (i = 0; i < BDEV_IO_NUM_CHILD_IOV - 1; i++) {
		iov[i].iov_base = (void *)((i + 1) * 0x10000);
		iov[i].iov_len = 512;
	}
	iov[BDEV_IO_NUM_CHILD_IOV - 1].iov_base = (void *)(BDEV_IO_NUM_CHILD_IOV * 0x10000);
	iov[BDEV_IO_NUM_CHILD_IOV - 1].iov_len = 256;

	iov[BDEV_IO_NUM_CHILD_IOV].iov_base = (void *)((BDEV_IO_NUM_CHILD_IOV + 1) * 0x10000);
	iov[BDEV_IO_NUM_CHILD_IOV].iov_len = 256;

	iov[BDEV_IO_NUM_CHILD_IOV + 1].iov_base = (void *)((BDEV_IO_NUM_CHILD_IOV + 2) * 0x10000);
	iov[BDEV_IO_NUM_CHILD_IOV + 1].iov_len = 512;

	bdev->optimal_io_boundary = BDEV_IO_NUM_CHILD_IOV;

	g_io_exp_status = SPDK_BDEV_IO_STATUS_FAILED;
	g_io_done = false;
	g_io_status = SPDK_BDEV_IO_STATUS_SUCCESS;

	rc = spdk_bdev_readv_blocks(desc, io_ch, iov, BDEV_IO_NUM_CHILD_IOV * 2, 0,
				    BDEV_IO_NUM_CHILD_IOV + 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED);

	g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS;

	/* For this test we will create the following conditions to hit the code path where
	 * we are trying to send an I/O following a split that has no iovs because we had to
	 * trim them for alignment reasons.
	 *
1706 *
1707 * - 16K boundary, our IO will start at offset 0 with a length of 0x4200
1708 * - Our IOVs are 0x212 in size so that we run into the 16K boundary at child IOV
1709 * position 30 and overshoot by 0x2e.
1710 * - That means we'll send the IO and loop back to pick up the remaining bytes at
1711 * child IOV index 31. When we do, we find that we have to shorten index 31 by 0x2e
1712 * which eliminates that vector, so we just send the first split IO with 30 vectors
1713 * and let the completion pick up the last 2 vectors.
1714 */
1715 bdev->optimal_io_boundary = 32;
1716 bdev->split_on_optimal_io_boundary = true;
1717 g_io_done = false;
1718
1719 /* Init all parent IOVs to 0x212 */
1720 for (i = 0; i < BDEV_IO_NUM_CHILD_IOV + 2; i++) {
1721 iov[i].iov_base = (void *)((i + 1) * 0x10000);
1722 iov[i].iov_len = 0x212;
1723 }
1724
1725 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0, BDEV_IO_NUM_CHILD_IOV,
1726 BDEV_IO_NUM_CHILD_IOV - 1);
1727 /* expect 0-29 to be 1:1 with the parent iov */
1728 for (i = 0; i < BDEV_IO_NUM_CHILD_IOV - 2; i++) {
1729 ut_expected_io_set_iov(expected_io, i, iov[i].iov_base, iov[i].iov_len);
1730 }
1731
1732 /* expect index 30 to be shortened to 0x1e4 (0x212 - 0x2e) because of the alignment,
1733 * where 0x2e is the amount by which we overshot the 16K boundary
1734 */
1735 ut_expected_io_set_iov(expected_io, BDEV_IO_NUM_CHILD_IOV - 2,
1736 (void *)(iov[BDEV_IO_NUM_CHILD_IOV - 2].iov_base), 0x1e4);
1737 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
1738
1739 /* 2nd child IO will have 2 remaining vectors, one to pick up from the one that was
1740 * shortened that takes it to the next boundary, and then a final one to get us to
1741 * 0x4200 bytes for the IO.
1742 */
1743 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, BDEV_IO_NUM_CHILD_IOV,
1744 BDEV_IO_NUM_CHILD_IOV, 2);
1745 /* position 30 picked up the remaining bytes to the next boundary */
1746 ut_expected_io_set_iov(expected_io, 0,
1747 (void *)(iov[BDEV_IO_NUM_CHILD_IOV - 2].iov_base + 0x1e4), 0x2e);
1748
1749 /* position 31 picked up the rest of the transfer to get us to 0x4200 */
1750 ut_expected_io_set_iov(expected_io, 1,
1751 (void *)(iov[BDEV_IO_NUM_CHILD_IOV - 1].iov_base), 0x1d2);
1752 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
1753
1754 rc = spdk_bdev_readv_blocks(desc, io_ch, iov, BDEV_IO_NUM_CHILD_IOV + 1, 0,
1755 BDEV_IO_NUM_CHILD_IOV + 1, io_done, NULL);
1756 CU_ASSERT(rc == 0);
1757 CU_ASSERT(g_io_done == false);
1758
1759 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
1760 stub_complete_io(1);
1761 CU_ASSERT(g_io_done == false);
1762
1763 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
1764 stub_complete_io(1);
1765 CU_ASSERT(g_io_done == true);
1766 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
1767
1768 spdk_put_io_channel(io_ch);
1769 spdk_bdev_close(desc);
1770 free_bdev(bdev);
1771 spdk_bdev_finish(bdev_fini_cb, NULL);
1772 poll_threads();
1773 }
1774
1775 static void
1776 bdev_io_max_size_and_segment_split_test(void)
1777 {
1778 struct spdk_bdev *bdev;
1779 struct spdk_bdev_desc *desc = NULL;
1780 struct spdk_io_channel *io_ch;
1781 struct spdk_bdev_opts bdev_opts = {};
1782 struct iovec iov[BDEV_IO_NUM_CHILD_IOV * 2];
1783 struct ut_expected_io *expected_io;
1784 uint64_t i;
1785 int rc;
1786
1787 spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts));
1788 bdev_opts.bdev_io_pool_size = 512;
1789 bdev_opts.bdev_io_cache_size = 64;
1790
1791 bdev_opts.opts_size = sizeof(bdev_opts);
1792 rc = spdk_bdev_set_opts(&bdev_opts);
1793 CU_ASSERT(rc == 0);
1794 spdk_bdev_initialize(bdev_init_cb, NULL);
1795
1796 bdev = allocate_bdev("bdev0");
1797
1798 rc = spdk_bdev_open_ext(bdev->name, true, bdev_ut_event_cb, NULL, &desc);
1799 CU_ASSERT(rc == 0);
1800 SPDK_CU_ASSERT_FATAL(desc != NULL);
1801 io_ch = spdk_bdev_get_io_channel(desc);
1802 CU_ASSERT(io_ch != NULL);
1803
1804 bdev->split_on_optimal_io_boundary = false;
1805 bdev->optimal_io_boundary = 0;
1806
1807 /* Case 0: max_num_segments == 0 (unlimited), but the 2 * 512 byte buffer
1808 * exceeds max_segment_size (512), so it is split into two segment entries.
1809 */
1810 bdev->max_segment_size = 512;
1811 bdev->max_num_segments = 0;
1812 g_io_done = false;
1813
1814 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 14, 2, 2);
1815 ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 512);
1816 ut_expected_io_set_iov(expected_io, 1, (void *)(0xF000 + 512), 512);
1817 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
1818
1819 rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 2, io_done, NULL);
1820 CU_ASSERT(rc == 0);
1821 CU_ASSERT(g_io_done == false);
1822
1823 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
1824 stub_complete_io(1);
1825 CU_ASSERT(g_io_done == true);
1826 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
1827
1828 /* Case 1: max_segment_size == 0 (unlimited), but the iov count (2)
1829 * exceeds max_num_segments (1), so each iov becomes its own child IO.
1830 */
1831 bdev->max_segment_size = 0;
1832 bdev->max_num_segments = 1;
1833 g_io_done = false;
1834
1835 iov[0].iov_base = (void *)0x10000;
1836 iov[0].iov_len = 512;
1837 iov[1].iov_base = (void *)0x20000;
1838 iov[1].iov_len = 8 * 512;
1839
1840 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 14, 1, 1);
1841 ut_expected_io_set_iov(expected_io, 0, iov[0].iov_base, iov[0].iov_len);
1842 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
1843
1844 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 15, 8, 1);
1845 ut_expected_io_set_iov(expected_io, 0, iov[1].iov_base, iov[1].iov_len);
1846 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
1847
1848 rc = spdk_bdev_readv_blocks(desc, io_ch, iov, 2, 14, 9, io_done, NULL);
1849 CU_ASSERT(rc == 0);
1850 CU_ASSERT(g_io_done == false);
1851
1852 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2);
1853 stub_complete_io(2);
1854 CU_ASSERT(g_io_done == true);
1855 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
1856
1857 /* Test that a non-vector command is split correctly.
1858 * Set up the expected values before calling spdk_bdev_read_blocks.
1859 */
1860 bdev->max_segment_size = 512;
1861 bdev->max_num_segments = 1;
1862 g_io_done = false;
1863
1864 /* Child IO 0 */
1865 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 14, 1, 1);
1866 ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 512);
1867 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
1868
1869 /* Child IO 1 */
1870 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 15, 1, 1);
1871 ut_expected_io_set_iov(expected_io, 0, (void *)(0xF000 + 1 * 512), 512);
1872 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
1873
1874 /* spdk_bdev_read_blocks will submit the first child immediately.
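 * With max_segment_size = 512 and max_num_segments = 1 each 512-byte block
 * becomes its own child IO, so the 2-block read at offset 14 produces the two
 * children expected above at offsets 14 and 15.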
*/
1875 rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 2, io_done, NULL);
1876 CU_ASSERT(rc == 0);
1877 CU_ASSERT(g_io_done == false);
1878
1879 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2);
1880 stub_complete_io(2);
1881 CU_ASSERT(g_io_done == true);
1882 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
1883
1884 /* Now set up a more complex, multi-vector command that needs to be split,
1885 * including splitting iovecs.
1886 */
1887 bdev->max_segment_size = 2 * 512;
1888 bdev->max_num_segments = 1;
1889 g_io_done = false;
1890
1891 iov[0].iov_base = (void *)0x10000;
1892 iov[0].iov_len = 2 * 512;
1893 iov[1].iov_base = (void *)0x20000;
1894 iov[1].iov_len = 4 * 512;
1895 iov[2].iov_base = (void *)0x30000;
1896 iov[2].iov_len = 6 * 512;
1897
1898 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 14, 2, 1);
1899 ut_expected_io_set_iov(expected_io, 0, iov[0].iov_base, 512 * 2);
1900 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
1901
1902 /* iov[1] is split into two 1024-byte segments, each becoming its own child IO */
1903 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 16, 2, 1);
1904 ut_expected_io_set_iov(expected_io, 0, iov[1].iov_base, 512 * 2);
1905 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
1906
1907 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 18, 2, 1);
1908 ut_expected_io_set_iov(expected_io, 0, iov[1].iov_base + 512 * 2, 512 * 2);
1909 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
1910
1911 /* iov[2] is split into three 1024-byte segments, each becoming its own child IO */
1912 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 20, 2, 1);
1913 ut_expected_io_set_iov(expected_io, 0, iov[2].iov_base, 512 * 2);
1914 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
1915
1916 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 22, 2, 1);
1917 ut_expected_io_set_iov(expected_io, 0, iov[2].iov_base + 512 * 2, 512 * 2);
1918 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
1919
1920 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 24, 2, 1);
1921 ut_expected_io_set_iov(expected_io, 0, iov[2].iov_base + 512 * 4, 512 * 2);
1922 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
1923
1924 rc = spdk_bdev_writev_blocks(desc, io_ch, iov, 3, 14, 12, io_done, NULL);
1925 CU_ASSERT(rc == 0);
1926 CU_ASSERT(g_io_done == false);
1927
1928 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 6);
1929 stub_complete_io(6);
1930 CU_ASSERT(g_io_done == true);
1931 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
1932
1933 /* Test a multi-vector command that needs to be split by the segment size and then
1934 * split further due to the capacity of the parent IO's child iovs.
1935 */
1936 bdev->max_segment_size = 512;
1937 bdev->max_num_segments = 1;
1938 g_io_done = false;
1939
1940 for (i = 0; i < BDEV_IO_NUM_CHILD_IOV; i++) {
1941 iov[i].iov_base = (void *)((i + 1) * 0x10000);
1942 iov[i].iov_len = 512 * 2;
1943 }
1944
1945 /* Each input iov is split into 2 single-block iovs, so half of the input iovs
1946 * are enough to fill all the child iov entries of a single batch.
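 * Arithmetic: 32 parent iovs of 1024 bytes yield 64 single-block child IOs,
 * submitted in two batches of BDEV_IO_NUM_CHILD_IOV (32) children each.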
1947 */
1948 for (i = 0; i < BDEV_IO_NUM_CHILD_IOV / 2; i++) {
1949 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 2 * i, 1, 1);
1950 ut_expected_io_set_iov(expected_io, 0, iov[i].iov_base, 512);
1951 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
1952
1953 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 2 * i + 1, 1, 1);
1954 ut_expected_io_set_iov(expected_io, 0, iov[i].iov_base + 512, 512);
1955 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
1956 }
1957
1958 /* The remaining iovs are split in the second round */
1959 for (i = BDEV_IO_NUM_CHILD_IOV / 2; i < BDEV_IO_NUM_CHILD_IOV; i++) {
1960 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, i * 2, 1, 1);
1961 ut_expected_io_set_iov(expected_io, 0, iov[i].iov_base, 512);
1962 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
1963
1964 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, i * 2 + 1, 1, 1);
1965 ut_expected_io_set_iov(expected_io, 0, iov[i].iov_base + 512, 512);
1966 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
1967 }
1968
1969 rc = spdk_bdev_readv_blocks(desc, io_ch, iov, BDEV_IO_NUM_CHILD_IOV, 0,
1970 BDEV_IO_NUM_CHILD_IOV * 2, io_done, NULL);
1971 CU_ASSERT(rc == 0);
1972 CU_ASSERT(g_io_done == false);
1973
1974 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == BDEV_IO_NUM_CHILD_IOV);
1975 stub_complete_io(BDEV_IO_NUM_CHILD_IOV);
1976 CU_ASSERT(g_io_done == false);
1977
1978 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == BDEV_IO_NUM_CHILD_IOV);
1979 stub_complete_io(BDEV_IO_NUM_CHILD_IOV);
1980 CU_ASSERT(g_io_done == true);
1981 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
1982
1983 /* An error case: a child IO produced by the split is not
1984 * a multiple of the block size,
1985 * so the splitting exits with an error.
1986 */
1987 bdev->max_segment_size = 512;
1988 bdev->max_num_segments = 1;
1989 g_io_done = false;
1990
1991 iov[0].iov_base = (void *)0x10000;
1992 iov[0].iov_len = 512 + 256;
1993 iov[1].iov_base = (void *)0x20000;
1994 iov[1].iov_len = 256;
1995
1996 /* iov[0] is split into 512 and 256 bytes.
1997 * The 256-byte remainder is less than a block, and the next split round
1998 * finds that its first child IO is smaller than
1999 * the block size, so the split exits with an error.
2000 */
2001 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0, 1, 1);
2002 ut_expected_io_set_iov(expected_io, 0, iov[0].iov_base, 512);
2003 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2004
2005 rc = spdk_bdev_readv_blocks(desc, io_ch, iov, 2, 0, 2, io_done, NULL);
2006 CU_ASSERT(rc == 0);
2007 CU_ASSERT(g_io_done == false);
2008
2009 /* First child IO is OK */
2010 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
2011 stub_complete_io(1);
2012 CU_ASSERT(g_io_done == true);
2013 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
2014
2015 /* Error exit */
2016 stub_complete_io(1);
2017 CU_ASSERT(g_io_done == true);
2018 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED);
2019 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
2020
2021 /* Test a multi-vector command that needs to be split by the segment size and then
2022 * split further due to the capacity of the child iovs.
2023 *
2024 * In this case the last two iovs need to be split, but that would exceed the
2025 * capacity of the child iovs, so they must wait until the first batch completes.
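 * Concretely: iovs 0..29 are 512 bytes and iovs 30..31 are 1024 bytes, so the
 * first child IO consumes all 32 child iov entries (30 whole iovs plus the two
 * halves of iov 30), leaving the two halves of iov 31 for the second round.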
2026 */
2027 bdev->max_segment_size = 512;
2028 bdev->max_num_segments = BDEV_IO_NUM_CHILD_IOV;
2029 g_io_done = false;
2030
2031 for (i = 0; i < BDEV_IO_NUM_CHILD_IOV - 2; i++) {
2032 iov[i].iov_base = (void *)((i + 1) * 0x10000);
2033 iov[i].iov_len = 512;
2034 }
2035 for (i = BDEV_IO_NUM_CHILD_IOV - 2; i < BDEV_IO_NUM_CHILD_IOV; i++) {
2036 iov[i].iov_base = (void *)((i + 1) * 0x10000);
2037 iov[i].iov_len = 512 * 2;
2038 }
2039
2040 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0,
2041 BDEV_IO_NUM_CHILD_IOV, BDEV_IO_NUM_CHILD_IOV);
2042 /* iovs 0 ~ (BDEV_IO_NUM_CHILD_IOV - 2) are not split */
2043 for (i = 0; i < BDEV_IO_NUM_CHILD_IOV - 2; i++) {
2044 ut_expected_io_set_iov(expected_io, i, iov[i].iov_base, iov[i].iov_len);
2045 }
2046 /* iov (BDEV_IO_NUM_CHILD_IOV - 2) is split in two */
2047 ut_expected_io_set_iov(expected_io, i, iov[i].iov_base, 512);
2048 ut_expected_io_set_iov(expected_io, i + 1, iov[i].iov_base + 512, 512);
2049 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2050
2051 /* The child iov entries exceed the parent IO's capacity, so the rest is split in the next round */
2052 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, BDEV_IO_NUM_CHILD_IOV, 2, 2);
2053 ut_expected_io_set_iov(expected_io, 0, iov[i + 1].iov_base, 512);
2054 ut_expected_io_set_iov(expected_io, 1, iov[i + 1].iov_base + 512, 512);
2055 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2056
2057 rc = spdk_bdev_readv_blocks(desc, io_ch, iov, BDEV_IO_NUM_CHILD_IOV, 0,
2058 BDEV_IO_NUM_CHILD_IOV + 2, io_done, NULL);
2059 CU_ASSERT(rc == 0);
2060 CU_ASSERT(g_io_done == false);
2061
2062 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
2063 stub_complete_io(1);
2064 CU_ASSERT(g_io_done == false);
2065
2066 /* Next round */
2067 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
2068 stub_complete_io(1);
2069 CU_ASSERT(g_io_done == true);
2070 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
2071
2072 /* This case is similar to the previous one, but the last few child iov entries
2073 * do not add up to a full block, so they cannot be put into this child IO
2074 * and must wait for the next one.
2075 */
2076 bdev->max_segment_size = 512;
2077 bdev->max_num_segments = BDEV_IO_NUM_CHILD_IOV;
2078 g_io_done = false;
2079
2080 for (i = 0; i < BDEV_IO_NUM_CHILD_IOV - 2; i++) {
2081 iov[i].iov_base = (void *)((i + 1) * 0x10000);
2082 iov[i].iov_len = 512;
2083 }
2084
2085 for (i = BDEV_IO_NUM_CHILD_IOV - 2; i < BDEV_IO_NUM_CHILD_IOV + 2; i++) {
2086 iov[i].iov_base = (void *)((i + 1) * 0x10000);
2087 iov[i].iov_len = 128;
2088 }
2089
2090 /* The first child iovcnt isn't BDEV_IO_NUM_CHILD_IOV but BDEV_IO_NUM_CHILD_IOV - 2,
2091 * because the remaining 2 iovs do not add up to a full block.
2092 */
2093 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0,
2094 BDEV_IO_NUM_CHILD_IOV - 2, BDEV_IO_NUM_CHILD_IOV - 2);
2095 for (i = 0; i < BDEV_IO_NUM_CHILD_IOV - 2; i++) {
2096 ut_expected_io_set_iov(expected_io, i, iov[i].iov_base, iov[i].iov_len);
2097 }
2098 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2099
2100 /* The second child IO waits until the first child IO completes, because together
2101 * the two IOs would need more child iov entries than the parent provides:
2102 * BDEV_IO_NUM_CHILD_IOV - 2 entries plus 4 more, i.e. BDEV_IO_NUM_CHILD_IOV + 2 in total.
2103 */
2104 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, BDEV_IO_NUM_CHILD_IOV - 2,
2105 1, 4);
2106 ut_expected_io_set_iov(expected_io, 0, iov[i].iov_base, iov[i].iov_len);
2107 ut_expected_io_set_iov(expected_io, 1, iov[i + 1].iov_base, iov[i + 1].iov_len);
2108 ut_expected_io_set_iov(expected_io, 2, iov[i + 2].iov_base, iov[i + 2].iov_len);
2109 ut_expected_io_set_iov(expected_io, 3, iov[i + 3].iov_base, iov[i + 3].iov_len);
2110 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2111
2112 rc = spdk_bdev_readv_blocks(desc, io_ch, iov, BDEV_IO_NUM_CHILD_IOV + 2, 0,
2113 BDEV_IO_NUM_CHILD_IOV - 1, io_done, NULL);
2114 CU_ASSERT(rc == 0);
2115 CU_ASSERT(g_io_done == false);
2116
2117 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
2118 stub_complete_io(1);
2119 CU_ASSERT(g_io_done == false);
2120
2121 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
2122 stub_complete_io(1);
2123 CU_ASSERT(g_io_done == true);
2124 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
2125
2126 /* A very complicated case: each sg entry exceeds max_segment_size and
2127 * needs to be split. At the same time, each child IO must be a multiple of the
2128 * block length, and the child iovcnt exceeds the parent's iovcnt.
2129 */
2130 bdev->max_segment_size = 512 + 128;
2131 bdev->max_num_segments = 3;
2132 g_io_done = false;
2133
2134 for (i = 0; i < BDEV_IO_NUM_CHILD_IOV - 2; i++) {
2135 iov[i].iov_base = (void *)((i + 1) * 0x10000);
2136 iov[i].iov_len = 512 + 256;
2137 }
2138
2139 for (i = BDEV_IO_NUM_CHILD_IOV - 2; i < BDEV_IO_NUM_CHILD_IOV + 2; i++) {
2140 iov[i].iov_base = (void *)((i + 1) * 0x10000);
2141 iov[i].iov_len = 512 + 128;
2142 }
2143
2144 /* Each for() round generates 3 child IOs occupying 9 child iov entries,
2145 * 3 * 9 = 27 entries in total; each round consumes 4 parent iov entries and 6 blocks.
2146 * 9 child IOs are generated.
2147 */
2148 for (i = 0; i < 3; i++) {
2149 uint32_t j = i * 4;
2150 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, i * 6, 2, 3);
2151 ut_expected_io_set_iov(expected_io, 0, iov[j].iov_base, 640);
2152 ut_expected_io_set_iov(expected_io, 1, iov[j].iov_base + 640, 128);
2153 ut_expected_io_set_iov(expected_io, 2, iov[j + 1].iov_base, 256);
2154 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2155
2156 /* Each child IO must be a multiple of the block length;
2157 * iov[j + 2] must be split. If a third entry were added as well, the child IO
2158 * could no longer be a multiple of the block length, yet that entry still
2159 * occupies one slot of the parent's child iov accounting.
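 * Byte accounting for one round: 640 + 128 + 256 = 1024 bytes (2 blocks), then
 * 512 + 512 = 1024, then 256 + 640 + 128 = 1024, covering the 768-byte parent
 * iovs split by max_segment_size = 640.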
2160 */
2161 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, i * 6 + 2, 2, 2);
2162 ut_expected_io_set_iov(expected_io, 0, iov[j + 1].iov_base + 256, 512);
2163 ut_expected_io_set_iov(expected_io, 1, iov[j + 2].iov_base, 512);
2164 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2165
2166 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, i * 6 + 4, 2, 3);
2167 ut_expected_io_set_iov(expected_io, 0, iov[j + 2].iov_base + 512, 256);
2168 ut_expected_io_set_iov(expected_io, 1, iov[j + 3].iov_base, 640);
2169 ut_expected_io_set_iov(expected_io, 2, iov[j + 3].iov_base + 640, 128);
2170 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2171 }
2172
2173 /* The 10th child IO: child iov position is 27,
2174 * parent iov entry index is 3 * 4 = 12 and block offset is 3 * 6 = 18
2175 */
2176 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 18, 2, 3);
2177 ut_expected_io_set_iov(expected_io, 0, iov[12].iov_base, 640);
2178 ut_expected_io_set_iov(expected_io, 1, iov[12].iov_base + 640, 128);
2179 ut_expected_io_set_iov(expected_io, 2, iov[13].iov_base, 256);
2180 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2181
2182 /* The 11th child IO: child iov position is 30 */
2183 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 20, 2, 2);
2184 ut_expected_io_set_iov(expected_io, 0, iov[13].iov_base + 256, 512);
2185 ut_expected_io_set_iov(expected_io, 1, iov[14].iov_base, 512);
2186 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2187
2188 /* The 12th child IO: the 2nd split round starts with iovpos 0 */
2189 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 22, 2, 3);
2190 ut_expected_io_set_iov(expected_io, 0, iov[14].iov_base + 512, 256);
2191 ut_expected_io_set_iov(expected_io, 1, iov[15].iov_base, 640);
2192 ut_expected_io_set_iov(expected_io, 2, iov[15].iov_base + 640, 128);
2193 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2194
2195 /* Generate 9 more child IOs consuming 27 child iov entries.
2196 * Each for() round again consumes 4 parent iov entries and 6 blocks;
2197 * the parent iov index starts from 16 and the block offset starts from 24.
2198 */
2199 for (i = 0; i < 3; i++) {
2200 uint32_t j = i * 4 + 16;
2201 uint32_t offset = i * 6 + 24;
2202 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, offset, 2, 3);
2203 ut_expected_io_set_iov(expected_io, 0, iov[j].iov_base, 640);
2204 ut_expected_io_set_iov(expected_io, 1, iov[j].iov_base + 640, 128);
2205 ut_expected_io_set_iov(expected_io, 2, iov[j + 1].iov_base, 256);
2206 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2207
2208 /* Each child IO must be a multiple of the block length;
2209 * iov[j + 2] must be split. If a third entry were added as well, the child IO
2210 * could no longer be a multiple of the block length, yet that entry still
2211 * occupies one slot of the parent's child iov accounting.
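 * This batch repeats the accounting of the first: each round of 3 child IOs
 * occupies 9 child iov entries, so the rounds at block offsets 24, 30 and 36
 * consume parent iovs 16 through 27.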
2212 */
2213 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, offset + 2, 2, 2);
2214 ut_expected_io_set_iov(expected_io, 0, iov[j + 1].iov_base + 256, 512);
2215 ut_expected_io_set_iov(expected_io, 1, iov[j + 2].iov_base, 512);
2216 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2217
2218 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, offset + 4, 2, 3);
2219 ut_expected_io_set_iov(expected_io, 0, iov[j + 2].iov_base + 512, 256);
2220 ut_expected_io_set_iov(expected_io, 1, iov[j + 3].iov_base, 640);
2221 ut_expected_io_set_iov(expected_io, 2, iov[j + 3].iov_base + 640, 128);
2222 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2223 }
2224
2225 /* The 22nd child IO: child iov position is 30 */
2226 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 42, 1, 1);
2227 ut_expected_io_set_iov(expected_io, 0, iov[28].iov_base, 512);
2228 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2229
2230 /* The third round */
2231 /* The 23rd child IO: child iovpos is 0 */
2232 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 43, 2, 3);
2233 ut_expected_io_set_iov(expected_io, 0, iov[28].iov_base + 512, 256);
2234 ut_expected_io_set_iov(expected_io, 1, iov[29].iov_base, 640);
2235 ut_expected_io_set_iov(expected_io, 2, iov[29].iov_base + 640, 128);
2236 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2237
2238 /* The 24th child IO */
2239 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 45, 3, 3);
2240 ut_expected_io_set_iov(expected_io, 0, iov[30].iov_base, 640);
2241 ut_expected_io_set_iov(expected_io, 1, iov[31].iov_base, 640);
2242 ut_expected_io_set_iov(expected_io, 2, iov[32].iov_base, 256);
2243 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2244
2245 /* The 25th child IO */
2246 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 48, 2, 2);
2247 ut_expected_io_set_iov(expected_io, 0, iov[32].iov_base + 256, 384);
2248 ut_expected_io_set_iov(expected_io, 1, iov[33].iov_base, 640);
2249 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2250
2251 rc = spdk_bdev_readv_blocks(desc, io_ch, iov, BDEV_IO_NUM_CHILD_IOV + 2, 0,
2252 50, io_done, NULL);
2253 CU_ASSERT(rc == 0);
2254 CU_ASSERT(g_io_done == false);
2255
2256 /* The parent IO supports up to 32 child iovs, so at most 11 child IOs
2257 * can be split at a time; the splitting
2258 * continues after the first batch completes.
2259 */
2260 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 11);
2261 stub_complete_io(11);
2262 CU_ASSERT(g_io_done == false);
2263
2264 /* The 2nd round */
2265 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 11);
2266 stub_complete_io(11);
2267 CU_ASSERT(g_io_done == false);
2268
2269 /* The last round */
2270 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 3);
2271 stub_complete_io(3);
2272 CU_ASSERT(g_io_done == true);
2273 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
2274
2275 /* Test a WRITE_ZEROES. This should also not be split.
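 * Although max_segment_size is 512 and max_num_segments is 1 below, the
 * 36-block write_zeroes carries no data buffer, so it stays a single child.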
*/
2276 bdev->max_segment_size = 512;
2277 bdev->max_num_segments = 1;
2278 g_io_done = false;
2279
2280 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, 9, 36, 0);
2281 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2282
2283 rc = spdk_bdev_write_zeroes_blocks(desc, io_ch, 9, 36, io_done, NULL);
2284 CU_ASSERT(rc == 0);
2285 CU_ASSERT(g_io_done == false);
2286 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
2287 stub_complete_io(1);
2288 CU_ASSERT(g_io_done == true);
2289
2290 /* Test an UNMAP. This should also not be split. */
2291 g_io_done = false;
2292
2293 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_UNMAP, 15, 4, 0);
2294 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2295
2296 rc = spdk_bdev_unmap_blocks(desc, io_ch, 15, 4, io_done, NULL);
2297 CU_ASSERT(rc == 0);
2298 CU_ASSERT(g_io_done == false);
2299 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
2300 stub_complete_io(1);
2301 CU_ASSERT(g_io_done == true);
2302
2303 /* Test a FLUSH. This should also not be split. */
2304 g_io_done = false;
2305
2306 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_FLUSH, 15, 4, 0);
2307 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2308
2309 rc = spdk_bdev_flush_blocks(desc, io_ch, 15, 4, io_done, NULL);
2310 CU_ASSERT(rc == 0);
2311 CU_ASSERT(g_io_done == false);
2312 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
2313 stub_complete_io(1);
2314 CU_ASSERT(g_io_done == true);
2315
2316 spdk_put_io_channel(io_ch);
2317 spdk_bdev_close(desc);
2318 free_bdev(bdev);
2319 spdk_bdev_finish(bdev_fini_cb, NULL);
2320 poll_threads();
2321 }
2322
2323 static void
2324 bdev_io_mix_split_test(void)
2325 {
2326 struct spdk_bdev *bdev;
2327 struct spdk_bdev_desc *desc = NULL;
2328 struct spdk_io_channel *io_ch;
2329 struct spdk_bdev_opts bdev_opts = {};
2330 struct iovec iov[BDEV_IO_NUM_CHILD_IOV * 2];
2331 struct ut_expected_io *expected_io;
2332 uint64_t i;
2333 int rc;
2334
2335 spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts));
2336 bdev_opts.bdev_io_pool_size = 512;
2337 bdev_opts.bdev_io_cache_size = 64;
2338
2339 rc = spdk_bdev_set_opts(&bdev_opts);
2340 CU_ASSERT(rc == 0);
2341 spdk_bdev_initialize(bdev_init_cb, NULL);
2342
2343 bdev = allocate_bdev("bdev0");
2344
2345 rc = spdk_bdev_open_ext(bdev->name, true, bdev_ut_event_cb, NULL, &desc);
2346 CU_ASSERT(rc == 0);
2347 SPDK_CU_ASSERT_FATAL(desc != NULL);
2348 io_ch = spdk_bdev_get_io_channel(desc);
2349 CU_ASSERT(io_ch != NULL);
2350
2351 /* First case: optimal_io_boundary == max_segment_size * max_num_segments */
2352 bdev->split_on_optimal_io_boundary = true;
2353 bdev->optimal_io_boundary = 16;
2354
2355 bdev->max_segment_size = 512;
2356 bdev->max_num_segments = 16;
2357 g_io_done = false;
2358
2359 /* An IO crossing the IO boundary requires a split.
2360 * Total 2 child IOs.
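 * With boundary 16, the 4-block read at offset 14 splits at block 16 into
 * blocks 14-15 and 16-17; each child carries two 512-byte segment entries
 * because max_segment_size = 512.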
2361 */
2362
2363 /* The 1st child IO: the buffer is split into multiple 512-byte segment entries */
2364 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 14, 2, 2);
2365 ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 512);
2366 ut_expected_io_set_iov(expected_io, 1, (void *)(0xF000 + 512), 512);
2367 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2368
2369 /* The 2nd child IO: the buffer is split into multiple 512-byte segment entries */
2370 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 16, 2, 2);
2371 ut_expected_io_set_iov(expected_io, 0, (void *)(0xF000 + 2 * 512), 512);
2372 ut_expected_io_set_iov(expected_io, 1, (void *)(0xF000 + 3 * 512), 512);
2373 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2374
2375 rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 4, io_done, NULL);
2376 CU_ASSERT(rc == 0);
2377 CU_ASSERT(g_io_done == false);
2378
2379 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2);
2380 stub_complete_io(2);
2381 CU_ASSERT(g_io_done == true);
2382 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
2383
2384 /* Second case: optimal_io_boundary > max_segment_size * max_num_segments */
2385 bdev->max_segment_size = 15 * 512;
2386 bdev->max_num_segments = 1;
2387 g_io_done = false;
2388
2389 /* An IO crossing the IO boundary requires a split.
2390 * The 1st child IO's segment size exceeds max_segment_size,
2391 * so the 1st child IO is split into multiple segment entries,
2392 * and then into 2 child IOs because of max_num_segments.
2393 * Total 3 child IOs.
2394 */
2395
2396 /* The first 2 child IOs are within one IO boundary.
2397 * Because optimal_io_boundary > max_segment_size * max_num_segments,
2398 * the first boundary's worth of data is split into 2 child IOs.
2399 */
2400 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0, 15, 1);
2401 ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 512 * 15);
2402 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2403
2404 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 15, 1, 1);
2405 ut_expected_io_set_iov(expected_io, 0, (void *)(0xF000 + 512 * 15), 512);
2406 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2407
2408 /* The 3rd child IO results from the IO boundary */
2409 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 16, 2, 1);
2410 ut_expected_io_set_iov(expected_io, 0, (void *)(0xF000 + 512 * 16), 512 * 2);
2411 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2412
2413 rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 0, 18, io_done, NULL);
2414 CU_ASSERT(rc == 0);
2415 CU_ASSERT(g_io_done == false);
2416
2417 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 3);
2418 stub_complete_io(3);
2419 CU_ASSERT(g_io_done == true);
2420 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
2421
2422 /* Third case: optimal_io_boundary < max_segment_size * max_num_segments */
2423 bdev->max_segment_size = 17 * 512;
2424 bdev->max_num_segments = 1;
2425 g_io_done = false;
2426
2427 /* An IO crossing the IO boundary requires a split.
2428 * The child IOs are not split further.
2429 * Total 2 child IOs.
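 * Here max_segment_size (17 * 512) exceeds each child's size, so the boundary
 * alone drives the split: 16 blocks, then the remaining 2 blocks.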
2430 */
2431
2432 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0, 16, 1);
2433 ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 512 * 16);
2434 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2435
2436 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 16, 2, 1);
2437 ut_expected_io_set_iov(expected_io, 0, (void *)(0xF000 + 512 * 16), 512 * 2);
2438 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2439
2440 rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 0, 18, io_done, NULL);
2441 CU_ASSERT(rc == 0);
2442 CU_ASSERT(g_io_done == false);
2443
2444 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2);
2445 stub_complete_io(2);
2446 CU_ASSERT(g_io_done == true);
2447 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
2448
2449 /* Now set up a more complex, multi-vector command that needs to be split,
2450 * including splitting iovecs.
2451 * optimal_io_boundary < max_segment_size * max_num_segments
2452 */
2453 bdev->max_segment_size = 3 * 512;
2454 bdev->max_num_segments = 6;
2455 g_io_done = false;
2456
2457 iov[0].iov_base = (void *)0x10000;
2458 iov[0].iov_len = 4 * 512;
2459 iov[1].iov_base = (void *)0x20000;
2460 iov[1].iov_len = 4 * 512;
2461 iov[2].iov_base = (void *)0x30000;
2462 iov[2].iov_len = 10 * 512;
2463
2464 /* An IO crossing the IO boundary requires a split.
2465 * The 1st child IO's segment size exceeds max_segment_size, and after
2466 * splitting by segment size the number of segments exceeds max_num_segments,
2467 * so the first boundary's worth of data is split into 2 child IOs.
2468 * Total 3 child IOs.
2469 */
2470
2471 /* The first 2 child IOs are within one IO boundary.
2472 * After splitting by segment size the segment count exceeds max_num_segments,
2473 * so it splits into 2 child IOs.
2474 */
2475 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 0, 14, 6);
2476 ut_expected_io_set_iov(expected_io, 0, iov[0].iov_base, 512 * 3);
2477 ut_expected_io_set_iov(expected_io, 1, iov[0].iov_base + 512 * 3, 512);
2478 ut_expected_io_set_iov(expected_io, 2, iov[1].iov_base, 512 * 3);
2479 ut_expected_io_set_iov(expected_io, 3, iov[1].iov_base + 512 * 3, 512);
2480 ut_expected_io_set_iov(expected_io, 4, iov[2].iov_base, 512 * 3);
2481 ut_expected_io_set_iov(expected_io, 5, iov[2].iov_base + 512 * 3, 512 * 3);
2482 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2483
2484 /* The 2nd child IO carries the leftover segment entry */
2485 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 14, 2, 1);
2486 ut_expected_io_set_iov(expected_io, 0, iov[2].iov_base + 512 * 6, 512 * 2);
2487 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2488
2489 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 16, 2, 1);
2490 ut_expected_io_set_iov(expected_io, 0, iov[2].iov_base + 512 * 8, 512 * 2);
2491 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2492
2493 rc = spdk_bdev_writev_blocks(desc, io_ch, iov, 3, 0, 18, io_done, NULL);
2494 CU_ASSERT(rc == 0);
2495 CU_ASSERT(g_io_done == false);
2496
2497 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 3);
2498 stub_complete_io(3);
2499 CU_ASSERT(g_io_done == true);
2500 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
2501
2502 /* A very complicated case: each sg entry exceeds max_segment_size
2503 * and the IO is split on the IO boundary.
2504 * optimal_io_boundary < max_segment_size * max_num_segments
2505 */
2506 bdev->max_segment_size = 3 * 512;
2507 bdev->max_num_segments = BDEV_IO_NUM_CHILD_IOV;
2508 g_io_done = false;
2509
2510 for (i = 0; i < 20; i++) {
2511 iov[i].iov_base = (void *)((i + 1) * 0x10000);
2512 iov[i].iov_len = 512 * 4;
2513 }
2514
2515 /* An IO crossing the IO boundary requires a split.
2516 * The 80-block length splits into 5 child IOs based on the offset and IO boundary.
2517 * Each iov entry needs to be split into 2 entries because of max_segment_size.
2518 * Total 5 child IOs.
2519 */
2520
2521 /* 4 iov entries fall within one IO boundary and each iov entry splits into 2,
2522 * so each child IO occupies 8 child iov entries.
2523 */
2524 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 0, 16, 8);
2525 for (i = 0; i < 4; i++) {
2526 int iovcnt = i * 2;
2527 ut_expected_io_set_iov(expected_io, iovcnt, iov[i].iov_base, 512 * 3);
2528 ut_expected_io_set_iov(expected_io, iovcnt + 1, iov[i].iov_base + 512 * 3, 512);
2529 }
2530 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2531
2532 /* The 2nd child IO; 16 child iov entries of the parent IO consumed in total */
2533 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 16, 16, 8);
2534 for (i = 4; i < 8; i++) {
2535 int iovcnt = (i - 4) * 2;
2536 ut_expected_io_set_iov(expected_io, iovcnt, iov[i].iov_base, 512 * 3);
2537 ut_expected_io_set_iov(expected_io, iovcnt + 1, iov[i].iov_base + 512 * 3, 512);
2538 }
2539 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2540
2541 /* The 3rd child IO; 24 child iov entries of the parent IO consumed in total */
2542 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 32, 16, 8);
2543 for (i = 8; i < 12; i++) {
2544 int iovcnt = (i - 8) * 2;
2545 ut_expected_io_set_iov(expected_io, iovcnt, iov[i].iov_base, 512 * 3);
2546 ut_expected_io_set_iov(expected_io, iovcnt + 1, iov[i].iov_base + 512 * 3, 512);
2547 }
2548 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2549
2550 /* The 4th child IO; all 32 child iov entries of the parent IO consumed */
2551 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 48, 16, 8);
2552 for (i = 12; i < 16; i++) {
2553 int iovcnt = (i - 12) * 2;
2554 ut_expected_io_set_iov(expected_io, iovcnt, iov[i].iov_base, 512 * 3);
2555 ut_expected_io_set_iov(expected_io, iovcnt + 1, iov[i].iov_base + 512 * 3, 512);
2556 }
2557 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2558
2559 /* The 5th child IO; because of the child iov entry limit it is split
2560 * in the next round.
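 * Arithmetic: 4 child IOs * 8 entries = 32 child iov entries exhaust the
 * parent's array, so the 5th child (blocks 64-79) is only submitted after the
 * first batch of 4 completes.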
2561 */ 2562 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 64, 16, 8); 2563 for (i = 16; i < 20; i++) { 2564 int iovcnt = (i - 16) * 2; 2565 ut_expected_io_set_iov(expected_io, iovcnt, iov[i].iov_base, 512 * 3); 2566 ut_expected_io_set_iov(expected_io, iovcnt + 1, iov[i].iov_base + 512 * 3, 512); 2567 } 2568 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2569 2570 rc = spdk_bdev_writev_blocks(desc, io_ch, iov, 20, 0, 80, io_done, NULL); 2571 CU_ASSERT(rc == 0); 2572 CU_ASSERT(g_io_done == false); 2573 2574 /* First split round */ 2575 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 4); 2576 stub_complete_io(4); 2577 CU_ASSERT(g_io_done == false); 2578 2579 /* Second split round */ 2580 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 2581 stub_complete_io(1); 2582 CU_ASSERT(g_io_done == true); 2583 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 2584 2585 spdk_put_io_channel(io_ch); 2586 spdk_bdev_close(desc); 2587 free_bdev(bdev); 2588 spdk_bdev_finish(bdev_fini_cb, NULL); 2589 poll_threads(); 2590 } 2591 2592 static void 2593 bdev_io_split_with_io_wait(void) 2594 { 2595 struct spdk_bdev *bdev; 2596 struct spdk_bdev_desc *desc = NULL; 2597 struct spdk_io_channel *io_ch; 2598 struct spdk_bdev_channel *channel; 2599 struct spdk_bdev_mgmt_channel *mgmt_ch; 2600 struct spdk_bdev_opts bdev_opts = {}; 2601 struct iovec iov[3]; 2602 struct ut_expected_io *expected_io; 2603 int rc; 2604 2605 spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts)); 2606 bdev_opts.bdev_io_pool_size = 2; 2607 bdev_opts.bdev_io_cache_size = 1; 2608 2609 rc = spdk_bdev_set_opts(&bdev_opts); 2610 CU_ASSERT(rc == 0); 2611 spdk_bdev_initialize(bdev_init_cb, NULL); 2612 2613 bdev = allocate_bdev("bdev0"); 2614 2615 rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc); 2616 CU_ASSERT(rc == 0); 2617 CU_ASSERT(desc != NULL); 2618 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 2619 io_ch = spdk_bdev_get_io_channel(desc); 2620 CU_ASSERT(io_ch != NULL); 2621 channel = spdk_io_channel_get_ctx(io_ch); 2622 mgmt_ch = channel->shared_resource->mgmt_ch; 2623 2624 bdev->optimal_io_boundary = 16; 2625 bdev->split_on_optimal_io_boundary = true; 2626 2627 rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, NULL); 2628 CU_ASSERT(rc == 0); 2629 2630 /* Now test that a single-vector command is split correctly. 2631 * Offset 14, length 8, payload 0xF000 2632 * Child - Offset 14, length 2, payload 0xF000 2633 * Child - Offset 16, length 6, payload 0xF000 + 2 * 512 2634 * 2635 * Set up the expected values before calling spdk_bdev_read_blocks 2636 */ 2637 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 14, 2, 1); 2638 ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 2 * 512); 2639 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2640 2641 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 16, 6, 1); 2642 ut_expected_io_set_iov(expected_io, 0, (void *)(0xF000 + 2 * 512), 6 * 512); 2643 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2644 2645 /* The following children will be submitted sequentially due to the capacity of 2646 * spdk_bdev_io. 
2647 */ 2648 2649 /* The first child I/O will be queued to wait until an spdk_bdev_io becomes available */ 2650 rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 8, io_done, NULL); 2651 CU_ASSERT(rc == 0); 2652 CU_ASSERT(!TAILQ_EMPTY(&mgmt_ch->io_wait_queue)); 2653 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 2654 2655 /* Completing the first read I/O will submit the first child */ 2656 stub_complete_io(1); 2657 CU_ASSERT(TAILQ_EMPTY(&mgmt_ch->io_wait_queue)); 2658 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 2659 2660 /* Completing the first child will submit the second child */ 2661 stub_complete_io(1); 2662 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 2663 2664 /* Complete the second child I/O. This should result in our callback getting 2665 * invoked since the parent I/O is now complete. 2666 */ 2667 stub_complete_io(1); 2668 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 2669 2670 /* Now set up a more complex, multi-vector command that needs to be split, 2671 * including splitting iovecs. 2672 */ 2673 iov[0].iov_base = (void *)0x10000; 2674 iov[0].iov_len = 512; 2675 iov[1].iov_base = (void *)0x20000; 2676 iov[1].iov_len = 20 * 512; 2677 iov[2].iov_base = (void *)0x30000; 2678 iov[2].iov_len = 11 * 512; 2679 2680 g_io_done = false; 2681 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 14, 2, 2); 2682 ut_expected_io_set_iov(expected_io, 0, (void *)0x10000, 512); 2683 ut_expected_io_set_iov(expected_io, 1, (void *)0x20000, 512); 2684 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2685 2686 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 16, 16, 1); 2687 ut_expected_io_set_iov(expected_io, 0, (void *)(0x20000 + 512), 16 * 512); 2688 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2689 2690 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 32, 14, 2); 2691 ut_expected_io_set_iov(expected_io, 0, (void *)(0x20000 + 17 * 512), 3 * 512); 2692 ut_expected_io_set_iov(expected_io, 1, (void *)0x30000, 11 * 512); 2693 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2694 2695 rc = spdk_bdev_writev_blocks(desc, io_ch, iov, 3, 14, 32, io_done, NULL); 2696 CU_ASSERT(rc == 0); 2697 CU_ASSERT(g_io_done == false); 2698 2699 /* The following children will be submitted sequentially due to the capacity of 2700 * spdk_bdev_io. 2701 */ 2702 2703 /* Completing the first child will submit the second child */ 2704 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 2705 stub_complete_io(1); 2706 CU_ASSERT(g_io_done == false); 2707 2708 /* Completing the second child will submit the third child */ 2709 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 2710 stub_complete_io(1); 2711 CU_ASSERT(g_io_done == false); 2712 2713 /* Completing the third child will result in our callback getting invoked 2714 * since the parent I/O is now complete. 
2715 */ 2716 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 2717 stub_complete_io(1); 2718 CU_ASSERT(g_io_done == true); 2719 2720 CU_ASSERT(TAILQ_EMPTY(&g_bdev_ut_channel->expected_io)); 2721 2722 spdk_put_io_channel(io_ch); 2723 spdk_bdev_close(desc); 2724 free_bdev(bdev); 2725 spdk_bdev_finish(bdev_fini_cb, NULL); 2726 poll_threads(); 2727 } 2728 2729 static void 2730 bdev_io_alignment(void) 2731 { 2732 struct spdk_bdev *bdev; 2733 struct spdk_bdev_desc *desc = NULL; 2734 struct spdk_io_channel *io_ch; 2735 struct spdk_bdev_opts bdev_opts = {}; 2736 int rc; 2737 void *buf = NULL; 2738 struct iovec iovs[2]; 2739 int iovcnt; 2740 uint64_t alignment; 2741 2742 spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts)); 2743 bdev_opts.bdev_io_pool_size = 20; 2744 bdev_opts.bdev_io_cache_size = 2; 2745 2746 rc = spdk_bdev_set_opts(&bdev_opts); 2747 CU_ASSERT(rc == 0); 2748 spdk_bdev_initialize(bdev_init_cb, NULL); 2749 2750 fn_table.submit_request = stub_submit_request_get_buf; 2751 bdev = allocate_bdev("bdev0"); 2752 2753 rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc); 2754 CU_ASSERT(rc == 0); 2755 CU_ASSERT(desc != NULL); 2756 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 2757 io_ch = spdk_bdev_get_io_channel(desc); 2758 CU_ASSERT(io_ch != NULL); 2759 2760 /* Create aligned buffer */ 2761 rc = posix_memalign(&buf, 4096, 8192); 2762 SPDK_CU_ASSERT_FATAL(rc == 0); 2763 2764 /* Pass aligned single buffer with no alignment required */ 2765 alignment = 1; 2766 bdev->required_alignment = spdk_u32log2(alignment); 2767 2768 rc = spdk_bdev_write_blocks(desc, io_ch, buf, 0, 1, io_done, NULL); 2769 CU_ASSERT(rc == 0); 2770 stub_complete_io(1); 2771 CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt, 2772 alignment)); 2773 2774 rc = spdk_bdev_read_blocks(desc, io_ch, buf, 0, 1, io_done, NULL); 2775 CU_ASSERT(rc == 0); 2776 stub_complete_io(1); 2777 CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt, 2778 alignment)); 2779 2780 /* Pass unaligned single buffer with no alignment required */ 2781 alignment = 1; 2782 bdev->required_alignment = spdk_u32log2(alignment); 2783 2784 rc = spdk_bdev_write_blocks(desc, io_ch, buf + 4, 0, 1, io_done, NULL); 2785 CU_ASSERT(rc == 0); 2786 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0); 2787 CU_ASSERT(g_bdev_io->u.bdev.iovs[0].iov_base == buf + 4); 2788 stub_complete_io(1); 2789 2790 rc = spdk_bdev_read_blocks(desc, io_ch, buf + 4, 0, 1, io_done, NULL); 2791 CU_ASSERT(rc == 0); 2792 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0); 2793 CU_ASSERT(g_bdev_io->u.bdev.iovs[0].iov_base == buf + 4); 2794 stub_complete_io(1); 2795 2796 /* Pass unaligned single buffer with 512 alignment required */ 2797 alignment = 512; 2798 bdev->required_alignment = spdk_u32log2(alignment); 2799 2800 rc = spdk_bdev_write_blocks(desc, io_ch, buf + 4, 0, 1, io_done, NULL); 2801 CU_ASSERT(rc == 0); 2802 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 1); 2803 CU_ASSERT(g_bdev_io->u.bdev.iovs == &g_bdev_io->internal.bounce_iov); 2804 CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt, 2805 alignment)); 2806 stub_complete_io(1); 2807 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0); 2808 2809 rc = spdk_bdev_read_blocks(desc, io_ch, buf + 4, 0, 1, io_done, NULL); 2810 CU_ASSERT(rc == 0); 2811 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 1); 2812 CU_ASSERT(g_bdev_io->u.bdev.iovs == &g_bdev_io->internal.bounce_iov); 2813 CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt, 
2814 alignment)); 2815 stub_complete_io(1); 2816 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0); 2817 2818 /* Pass unaligned single buffer with 4096 alignment required */ 2819 alignment = 4096; 2820 bdev->required_alignment = spdk_u32log2(alignment); 2821 2822 rc = spdk_bdev_write_blocks(desc, io_ch, buf + 8, 0, 1, io_done, NULL); 2823 CU_ASSERT(rc == 0); 2824 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 1); 2825 CU_ASSERT(g_bdev_io->u.bdev.iovs == &g_bdev_io->internal.bounce_iov); 2826 CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt, 2827 alignment)); 2828 stub_complete_io(1); 2829 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0); 2830 2831 rc = spdk_bdev_read_blocks(desc, io_ch, buf + 8, 0, 1, io_done, NULL); 2832 CU_ASSERT(rc == 0); 2833 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 1); 2834 CU_ASSERT(g_bdev_io->u.bdev.iovs == &g_bdev_io->internal.bounce_iov); 2835 CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt, 2836 alignment)); 2837 stub_complete_io(1); 2838 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0); 2839 2840 /* Pass aligned iovs with no alignment required */ 2841 alignment = 1; 2842 bdev->required_alignment = spdk_u32log2(alignment); 2843 2844 iovcnt = 1; 2845 iovs[0].iov_base = buf; 2846 iovs[0].iov_len = 512; 2847 2848 rc = spdk_bdev_writev(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL); 2849 CU_ASSERT(rc == 0); 2850 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0); 2851 stub_complete_io(1); 2852 CU_ASSERT(g_bdev_io->u.bdev.iovs[0].iov_base == iovs[0].iov_base); 2853 2854 rc = spdk_bdev_readv(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL); 2855 CU_ASSERT(rc == 0); 2856 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0); 2857 stub_complete_io(1); 2858 CU_ASSERT(g_bdev_io->u.bdev.iovs[0].iov_base == iovs[0].iov_base); 2859 2860 /* Pass unaligned iovs with no alignment required */ 2861 alignment = 1; 2862 bdev->required_alignment = spdk_u32log2(alignment); 2863 2864 iovcnt = 2; 2865 iovs[0].iov_base = buf + 16; 2866 iovs[0].iov_len = 256; 2867 iovs[1].iov_base = buf + 16 + 256 + 32; 2868 iovs[1].iov_len = 256; 2869 2870 rc = spdk_bdev_writev(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL); 2871 CU_ASSERT(rc == 0); 2872 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0); 2873 stub_complete_io(1); 2874 CU_ASSERT(g_bdev_io->u.bdev.iovs[0].iov_base == iovs[0].iov_base); 2875 2876 rc = spdk_bdev_readv(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL); 2877 CU_ASSERT(rc == 0); 2878 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0); 2879 stub_complete_io(1); 2880 CU_ASSERT(g_bdev_io->u.bdev.iovs[0].iov_base == iovs[0].iov_base); 2881 2882 /* Pass unaligned iov with 2048 alignment required */ 2883 alignment = 2048; 2884 bdev->required_alignment = spdk_u32log2(alignment); 2885 2886 iovcnt = 2; 2887 iovs[0].iov_base = buf + 16; 2888 iovs[0].iov_len = 256; 2889 iovs[1].iov_base = buf + 16 + 256 + 32; 2890 iovs[1].iov_len = 256; 2891 2892 rc = spdk_bdev_writev(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL); 2893 CU_ASSERT(rc == 0); 2894 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == iovcnt); 2895 CU_ASSERT(g_bdev_io->u.bdev.iovs == &g_bdev_io->internal.bounce_iov); 2896 CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt, 2897 alignment)); 2898 stub_complete_io(1); 2899 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0); 2900 2901 rc = spdk_bdev_readv(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL); 2902 CU_ASSERT(rc == 0); 2903 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == iovcnt); 2904 
CU_ASSERT(g_bdev_io->u.bdev.iovs == &g_bdev_io->internal.bounce_iov);
2905 CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt,
2906 alignment));
2907 stub_complete_io(1);
2908 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);
2909
2910 /* Pass iov without allocated buffer with no alignment required */
2911 alignment = 1;
2912 bdev->required_alignment = spdk_u32log2(alignment);
2913
2914 iovcnt = 1;
2915 iovs[0].iov_base = NULL;
2916 iovs[0].iov_len = 0;
2917
2918 rc = spdk_bdev_readv(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL);
2919 CU_ASSERT(rc == 0);
2920 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);
2921 CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt,
2922 alignment));
2923 stub_complete_io(1);
2924
2925 /* Pass iov without allocated buffer with 1024 alignment required */
2926 alignment = 1024;
2927 bdev->required_alignment = spdk_u32log2(alignment);
2928
2929 iovcnt = 1;
2930 iovs[0].iov_base = NULL;
2931 iovs[0].iov_len = 0;
2932
2933 rc = spdk_bdev_readv(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL);
2934 CU_ASSERT(rc == 0);
2935 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);
2936 CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt,
2937 alignment));
2938 stub_complete_io(1);
2939
2940 spdk_put_io_channel(io_ch);
2941 spdk_bdev_close(desc);
2942 free_bdev(bdev);
2943 fn_table.submit_request = stub_submit_request;
2944 spdk_bdev_finish(bdev_fini_cb, NULL);
2945 poll_threads();
2946
2947 free(buf);
2948 }
2949
2950 static void
2951 bdev_io_alignment_with_boundary(void)
2952 {
2953 struct spdk_bdev *bdev;
2954 struct spdk_bdev_desc *desc = NULL;
2955 struct spdk_io_channel *io_ch;
2956 struct spdk_bdev_opts bdev_opts = {};
2957 int rc;
2958 void *buf = NULL;
2959 struct iovec iovs[2];
2960 int iovcnt;
2961 uint64_t alignment;
2962
2963 spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts));
2964 bdev_opts.bdev_io_pool_size = 20;
2965 bdev_opts.bdev_io_cache_size = 2;
2966
2967 bdev_opts.opts_size = sizeof(bdev_opts);
2968 rc = spdk_bdev_set_opts(&bdev_opts);
2969 CU_ASSERT(rc == 0);
2970 spdk_bdev_initialize(bdev_init_cb, NULL);
2971
2972 fn_table.submit_request = stub_submit_request_get_buf;
2973 bdev = allocate_bdev("bdev0");
2974
2975 rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc);
2976 CU_ASSERT(rc == 0);
2977 CU_ASSERT(desc != NULL);
2978 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc));
2979 io_ch = spdk_bdev_get_io_channel(desc);
2980 CU_ASSERT(io_ch != NULL);
2981
2982 /* Create aligned buffer */
2983 rc = posix_memalign(&buf, 4096, 131072);
2984 SPDK_CU_ASSERT_FATAL(rc == 0);
2985 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS;
2986
2987 /* 512 * 3 with 2 IO boundary, allocate small data buffer from bdev layer */
2988 alignment = 512;
2989 bdev->required_alignment = spdk_u32log2(alignment);
2990 bdev->optimal_io_boundary = 2;
2991 bdev->split_on_optimal_io_boundary = true;
2992
2993 iovcnt = 1;
2994 iovs[0].iov_base = NULL;
2995 iovs[0].iov_len = 512 * 3;
2996
2997 rc = spdk_bdev_readv_blocks(desc, io_ch, iovs, iovcnt, 1, 3, io_done, NULL);
2998 CU_ASSERT(rc == 0);
2999 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2);
3000 stub_complete_io(2);
3001
3002 /* 8KiB with 16 IO boundary, allocate large data buffer from bdev layer */
3003 alignment = 512;
3004 bdev->required_alignment = spdk_u32log2(alignment);
3005 bdev->optimal_io_boundary = 16;
3006 bdev->split_on_optimal_io_boundary = true;
3007
3008 iovcnt = 1;
3009 iovs[0].iov_base = NULL;
3010 iovs[0].iov_len = 512 * 16;
3011
3012 rc = spdk_bdev_readv_blocks(desc, io_ch, iovs, iovcnt, 1, 16, io_done, NULL);
3013 CU_ASSERT(rc == 0);
3014 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2);
3015 stub_complete_io(2);
3016
3017 /* 512 * 160 with 128 IO boundary, 63.5KiB + 16.5KiB for the two child requests */
3018 alignment = 512;
3019 bdev->required_alignment = spdk_u32log2(alignment);
3020 bdev->optimal_io_boundary = 128;
3021 bdev->split_on_optimal_io_boundary = true;
3022
3023 iovcnt = 1;
3024 iovs[0].iov_base = buf + 16;
3025 iovs[0].iov_len = 512 * 160;
3026 rc = spdk_bdev_readv_blocks(desc, io_ch, iovs, iovcnt, 1, 160, io_done, NULL);
3027 CU_ASSERT(rc == 0);
3028 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2);
3029 stub_complete_io(2);
3030
3031 /* 512 * 3 with 2 IO boundary */
3032 alignment = 512;
3033 bdev->required_alignment = spdk_u32log2(alignment);
3034 bdev->optimal_io_boundary = 2;
3035 bdev->split_on_optimal_io_boundary = true;
3036
3037 iovcnt = 2;
3038 iovs[0].iov_base = buf + 16;
3039 iovs[0].iov_len = 512;
3040 iovs[1].iov_base = buf + 16 + 512 + 32;
3041 iovs[1].iov_len = 1024;
3042
3043 rc = spdk_bdev_writev_blocks(desc, io_ch, iovs, iovcnt, 1, 3, io_done, NULL);
3044 CU_ASSERT(rc == 0);
3045 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2);
3046 stub_complete_io(2);
3047
3048 rc = spdk_bdev_readv_blocks(desc, io_ch, iovs, iovcnt, 1, 3, io_done, NULL);
3049 CU_ASSERT(rc == 0);
3050 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2);
3051 stub_complete_io(2);
3052
3053 /* 512 * 64 with 32 IO boundary */
3054 bdev->optimal_io_boundary = 32;
3055 iovcnt = 2;
3056 iovs[0].iov_base = buf + 16;
3057 iovs[0].iov_len = 16384;
3058 iovs[1].iov_base = buf + 16 + 16384 + 32;
3059 iovs[1].iov_len = 16384;
3060
3061 rc = spdk_bdev_writev_blocks(desc, io_ch, iovs, iovcnt, 1, 64, io_done, NULL);
3062 CU_ASSERT(rc == 0);
3063 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 3);
3064 stub_complete_io(3);
3065
3066 rc = spdk_bdev_readv_blocks(desc, io_ch, iovs, iovcnt, 1, 64, io_done, NULL);
3067 CU_ASSERT(rc == 0);
3068 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 3);
3069 stub_complete_io(3);
3070
3071 /* 512 * 160 with 32 IO boundary */
3072 iovcnt = 1;
3073 iovs[0].iov_base = buf + 16;
3074 iovs[0].iov_len = 16384 + 65536;
3075
3076 rc = spdk_bdev_writev_blocks(desc, io_ch, iovs, iovcnt, 1, 160, io_done, NULL);
3077 CU_ASSERT(rc == 0);
3078 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 6);
3079 stub_complete_io(6);
3080
3081 spdk_put_io_channel(io_ch);
3082 spdk_bdev_close(desc);
3083 free_bdev(bdev);
3084 fn_table.submit_request = stub_submit_request;
3085 spdk_bdev_finish(bdev_fini_cb, NULL);
3086 poll_threads();
3087
3088 free(buf);
3089 }
3090
3091 static void
3092 histogram_status_cb(void *cb_arg, int status)
3093 {
3094 g_status = status;
3095 }
3096
3097 static void
3098 histogram_data_cb(void *cb_arg, int status, struct spdk_histogram_data *histogram)
3099 {
3100 g_status = status;
3101 g_histogram = histogram;
3102 }
3103
3104 static void
3105 histogram_io_count(void *ctx, uint64_t start, uint64_t end, uint64_t count,
3106 uint64_t total, uint64_t so_far)
3107 {
3108 g_count += count;
3109 }
3110
3111 static void
3112 bdev_histograms(void)
3113 {
3114 struct spdk_bdev *bdev;
3115 struct spdk_bdev_desc *desc = NULL;
3116 struct spdk_io_channel *ch;
3117 struct spdk_histogram_data *histogram;
3118 uint8_t buf[4096];
3119 int rc;
3120
3121 spdk_bdev_initialize(bdev_init_cb, NULL);
3122
3123 bdev = allocate_bdev("bdev");
3124
3125 rc =
spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc); 3126 CU_ASSERT(rc == 0); 3127 CU_ASSERT(desc != NULL); 3128 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 3129 3130 ch = spdk_bdev_get_io_channel(desc); 3131 CU_ASSERT(ch != NULL); 3132 3133 /* Enable histogram */ 3134 g_status = -1; 3135 spdk_bdev_histogram_enable(bdev, histogram_status_cb, NULL, true); 3136 poll_threads(); 3137 CU_ASSERT(g_status == 0); 3138 CU_ASSERT(bdev->internal.histogram_enabled == true); 3139 3140 /* Allocate histogram */ 3141 histogram = spdk_histogram_data_alloc(); 3142 SPDK_CU_ASSERT_FATAL(histogram != NULL); 3143 3144 /* Check if histogram is zeroed */ 3145 spdk_bdev_histogram_get(bdev, histogram, histogram_data_cb, NULL); 3146 poll_threads(); 3147 CU_ASSERT(g_status == 0); 3148 SPDK_CU_ASSERT_FATAL(g_histogram != NULL); 3149 3150 g_count = 0; 3151 spdk_histogram_data_iterate(g_histogram, histogram_io_count, NULL); 3152 3153 CU_ASSERT(g_count == 0); 3154 3155 rc = spdk_bdev_write_blocks(desc, ch, buf, 0, 1, io_done, NULL); 3156 CU_ASSERT(rc == 0); 3157 3158 spdk_delay_us(10); 3159 stub_complete_io(1); 3160 poll_threads(); 3161 3162 rc = spdk_bdev_read_blocks(desc, ch, buf, 0, 1, io_done, NULL); 3163 CU_ASSERT(rc == 0); 3164 3165 spdk_delay_us(10); 3166 stub_complete_io(1); 3167 poll_threads(); 3168 3169 /* Check if histogram gathered data from all I/O channels */ 3170 g_histogram = NULL; 3171 spdk_bdev_histogram_get(bdev, histogram, histogram_data_cb, NULL); 3172 poll_threads(); 3173 CU_ASSERT(g_status == 0); 3174 CU_ASSERT(bdev->internal.histogram_enabled == true); 3175 SPDK_CU_ASSERT_FATAL(g_histogram != NULL); 3176 3177 g_count = 0; 3178 spdk_histogram_data_iterate(g_histogram, histogram_io_count, NULL); 3179 CU_ASSERT(g_count == 2); 3180 3181 /* Disable histogram */ 3182 spdk_bdev_histogram_enable(bdev, histogram_status_cb, NULL, false); 3183 poll_threads(); 3184 CU_ASSERT(g_status == 0); 3185 CU_ASSERT(bdev->internal.histogram_enabled == false); 3186 3187 /* Try to run histogram commands on disabled bdev */ 3188 spdk_bdev_histogram_get(bdev, histogram, histogram_data_cb, NULL); 3189 poll_threads(); 3190 CU_ASSERT(g_status == -EFAULT); 3191 3192 spdk_histogram_data_free(histogram); 3193 spdk_put_io_channel(ch); 3194 spdk_bdev_close(desc); 3195 free_bdev(bdev); 3196 spdk_bdev_finish(bdev_fini_cb, NULL); 3197 poll_threads(); 3198 } 3199 3200 static void 3201 _bdev_compare(bool emulated) 3202 { 3203 struct spdk_bdev *bdev; 3204 struct spdk_bdev_desc *desc = NULL; 3205 struct spdk_io_channel *ioch; 3206 struct ut_expected_io *expected_io; 3207 uint64_t offset, num_blocks; 3208 uint32_t num_completed; 3209 char aa_buf[512]; 3210 char bb_buf[512]; 3211 struct iovec compare_iov; 3212 uint8_t io_type; 3213 int rc; 3214 3215 if (emulated) { 3216 io_type = SPDK_BDEV_IO_TYPE_READ; 3217 } else { 3218 io_type = SPDK_BDEV_IO_TYPE_COMPARE; 3219 } 3220 3221 memset(aa_buf, 0xaa, sizeof(aa_buf)); 3222 memset(bb_buf, 0xbb, sizeof(bb_buf)); 3223 3224 g_io_types_supported[SPDK_BDEV_IO_TYPE_COMPARE] = !emulated; 3225 3226 spdk_bdev_initialize(bdev_init_cb, NULL); 3227 fn_table.submit_request = stub_submit_request_get_buf; 3228 bdev = allocate_bdev("bdev"); 3229 3230 rc = spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc); 3231 CU_ASSERT_EQUAL(rc, 0); 3232 SPDK_CU_ASSERT_FATAL(desc != NULL); 3233 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 3234 ioch = spdk_bdev_get_io_channel(desc); 3235 SPDK_CU_ASSERT_FATAL(ioch != NULL); 3236 3237 fn_table.submit_request = stub_submit_request_get_buf; 3238 
g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 3239 3240 offset = 50; 3241 num_blocks = 1; 3242 compare_iov.iov_base = aa_buf; 3243 compare_iov.iov_len = sizeof(aa_buf); 3244 3245 expected_io = ut_alloc_expected_io(io_type, offset, num_blocks, 0); 3246 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 3247 3248 g_io_done = false; 3249 g_compare_read_buf = aa_buf; 3250 g_compare_read_buf_len = sizeof(aa_buf); 3251 rc = spdk_bdev_comparev_blocks(desc, ioch, &compare_iov, 1, offset, num_blocks, io_done, NULL); 3252 CU_ASSERT_EQUAL(rc, 0); 3253 num_completed = stub_complete_io(1); 3254 CU_ASSERT_EQUAL(num_completed, 1); 3255 CU_ASSERT(g_io_done == true); 3256 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS); 3257 3258 expected_io = ut_alloc_expected_io(io_type, offset, num_blocks, 0); 3259 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 3260 3261 g_io_done = false; 3262 g_compare_read_buf = bb_buf; 3263 g_compare_read_buf_len = sizeof(bb_buf); 3264 rc = spdk_bdev_comparev_blocks(desc, ioch, &compare_iov, 1, offset, num_blocks, io_done, NULL); 3265 CU_ASSERT_EQUAL(rc, 0); 3266 num_completed = stub_complete_io(1); 3267 CU_ASSERT_EQUAL(num_completed, 1); 3268 CU_ASSERT(g_io_done == true); 3269 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_MISCOMPARE); 3270 3271 spdk_put_io_channel(ioch); 3272 spdk_bdev_close(desc); 3273 free_bdev(bdev); 3274 fn_table.submit_request = stub_submit_request; 3275 spdk_bdev_finish(bdev_fini_cb, NULL); 3276 poll_threads(); 3277 3278 g_io_types_supported[SPDK_BDEV_IO_TYPE_COMPARE] = true; 3279 3280 g_compare_read_buf = NULL; 3281 } 3282 3283 static void 3284 bdev_compare(void) 3285 { 3286 _bdev_compare(true); 3287 _bdev_compare(false); 3288 } 3289 3290 static void 3291 bdev_compare_and_write(void) 3292 { 3293 struct spdk_bdev *bdev; 3294 struct spdk_bdev_desc *desc = NULL; 3295 struct spdk_io_channel *ioch; 3296 struct ut_expected_io *expected_io; 3297 uint64_t offset, num_blocks; 3298 uint32_t num_completed; 3299 char aa_buf[512]; 3300 char bb_buf[512]; 3301 char cc_buf[512]; 3302 char write_buf[512]; 3303 struct iovec compare_iov; 3304 struct iovec write_iov; 3305 int rc; 3306 3307 memset(aa_buf, 0xaa, sizeof(aa_buf)); 3308 memset(bb_buf, 0xbb, sizeof(bb_buf)); 3309 memset(cc_buf, 0xcc, sizeof(cc_buf)); 3310 3311 g_io_types_supported[SPDK_BDEV_IO_TYPE_COMPARE] = false; 3312 3313 spdk_bdev_initialize(bdev_init_cb, NULL); 3314 fn_table.submit_request = stub_submit_request_get_buf; 3315 bdev = allocate_bdev("bdev"); 3316 3317 rc = spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc); 3318 CU_ASSERT_EQUAL(rc, 0); 3319 SPDK_CU_ASSERT_FATAL(desc != NULL); 3320 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 3321 ioch = spdk_bdev_get_io_channel(desc); 3322 SPDK_CU_ASSERT_FATAL(ioch != NULL); 3323 3324 fn_table.submit_request = stub_submit_request_get_buf; 3325 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 3326 3327 offset = 50; 3328 num_blocks = 1; 3329 compare_iov.iov_base = aa_buf; 3330 compare_iov.iov_len = sizeof(aa_buf); 3331 write_iov.iov_base = bb_buf; 3332 write_iov.iov_len = sizeof(bb_buf); 3333 3334 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, offset, num_blocks, 0); 3335 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 3336 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, offset, num_blocks, 0); 3337 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 3338 3339 g_io_done = false; 3340 g_compare_read_buf = aa_buf; 3341 
g_compare_read_buf_len = sizeof(aa_buf); 3342 memset(write_buf, 0, sizeof(write_buf)); 3343 g_compare_write_buf = write_buf; 3344 g_compare_write_buf_len = sizeof(write_buf); 3345 rc = spdk_bdev_comparev_and_writev_blocks(desc, ioch, &compare_iov, 1, &write_iov, 1, 3346 offset, num_blocks, io_done, NULL); 3347 /* Trigger range locking */ 3348 poll_threads(); 3349 CU_ASSERT_EQUAL(rc, 0); 3350 num_completed = stub_complete_io(1); 3351 CU_ASSERT_EQUAL(num_completed, 1); 3352 CU_ASSERT(g_io_done == false); 3353 num_completed = stub_complete_io(1); 3354 /* Trigger range unlocking */ 3355 poll_threads(); 3356 CU_ASSERT_EQUAL(num_completed, 1); 3357 CU_ASSERT(g_io_done == true); 3358 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS); 3359 CU_ASSERT(memcmp(write_buf, bb_buf, sizeof(write_buf)) == 0); 3360 3361 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, offset, num_blocks, 0); 3362 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 3363 3364 g_io_done = false; 3365 g_compare_read_buf = cc_buf; 3366 g_compare_read_buf_len = sizeof(cc_buf); 3367 memset(write_buf, 0, sizeof(write_buf)); 3368 g_compare_write_buf = write_buf; 3369 g_compare_write_buf_len = sizeof(write_buf); 3370 rc = spdk_bdev_comparev_and_writev_blocks(desc, ioch, &compare_iov, 1, &write_iov, 1, 3371 offset, num_blocks, io_done, NULL); 3372 /* Trigger range locking */ 3373 poll_threads(); 3374 CU_ASSERT_EQUAL(rc, 0); 3375 num_completed = stub_complete_io(1); 3376 /* Trigger range unlocking earlier because we expect error here */ 3377 poll_threads(); 3378 CU_ASSERT_EQUAL(num_completed, 1); 3379 CU_ASSERT(g_io_done == true); 3380 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_MISCOMPARE); 3381 num_completed = stub_complete_io(1); 3382 CU_ASSERT_EQUAL(num_completed, 0); 3383 3384 spdk_put_io_channel(ioch); 3385 spdk_bdev_close(desc); 3386 free_bdev(bdev); 3387 fn_table.submit_request = stub_submit_request; 3388 spdk_bdev_finish(bdev_fini_cb, NULL); 3389 poll_threads(); 3390 3391 g_io_types_supported[SPDK_BDEV_IO_TYPE_COMPARE] = true; 3392 3393 g_compare_read_buf = NULL; 3394 g_compare_write_buf = NULL; 3395 } 3396 3397 static void 3398 bdev_write_zeroes(void) 3399 { 3400 struct spdk_bdev *bdev; 3401 struct spdk_bdev_desc *desc = NULL; 3402 struct spdk_io_channel *ioch; 3403 struct ut_expected_io *expected_io; 3404 uint64_t offset, num_io_blocks, num_blocks; 3405 uint32_t num_completed, num_requests; 3406 int rc; 3407 3408 spdk_bdev_initialize(bdev_init_cb, NULL); 3409 bdev = allocate_bdev("bdev"); 3410 3411 rc = spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc); 3412 CU_ASSERT_EQUAL(rc, 0); 3413 SPDK_CU_ASSERT_FATAL(desc != NULL); 3414 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 3415 ioch = spdk_bdev_get_io_channel(desc); 3416 SPDK_CU_ASSERT_FATAL(ioch != NULL); 3417 3418 fn_table.submit_request = stub_submit_request; 3419 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 3420 3421 /* First test that if the bdev supports write_zeroes, the request won't be split */ 3422 bdev->md_len = 0; 3423 bdev->blocklen = 4096; 3424 num_blocks = (ZERO_BUFFER_SIZE / bdev->blocklen) * 2; 3425 3426 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, 0, num_blocks, 0); 3427 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 3428 rc = spdk_bdev_write_zeroes_blocks(desc, ioch, 0, num_blocks, io_done, NULL); 3429 CU_ASSERT_EQUAL(rc, 0); 3430 num_completed = stub_complete_io(1); 3431 CU_ASSERT_EQUAL(num_completed, 1); 3432 3433 /* Check that if write zeroes is not 
supported it'll be replaced by regular writes */ 3434 ut_enable_io_type(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, false); 3435 num_io_blocks = ZERO_BUFFER_SIZE / bdev->blocklen; 3436 num_requests = 2; 3437 num_blocks = (ZERO_BUFFER_SIZE / bdev->blocklen) * num_requests; 3438 3439 for (offset = 0; offset < num_requests; ++offset) { 3440 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 3441 offset * num_io_blocks, num_io_blocks, 0); 3442 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 3443 } 3444 3445 rc = spdk_bdev_write_zeroes_blocks(desc, ioch, 0, num_blocks, io_done, NULL); 3446 CU_ASSERT_EQUAL(rc, 0); 3447 num_completed = stub_complete_io(num_requests); 3448 CU_ASSERT_EQUAL(num_completed, num_requests); 3449 3450 /* Check that the splitting is correct if the bdev has interleaved metadata */ 3451 bdev->md_interleave = true; 3452 bdev->md_len = 64; 3453 bdev->blocklen = 4096 + 64; 3454 num_blocks = (ZERO_BUFFER_SIZE / bdev->blocklen) * 2; 3455 3456 num_requests = offset = 0; 3457 while (offset < num_blocks) { 3458 num_io_blocks = spdk_min(ZERO_BUFFER_SIZE / bdev->blocklen, num_blocks - offset); 3459 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 3460 offset, num_io_blocks, 0); 3461 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 3462 offset += num_io_blocks; 3463 num_requests++; 3464 } 3465 3466 rc = spdk_bdev_write_zeroes_blocks(desc, ioch, 0, num_blocks, io_done, NULL); 3467 CU_ASSERT_EQUAL(rc, 0); 3468 num_completed = stub_complete_io(num_requests); 3469 CU_ASSERT_EQUAL(num_completed, num_requests); 3470 num_completed = stub_complete_io(num_requests); 3471 CU_ASSERT_EQUAL(num_completed, 0); 3472 3473 /* Check the same for a separate metadata buffer */ 3474 bdev->md_interleave = false; 3475 bdev->md_len = 64; 3476 bdev->blocklen = 4096; 3477 3478 num_requests = offset = 0; 3479 while (offset < num_blocks) { 3480 num_io_blocks = spdk_min(ZERO_BUFFER_SIZE / (bdev->blocklen + bdev->md_len), num_blocks); 3481 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 3482 offset, num_io_blocks, 0); 3483 expected_io->md_buf = (char *)g_bdev_mgr.zero_buffer + num_io_blocks * bdev->blocklen; 3484 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 3485 offset += num_io_blocks; 3486 num_requests++; 3487 } 3488 3489 rc = spdk_bdev_write_zeroes_blocks(desc, ioch, 0, num_blocks, io_done, NULL); 3490 CU_ASSERT_EQUAL(rc, 0); 3491 num_completed = stub_complete_io(num_requests); 3492 CU_ASSERT_EQUAL(num_completed, num_requests); 3493 3494 ut_enable_io_type(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, true); 3495 spdk_put_io_channel(ioch); 3496 spdk_bdev_close(desc); 3497 free_bdev(bdev); 3498 spdk_bdev_finish(bdev_fini_cb, NULL); 3499 poll_threads(); 3500 } 3501 3502 static void 3503 bdev_zcopy_write(void) 3504 { 3505 struct spdk_bdev *bdev; 3506 struct spdk_bdev_desc *desc = NULL; 3507 struct spdk_io_channel *ioch; 3508 struct ut_expected_io *expected_io; 3509 uint64_t offset, num_blocks; 3510 uint32_t num_completed; 3511 char aa_buf[512]; 3512 struct iovec iov; 3513 int rc; 3514 const bool populate = false; 3515 const bool commit = true; 3516 3517 memset(aa_buf, 0xaa, sizeof(aa_buf)); 3518 3519 spdk_bdev_initialize(bdev_init_cb, NULL); 3520 bdev = allocate_bdev("bdev"); 3521 3522 rc = spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc); 3523 CU_ASSERT_EQUAL(rc, 0); 3524 SPDK_CU_ASSERT_FATAL(desc != NULL); 3525 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 3526 ioch = spdk_bdev_get_io_channel(desc); 3527
SPDK_CU_ASSERT_FATAL(ioch != NULL); 3528 3529 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 3530 3531 offset = 50; 3532 num_blocks = 1; 3533 iov.iov_base = NULL; 3534 iov.iov_len = 0; 3535 3536 g_zcopy_read_buf = (void *) 0x1122334455667788UL; 3537 g_zcopy_read_buf_len = (uint32_t) -1; 3538 /* Do a zcopy start for a write (populate=false) */ 3539 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_ZCOPY, offset, num_blocks, 0); 3540 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 3541 g_io_done = false; 3542 g_zcopy_write_buf = aa_buf; 3543 g_zcopy_write_buf_len = sizeof(aa_buf); 3544 g_zcopy_bdev_io = NULL; 3545 rc = spdk_bdev_zcopy_start(desc, ioch, &iov, 1, offset, num_blocks, populate, io_done, NULL); 3546 CU_ASSERT_EQUAL(rc, 0); 3547 num_completed = stub_complete_io(1); 3548 CU_ASSERT_EQUAL(num_completed, 1); 3549 CU_ASSERT(g_io_done == true); 3550 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS); 3551 /* Check that the iov has been set up */ 3552 CU_ASSERT(iov.iov_base == g_zcopy_write_buf); 3553 CU_ASSERT(iov.iov_len == g_zcopy_write_buf_len); 3554 /* Check that the bdev_io has been saved */ 3555 CU_ASSERT(g_zcopy_bdev_io != NULL); 3556 /* Now do the zcopy end for a write (commit=true) */ 3557 g_io_done = false; 3558 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_ZCOPY, offset, num_blocks, 0); 3559 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 3560 rc = spdk_bdev_zcopy_end(g_zcopy_bdev_io, commit, io_done, NULL); 3561 CU_ASSERT_EQUAL(rc, 0); 3562 num_completed = stub_complete_io(1); 3563 CU_ASSERT_EQUAL(num_completed, 1); 3564 CU_ASSERT(g_io_done == true); 3565 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS); 3566 /* Check that the g_zcopy globals are reset by io_done */ 3567 CU_ASSERT(g_zcopy_write_buf == NULL); 3568 CU_ASSERT(g_zcopy_write_buf_len == 0); 3569 /* Check that io_done has freed the g_zcopy_bdev_io */ 3570 CU_ASSERT(g_zcopy_bdev_io == NULL); 3571 3572 /* Check that the zcopy read buffer has not been touched, which 3573 * ensures that the correct buffers were used.
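 * (g_zcopy_read_buf and g_zcopy_read_buf_len were primed above with sentinel values that nothing should have consumed.)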
3574 */ 3575 CU_ASSERT(g_zcopy_read_buf == (void *) 0x1122334455667788UL); 3576 CU_ASSERT(g_zcopy_read_buf_len == (uint32_t) -1); 3577 3578 spdk_put_io_channel(ioch); 3579 spdk_bdev_close(desc); 3580 free_bdev(bdev); 3581 spdk_bdev_finish(bdev_fini_cb, NULL); 3582 poll_threads(); 3583 } 3584 3585 static void 3586 bdev_zcopy_read(void) 3587 { 3588 struct spdk_bdev *bdev; 3589 struct spdk_bdev_desc *desc = NULL; 3590 struct spdk_io_channel *ioch; 3591 struct ut_expected_io *expected_io; 3592 uint64_t offset, num_blocks; 3593 uint32_t num_completed; 3594 char aa_buf[512]; 3595 struct iovec iov; 3596 int rc; 3597 const bool populate = true; 3598 const bool commit = false; 3599 3600 memset(aa_buf, 0xaa, sizeof(aa_buf)); 3601 3602 spdk_bdev_initialize(bdev_init_cb, NULL); 3603 bdev = allocate_bdev("bdev"); 3604 3605 rc = spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc); 3606 CU_ASSERT_EQUAL(rc, 0); 3607 SPDK_CU_ASSERT_FATAL(desc != NULL); 3608 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 3609 ioch = spdk_bdev_get_io_channel(desc); 3610 SPDK_CU_ASSERT_FATAL(ioch != NULL); 3611 3612 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 3613 3614 offset = 50; 3615 num_blocks = 1; 3616 iov.iov_base = NULL; 3617 iov.iov_len = 0; 3618 3619 g_zcopy_write_buf = (void *) 0x1122334455667788UL; 3620 g_zcopy_write_buf_len = (uint32_t) -1; 3621 3622 /* Do a zcopy start for a read (populate=true) */ 3623 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_ZCOPY, offset, num_blocks, 0); 3624 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 3625 g_io_done = false; 3626 g_zcopy_read_buf = aa_buf; 3627 g_zcopy_read_buf_len = sizeof(aa_buf); 3628 g_zcopy_bdev_io = NULL; 3629 rc = spdk_bdev_zcopy_start(desc, ioch, &iov, 1, offset, num_blocks, populate, io_done, NULL); 3630 CU_ASSERT_EQUAL(rc, 0); 3631 num_completed = stub_complete_io(1); 3632 CU_ASSERT_EQUAL(num_completed, 1); 3633 CU_ASSERT(g_io_done == true); 3634 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS); 3635 /* Check that the iov has been set up */ 3636 CU_ASSERT(iov.iov_base == g_zcopy_read_buf); 3637 CU_ASSERT(iov.iov_len == g_zcopy_read_buf_len); 3638 /* Check that the bdev_io has been saved */ 3639 CU_ASSERT(g_zcopy_bdev_io != NULL); 3640 3641 /* Now do the zcopy end for a read (commit=false) */ 3642 g_io_done = false; 3643 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_ZCOPY, offset, num_blocks, 0); 3644 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 3645 rc = spdk_bdev_zcopy_end(g_zcopy_bdev_io, commit, io_done, NULL); 3646 CU_ASSERT_EQUAL(rc, 0); 3647 num_completed = stub_complete_io(1); 3648 CU_ASSERT_EQUAL(num_completed, 1); 3649 CU_ASSERT(g_io_done == true); 3650 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS); 3651 /* Check that the g_zcopy globals are reset by io_done */ 3652 CU_ASSERT(g_zcopy_read_buf == NULL); 3653 CU_ASSERT(g_zcopy_read_buf_len == 0); 3654 /* Check that io_done has freed the g_zcopy_bdev_io */ 3655 CU_ASSERT(g_zcopy_bdev_io == NULL); 3656 3657 /* Check that the zcopy write buffer has not been touched, which 3658 * ensures that the correct buffers were used.
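 * (g_zcopy_write_buf and g_zcopy_write_buf_len were primed above with sentinel values that nothing should have consumed.)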
3659 */ 3660 CU_ASSERT(g_zcopy_write_buf == (void *) 0x1122334455667788UL); 3661 CU_ASSERT(g_zcopy_write_buf_len == (uint32_t) -1); 3662 3663 spdk_put_io_channel(ioch); 3664 spdk_bdev_close(desc); 3665 free_bdev(bdev); 3666 spdk_bdev_finish(bdev_fini_cb, NULL); 3667 poll_threads(); 3668 } 3669 3670 static void 3671 bdev_open_while_hotremove(void) 3672 { 3673 struct spdk_bdev *bdev; 3674 struct spdk_bdev_desc *desc[2] = {}; 3675 int rc; 3676 3677 bdev = allocate_bdev("bdev"); 3678 3679 rc = spdk_bdev_open_ext("bdev", false, bdev_ut_event_cb, NULL, &desc[0]); 3680 CU_ASSERT(rc == 0); 3681 SPDK_CU_ASSERT_FATAL(desc[0] != NULL); 3682 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc[0])); 3683 3684 spdk_bdev_unregister(bdev, NULL, NULL); 3685 3686 rc = spdk_bdev_open_ext("bdev", false, bdev_ut_event_cb, NULL, &desc[1]); 3687 CU_ASSERT(rc == -ENODEV); 3688 SPDK_CU_ASSERT_FATAL(desc[1] == NULL); 3689 3690 spdk_bdev_close(desc[0]); 3691 free_bdev(bdev); 3692 } 3693 3694 static void 3695 bdev_close_while_hotremove(void) 3696 { 3697 struct spdk_bdev *bdev; 3698 struct spdk_bdev_desc *desc = NULL; 3699 int rc = 0; 3700 3701 bdev = allocate_bdev("bdev"); 3702 3703 rc = spdk_bdev_open_ext("bdev", true, bdev_open_cb1, &desc, &desc); 3704 CU_ASSERT_EQUAL(rc, 0); 3705 SPDK_CU_ASSERT_FATAL(desc != NULL); 3706 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 3707 3708 /* Simulate hot-unplug by unregistering bdev */ 3709 g_event_type1 = 0xFF; 3710 g_unregister_arg = NULL; 3711 g_unregister_rc = -1; 3712 spdk_bdev_unregister(bdev, bdev_unregister_cb, (void *)0x12345678); 3713 /* Close device while remove event is in flight */ 3714 spdk_bdev_close(desc); 3715 3716 /* Ensure that unregister callback is delayed */ 3717 CU_ASSERT_EQUAL(g_unregister_arg, NULL); 3718 CU_ASSERT_EQUAL(g_unregister_rc, -1); 3719 3720 poll_threads(); 3721 3722 /* Event callback shall not be issued because device was closed */ 3723 CU_ASSERT_EQUAL(g_event_type1, 0xFF); 3724 /* Unregister callback is issued */ 3725 CU_ASSERT_EQUAL(g_unregister_arg, (void *)0x12345678); 3726 CU_ASSERT_EQUAL(g_unregister_rc, 0); 3727 3728 free_bdev(bdev); 3729 } 3730 3731 static void 3732 bdev_open_ext(void) 3733 { 3734 struct spdk_bdev *bdev; 3735 struct spdk_bdev_desc *desc1 = NULL; 3736 struct spdk_bdev_desc *desc2 = NULL; 3737 int rc = 0; 3738 3739 bdev = allocate_bdev("bdev"); 3740 3741 rc = spdk_bdev_open_ext("bdev", true, NULL, NULL, &desc1); 3742 CU_ASSERT_EQUAL(rc, -EINVAL); 3743 3744 rc = spdk_bdev_open_ext("bdev", true, bdev_open_cb1, &desc1, &desc1); 3745 CU_ASSERT_EQUAL(rc, 0); 3746 3747 rc = spdk_bdev_open_ext("bdev", true, bdev_open_cb2, &desc2, &desc2); 3748 CU_ASSERT_EQUAL(rc, 0); 3749 3750 g_event_type1 = 0xFF; 3751 g_event_type2 = 0xFF; 3752 3753 /* Simulate hot-unplug by unregistering bdev */ 3754 spdk_bdev_unregister(bdev, NULL, NULL); 3755 poll_threads(); 3756 3757 /* Check if correct events have been triggered in event callback fn */ 3758 CU_ASSERT_EQUAL(g_event_type1, SPDK_BDEV_EVENT_REMOVE); 3759 CU_ASSERT_EQUAL(g_event_type2, SPDK_BDEV_EVENT_REMOVE); 3760 3761 free_bdev(bdev); 3762 poll_threads(); 3763 } 3764 3765 struct timeout_io_cb_arg { 3766 struct iovec iov; 3767 uint8_t type; 3768 }; 3769 3770 static int 3771 bdev_channel_count_submitted_io(struct spdk_bdev_channel *ch) 3772 { 3773 struct spdk_bdev_io *bdev_io; 3774 int n = 0; 3775 3776 if (!ch) { 3777 return -1; 3778 } 3779 3780 TAILQ_FOREACH(bdev_io, &ch->io_submitted, internal.ch_link) { 3781 n++; 3782 } 3783 3784 return n; 3785 } 3786 3787 static void 3788 
bdev_channel_io_timeout_cb(void *cb_arg, struct spdk_bdev_io *bdev_io) 3789 { 3790 struct timeout_io_cb_arg *ctx = cb_arg; 3791 3792 ctx->type = bdev_io->type; 3793 ctx->iov.iov_base = bdev_io->iov.iov_base; 3794 ctx->iov.iov_len = bdev_io->iov.iov_len; 3795 } 3796 3797 static void 3798 bdev_set_io_timeout(void) 3799 { 3800 struct spdk_bdev *bdev; 3801 struct spdk_bdev_desc *desc = NULL; 3802 struct spdk_io_channel *io_ch = NULL; 3803 struct spdk_bdev_channel *bdev_ch = NULL; 3804 struct timeout_io_cb_arg cb_arg; 3805 3806 spdk_bdev_initialize(bdev_init_cb, NULL); 3807 3808 bdev = allocate_bdev("bdev"); 3809 3810 CU_ASSERT(spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc) == 0); 3811 SPDK_CU_ASSERT_FATAL(desc != NULL); 3812 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 3813 3814 io_ch = spdk_bdev_get_io_channel(desc); 3815 CU_ASSERT(io_ch != NULL); 3816 3817 bdev_ch = spdk_io_channel_get_ctx(io_ch); 3818 CU_ASSERT(TAILQ_EMPTY(&bdev_ch->io_submitted)); 3819 3820 /* This is part 1. 3821 * We check the bdev_ch->io_submitted list 3822 * to make sure that it links the I/Os submitted by the user. 3823 */ 3824 CU_ASSERT(spdk_bdev_read(desc, io_ch, (void *)0x1000, 0, 4096, io_done, NULL) == 0); 3825 CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 1); 3826 CU_ASSERT(spdk_bdev_write(desc, io_ch, (void *)0x2000, 0, 4096, io_done, NULL) == 0); 3827 CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 2); 3828 stub_complete_io(1); 3829 CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 1); 3830 stub_complete_io(1); 3831 CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 0); 3832 3833 /* Split IO */ 3834 bdev->optimal_io_boundary = 16; 3835 bdev->split_on_optimal_io_boundary = true; 3836 3837 /* Now test that a single-vector command is split correctly. 3838 * Offset 14, length 8, payload 0xF000 3839 * Child - Offset 14, length 2, payload 0xF000 3840 * Child - Offset 16, length 6, payload 0xF000 + 2 * 512 3841 * 3842 * Set up the expected values before calling spdk_bdev_read_blocks 3843 */ 3844 CU_ASSERT(spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 8, io_done, NULL) == 0); 3845 /* We count all submitted I/Os, including those generated by splitting.
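 * Hence the split read above appears as the parent plus its two children.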
*/ 3846 CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 3); 3847 stub_complete_io(1); 3848 CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 2); 3849 stub_complete_io(1); 3850 CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 0); 3851 3852 /* Also include the reset IO */ 3853 CU_ASSERT(spdk_bdev_reset(desc, io_ch, io_done, NULL) == 0); 3854 CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 1); 3855 poll_threads(); 3856 stub_complete_io(1); 3857 poll_threads(); 3858 CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 0); 3859 3860 /* This is part 2. 3861 * Test registering the desc timeout poller. 3862 */ 3863 3864 /* Successfully set the timeout */ 3865 CU_ASSERT(spdk_bdev_set_timeout(desc, 30, bdev_channel_io_timeout_cb, &cb_arg) == 0); 3866 CU_ASSERT(desc->io_timeout_poller != NULL); 3867 CU_ASSERT(desc->timeout_in_sec == 30); 3868 CU_ASSERT(desc->cb_fn == bdev_channel_io_timeout_cb); 3869 CU_ASSERT(desc->cb_arg == &cb_arg); 3870 3871 /* Change the timeout limit */ 3872 CU_ASSERT(spdk_bdev_set_timeout(desc, 20, bdev_channel_io_timeout_cb, &cb_arg) == 0); 3873 CU_ASSERT(desc->io_timeout_poller != NULL); 3874 CU_ASSERT(desc->timeout_in_sec == 20); 3875 CU_ASSERT(desc->cb_fn == bdev_channel_io_timeout_cb); 3876 CU_ASSERT(desc->cb_arg == &cb_arg); 3877 3878 /* Disable the timeout */ 3879 CU_ASSERT(spdk_bdev_set_timeout(desc, 0, NULL, NULL) == 0); 3880 CU_ASSERT(desc->io_timeout_poller == NULL); 3881 3882 /* This is part 3. 3883 * We test catching a timed-out I/O and check whether it is 3884 * the one that was submitted. 3885 */ 3886 memset(&cb_arg, 0, sizeof(cb_arg)); 3887 CU_ASSERT(spdk_bdev_set_timeout(desc, 30, bdev_channel_io_timeout_cb, &cb_arg) == 0); 3888 CU_ASSERT(spdk_bdev_write_blocks(desc, io_ch, (void *)0x1000, 0, 1, io_done, NULL) == 0); 3889 3890 /* The limit is not reached yet */ 3891 spdk_delay_us(15 * spdk_get_ticks_hz()); 3892 poll_threads(); 3893 CU_ASSERT(cb_arg.type == 0); 3894 CU_ASSERT(cb_arg.iov.iov_base == (void *)0x0); 3895 CU_ASSERT(cb_arg.iov.iov_len == 0); 3896 3897 /* 15 + 15 = 30 reaches the limit */ 3898 spdk_delay_us(15 * spdk_get_ticks_hz()); 3899 poll_threads(); 3900 CU_ASSERT(cb_arg.type == SPDK_BDEV_IO_TYPE_WRITE); 3901 CU_ASSERT(cb_arg.iov.iov_base == (void *)0x1000); 3902 CU_ASSERT(cb_arg.iov.iov_len == 1 * bdev->blocklen); 3903 stub_complete_io(1); 3904 3905 /* Use the same split I/O as above and check the reported I/O */ 3906 memset(&cb_arg, 0, sizeof(cb_arg)); 3907 CU_ASSERT(spdk_bdev_write_blocks(desc, io_ch, (void *)0xF000, 14, 8, io_done, NULL) == 0); 3908 3909 /* The first child completes in time */ 3910 spdk_delay_us(15 * spdk_get_ticks_hz()); 3911 poll_threads(); 3912 stub_complete_io(1); 3913 CU_ASSERT(cb_arg.type == 0); 3914 CU_ASSERT(cb_arg.iov.iov_base == (void *)0x0); 3915 CU_ASSERT(cb_arg.iov.iov_len == 0); 3916 3917 /* The second child reaches the limit */ 3918 spdk_delay_us(15 * spdk_get_ticks_hz()); 3919 poll_threads(); 3920 CU_ASSERT(cb_arg.type == SPDK_BDEV_IO_TYPE_WRITE); 3921 CU_ASSERT(cb_arg.iov.iov_base == (void *)0xF000); 3922 CU_ASSERT(cb_arg.iov.iov_len == 8 * bdev->blocklen); 3923 stub_complete_io(1); 3924 3925 /* Also include the reset IO */ 3926 memset(&cb_arg, 0, sizeof(cb_arg)); 3927 CU_ASSERT(spdk_bdev_reset(desc, io_ch, io_done, NULL) == 0); 3928 spdk_delay_us(30 * spdk_get_ticks_hz()); 3929 poll_threads(); 3930 CU_ASSERT(cb_arg.type == SPDK_BDEV_IO_TYPE_RESET); 3931 stub_complete_io(1); 3932 poll_threads(); 3933 3934 spdk_put_io_channel(io_ch); 3935 spdk_bdev_close(desc); 3936 free_bdev(bdev); 3937 spdk_bdev_finish(bdev_fini_cb, NULL);
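/* Drain the deferred finish callback before the next test re-initializes the bdev layer. */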
3938 poll_threads(); 3939 } 3940 3941 static void 3942 lba_range_overlap(void) 3943 { 3944 struct lba_range r1, r2; 3945 3946 r1.offset = 100; 3947 r1.length = 50; 3948 3949 r2.offset = 0; 3950 r2.length = 1; 3951 CU_ASSERT(!bdev_lba_range_overlapped(&r1, &r2)); 3952 3953 r2.offset = 0; 3954 r2.length = 100; 3955 CU_ASSERT(!bdev_lba_range_overlapped(&r1, &r2)); 3956 3957 r2.offset = 0; 3958 r2.length = 110; 3959 CU_ASSERT(bdev_lba_range_overlapped(&r1, &r2)); 3960 3961 r2.offset = 100; 3962 r2.length = 10; 3963 CU_ASSERT(bdev_lba_range_overlapped(&r1, &r2)); 3964 3965 r2.offset = 110; 3966 r2.length = 20; 3967 CU_ASSERT(bdev_lba_range_overlapped(&r1, &r2)); 3968 3969 r2.offset = 140; 3970 r2.length = 150; 3971 CU_ASSERT(bdev_lba_range_overlapped(&r1, &r2)); 3972 3973 r2.offset = 130; 3974 r2.length = 200; 3975 CU_ASSERT(bdev_lba_range_overlapped(&r1, &r2)); 3976 3977 r2.offset = 150; 3978 r2.length = 100; 3979 CU_ASSERT(!bdev_lba_range_overlapped(&r1, &r2)); 3980 3981 r2.offset = 110; 3982 r2.length = 0; 3983 CU_ASSERT(!bdev_lba_range_overlapped(&r1, &r2)); 3984 } 3985 3986 static bool g_lock_lba_range_done; 3987 static bool g_unlock_lba_range_done; 3988 3989 static void 3990 lock_lba_range_done(void *ctx, int status) 3991 { 3992 g_lock_lba_range_done = true; 3993 } 3994 3995 static void 3996 unlock_lba_range_done(void *ctx, int status) 3997 { 3998 g_unlock_lba_range_done = true; 3999 } 4000 4001 static void 4002 lock_lba_range_check_ranges(void) 4003 { 4004 struct spdk_bdev *bdev; 4005 struct spdk_bdev_desc *desc = NULL; 4006 struct spdk_io_channel *io_ch; 4007 struct spdk_bdev_channel *channel; 4008 struct lba_range *range; 4009 int ctx1; 4010 int rc; 4011 4012 spdk_bdev_initialize(bdev_init_cb, NULL); 4013 4014 bdev = allocate_bdev("bdev0"); 4015 4016 rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc); 4017 CU_ASSERT(rc == 0); 4018 CU_ASSERT(desc != NULL); 4019 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 4020 io_ch = spdk_bdev_get_io_channel(desc); 4021 CU_ASSERT(io_ch != NULL); 4022 channel = spdk_io_channel_get_ctx(io_ch); 4023 4024 g_lock_lba_range_done = false; 4025 rc = bdev_lock_lba_range(desc, io_ch, 20, 10, lock_lba_range_done, &ctx1); 4026 CU_ASSERT(rc == 0); 4027 poll_threads(); 4028 4029 CU_ASSERT(g_lock_lba_range_done == true); 4030 range = TAILQ_FIRST(&channel->locked_ranges); 4031 SPDK_CU_ASSERT_FATAL(range != NULL); 4032 CU_ASSERT(range->offset == 20); 4033 CU_ASSERT(range->length == 10); 4034 CU_ASSERT(range->owner_ch == channel); 4035 4036 /* Unlocks must exactly match a lock. 
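 * A partial unlock of an existing locked range must be rejected with -EINVAL.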
*/ 4037 g_unlock_lba_range_done = false; 4038 rc = bdev_unlock_lba_range(desc, io_ch, 20, 1, unlock_lba_range_done, &ctx1); 4039 CU_ASSERT(rc == -EINVAL); 4040 CU_ASSERT(g_unlock_lba_range_done == false); 4041 4042 rc = bdev_unlock_lba_range(desc, io_ch, 20, 10, unlock_lba_range_done, &ctx1); 4043 CU_ASSERT(rc == 0); 4044 spdk_delay_us(100); 4045 poll_threads(); 4046 4047 CU_ASSERT(g_unlock_lba_range_done == true); 4048 CU_ASSERT(TAILQ_EMPTY(&channel->locked_ranges)); 4049 4050 spdk_put_io_channel(io_ch); 4051 spdk_bdev_close(desc); 4052 free_bdev(bdev); 4053 spdk_bdev_finish(bdev_fini_cb, NULL); 4054 poll_threads(); 4055 } 4056 4057 static void 4058 lock_lba_range_with_io_outstanding(void) 4059 { 4060 struct spdk_bdev *bdev; 4061 struct spdk_bdev_desc *desc = NULL; 4062 struct spdk_io_channel *io_ch; 4063 struct spdk_bdev_channel *channel; 4064 struct lba_range *range; 4065 char buf[4096]; 4066 int ctx1; 4067 int rc; 4068 4069 spdk_bdev_initialize(bdev_init_cb, NULL); 4070 4071 bdev = allocate_bdev("bdev0"); 4072 4073 rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc); 4074 CU_ASSERT(rc == 0); 4075 CU_ASSERT(desc != NULL); 4076 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 4077 io_ch = spdk_bdev_get_io_channel(desc); 4078 CU_ASSERT(io_ch != NULL); 4079 channel = spdk_io_channel_get_ctx(io_ch); 4080 4081 g_io_done = false; 4082 rc = spdk_bdev_read_blocks(desc, io_ch, buf, 20, 1, io_done, &ctx1); 4083 CU_ASSERT(rc == 0); 4084 4085 g_lock_lba_range_done = false; 4086 rc = bdev_lock_lba_range(desc, io_ch, 20, 10, lock_lba_range_done, &ctx1); 4087 CU_ASSERT(rc == 0); 4088 poll_threads(); 4089 4090 /* The lock should immediately become valid, since there are no outstanding 4091 * write I/O. 4092 */ 4093 CU_ASSERT(g_io_done == false); 4094 CU_ASSERT(g_lock_lba_range_done == true); 4095 range = TAILQ_FIRST(&channel->locked_ranges); 4096 SPDK_CU_ASSERT_FATAL(range != NULL); 4097 CU_ASSERT(range->offset == 20); 4098 CU_ASSERT(range->length == 10); 4099 CU_ASSERT(range->owner_ch == channel); 4100 CU_ASSERT(range->locked_ctx == &ctx1); 4101 4102 rc = bdev_unlock_lba_range(desc, io_ch, 20, 10, lock_lba_range_done, &ctx1); 4103 CU_ASSERT(rc == 0); 4104 stub_complete_io(1); 4105 spdk_delay_us(100); 4106 poll_threads(); 4107 4108 CU_ASSERT(TAILQ_EMPTY(&channel->locked_ranges)); 4109 4110 /* Now try again, but with a write I/O. */ 4111 g_io_done = false; 4112 rc = spdk_bdev_write_blocks(desc, io_ch, buf, 20, 1, io_done, &ctx1); 4113 CU_ASSERT(rc == 0); 4114 4115 g_lock_lba_range_done = false; 4116 rc = bdev_lock_lba_range(desc, io_ch, 20, 10, lock_lba_range_done, &ctx1); 4117 CU_ASSERT(rc == 0); 4118 poll_threads(); 4119 4120 /* The lock should not be fully valid yet, since a write I/O is outstanding. 4121 * But note that the range should be on the channel's locked_list, to make sure no 4122 * new write I/O are started. 4123 */ 4124 CU_ASSERT(g_io_done == false); 4125 CU_ASSERT(g_lock_lba_range_done == false); 4126 range = TAILQ_FIRST(&channel->locked_ranges); 4127 SPDK_CU_ASSERT_FATAL(range != NULL); 4128 CU_ASSERT(range->offset == 20); 4129 CU_ASSERT(range->length == 10); 4130 4131 /* Complete the write I/O. This should make the lock valid (checked by confirming 4132 * our callback was invoked). 
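 * via the g_lock_lba_range_done flag).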
4133 */ 4134 stub_complete_io(1); 4135 spdk_delay_us(100); 4136 poll_threads(); 4137 CU_ASSERT(g_io_done == true); 4138 CU_ASSERT(g_lock_lba_range_done == true); 4139 4140 rc = bdev_unlock_lba_range(desc, io_ch, 20, 10, unlock_lba_range_done, &ctx1); 4141 CU_ASSERT(rc == 0); 4142 poll_threads(); 4143 4144 CU_ASSERT(TAILQ_EMPTY(&channel->locked_ranges)); 4145 4146 spdk_put_io_channel(io_ch); 4147 spdk_bdev_close(desc); 4148 free_bdev(bdev); 4149 spdk_bdev_finish(bdev_fini_cb, NULL); 4150 poll_threads(); 4151 } 4152 4153 static void 4154 lock_lba_range_overlapped(void) 4155 { 4156 struct spdk_bdev *bdev; 4157 struct spdk_bdev_desc *desc = NULL; 4158 struct spdk_io_channel *io_ch; 4159 struct spdk_bdev_channel *channel; 4160 struct lba_range *range; 4161 int ctx1; 4162 int rc; 4163 4164 spdk_bdev_initialize(bdev_init_cb, NULL); 4165 4166 bdev = allocate_bdev("bdev0"); 4167 4168 rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc); 4169 CU_ASSERT(rc == 0); 4170 CU_ASSERT(desc != NULL); 4171 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 4172 io_ch = spdk_bdev_get_io_channel(desc); 4173 CU_ASSERT(io_ch != NULL); 4174 channel = spdk_io_channel_get_ctx(io_ch); 4175 4176 /* Lock range 20-29. */ 4177 g_lock_lba_range_done = false; 4178 rc = bdev_lock_lba_range(desc, io_ch, 20, 10, lock_lba_range_done, &ctx1); 4179 CU_ASSERT(rc == 0); 4180 poll_threads(); 4181 4182 CU_ASSERT(g_lock_lba_range_done == true); 4183 range = TAILQ_FIRST(&channel->locked_ranges); 4184 SPDK_CU_ASSERT_FATAL(range != NULL); 4185 CU_ASSERT(range->offset == 20); 4186 CU_ASSERT(range->length == 10); 4187 4188 /* Try to lock range 25-39. It should not lock immediately, since it overlaps with 4189 * 20-29. 4190 */ 4191 g_lock_lba_range_done = false; 4192 rc = bdev_lock_lba_range(desc, io_ch, 25, 15, lock_lba_range_done, &ctx1); 4193 CU_ASSERT(rc == 0); 4194 poll_threads(); 4195 4196 CU_ASSERT(g_lock_lba_range_done == false); 4197 range = TAILQ_FIRST(&bdev->internal.pending_locked_ranges); 4198 SPDK_CU_ASSERT_FATAL(range != NULL); 4199 CU_ASSERT(range->offset == 25); 4200 CU_ASSERT(range->length == 15); 4201 4202 /* Unlock 20-29. This should result in range 25-39 now getting locked since it 4203 * no longer overlaps with an active lock. 4204 */ 4205 g_unlock_lba_range_done = false; 4206 rc = bdev_unlock_lba_range(desc, io_ch, 20, 10, unlock_lba_range_done, &ctx1); 4207 CU_ASSERT(rc == 0); 4208 poll_threads(); 4209 4210 CU_ASSERT(g_unlock_lba_range_done == true); 4211 CU_ASSERT(TAILQ_EMPTY(&bdev->internal.pending_locked_ranges)); 4212 range = TAILQ_FIRST(&channel->locked_ranges); 4213 SPDK_CU_ASSERT_FATAL(range != NULL); 4214 CU_ASSERT(range->offset == 25); 4215 CU_ASSERT(range->length == 15); 4216 4217 /* Lock 40-59. This should immediately lock since it does not overlap with the 4218 * currently active 25-39 lock. 4219 */ 4220 g_lock_lba_range_done = false; 4221 rc = bdev_lock_lba_range(desc, io_ch, 40, 20, lock_lba_range_done, &ctx1); 4222 CU_ASSERT(rc == 0); 4223 poll_threads(); 4224 4225 CU_ASSERT(g_lock_lba_range_done == true); 4226 range = TAILQ_FIRST(&bdev->internal.locked_ranges); 4227 SPDK_CU_ASSERT_FATAL(range != NULL); 4228 range = TAILQ_NEXT(range, tailq); 4229 SPDK_CU_ASSERT_FATAL(range != NULL); 4230 CU_ASSERT(range->offset == 40); 4231 CU_ASSERT(range->length == 20); 4232 4233 /* Try to lock 35-44. Note that this overlaps with both 25-39 and 40-59. 
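 * It must therefore wait on the pending list until both of those locks are released.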
*/ 4234 g_lock_lba_range_done = false; 4235 rc = bdev_lock_lba_range(desc, io_ch, 35, 10, lock_lba_range_done, &ctx1); 4236 CU_ASSERT(rc == 0); 4237 poll_threads(); 4238 4239 CU_ASSERT(g_lock_lba_range_done == false); 4240 range = TAILQ_FIRST(&bdev->internal.pending_locked_ranges); 4241 SPDK_CU_ASSERT_FATAL(range != NULL); 4242 CU_ASSERT(range->offset == 35); 4243 CU_ASSERT(range->length == 10); 4244 4245 /* Unlock 25-39. Make sure that 35-44 is still in the pending list, since 4246 * the 40-59 lock is still active. 4247 */ 4248 g_unlock_lba_range_done = false; 4249 rc = bdev_unlock_lba_range(desc, io_ch, 25, 15, unlock_lba_range_done, &ctx1); 4250 CU_ASSERT(rc == 0); 4251 poll_threads(); 4252 4253 CU_ASSERT(g_unlock_lba_range_done == true); 4254 CU_ASSERT(g_lock_lba_range_done == false); 4255 range = TAILQ_FIRST(&bdev->internal.pending_locked_ranges); 4256 SPDK_CU_ASSERT_FATAL(range != NULL); 4257 CU_ASSERT(range->offset == 35); 4258 CU_ASSERT(range->length == 10); 4259 4260 /* Unlock 40-59. This should result in 35-44 now getting locked, since there are 4261 * no longer any active overlapping locks. 4262 */ 4263 g_unlock_lba_range_done = false; 4264 rc = bdev_unlock_lba_range(desc, io_ch, 40, 20, unlock_lba_range_done, &ctx1); 4265 CU_ASSERT(rc == 0); 4266 poll_threads(); 4267 4268 CU_ASSERT(g_unlock_lba_range_done == true); 4269 CU_ASSERT(g_lock_lba_range_done == true); 4270 CU_ASSERT(TAILQ_EMPTY(&bdev->internal.pending_locked_ranges)); 4271 range = TAILQ_FIRST(&bdev->internal.locked_ranges); 4272 SPDK_CU_ASSERT_FATAL(range != NULL); 4273 CU_ASSERT(range->offset == 35); 4274 CU_ASSERT(range->length == 10); 4275 4276 /* Finally, unlock 35-44. */ 4277 g_unlock_lba_range_done = false; 4278 rc = bdev_unlock_lba_range(desc, io_ch, 35, 10, unlock_lba_range_done, &ctx1); 4279 CU_ASSERT(rc == 0); 4280 poll_threads(); 4281 4282 CU_ASSERT(g_unlock_lba_range_done == true); 4283 CU_ASSERT(TAILQ_EMPTY(&bdev->internal.locked_ranges)); 4284 4285 spdk_put_io_channel(io_ch); 4286 spdk_bdev_close(desc); 4287 free_bdev(bdev); 4288 spdk_bdev_finish(bdev_fini_cb, NULL); 4289 poll_threads(); 4290 } 4291 4292 static void 4293 abort_done(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg) 4294 { 4295 g_abort_done = true; 4296 g_abort_status = bdev_io->internal.status; 4297 spdk_bdev_free_io(bdev_io); 4298 } 4299 4300 static void 4301 bdev_io_abort(void) 4302 { 4303 struct spdk_bdev *bdev; 4304 struct spdk_bdev_desc *desc = NULL; 4305 struct spdk_io_channel *io_ch; 4306 struct spdk_bdev_channel *channel; 4307 struct spdk_bdev_mgmt_channel *mgmt_ch; 4308 struct spdk_bdev_opts bdev_opts = {}; 4309 struct iovec iov[BDEV_IO_NUM_CHILD_IOV * 2]; 4310 uint64_t io_ctx1 = 0, io_ctx2 = 0, i; 4311 int rc; 4312 4313 spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts)); 4314 bdev_opts.bdev_io_pool_size = 7; 4315 bdev_opts.bdev_io_cache_size = 2; 4316 4317 rc = spdk_bdev_set_opts(&bdev_opts); 4318 CU_ASSERT(rc == 0); 4319 spdk_bdev_initialize(bdev_init_cb, NULL); 4320 4321 bdev = allocate_bdev("bdev0"); 4322 4323 rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc); 4324 CU_ASSERT(rc == 0); 4325 CU_ASSERT(desc != NULL); 4326 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 4327 io_ch = spdk_bdev_get_io_channel(desc); 4328 CU_ASSERT(io_ch != NULL); 4329 channel = spdk_io_channel_get_ctx(io_ch); 4330 mgmt_ch = channel->shared_resource->mgmt_ch; 4331 4332 g_abort_done = false; 4333 4334 ut_enable_io_type(SPDK_BDEV_IO_TYPE_ABORT, false); 4335 4336 rc = spdk_bdev_abort(desc, io_ch, &io_ctx1, abort_done, 
NULL); 4337 CU_ASSERT(rc == -ENOTSUP); 4338 4339 ut_enable_io_type(SPDK_BDEV_IO_TYPE_ABORT, true); 4340 4341 rc = spdk_bdev_abort(desc, io_ch, &io_ctx2, abort_done, NULL); 4342 CU_ASSERT(rc == 0); 4343 CU_ASSERT(g_abort_done == true); 4344 CU_ASSERT(g_abort_status == SPDK_BDEV_IO_STATUS_FAILED); 4345 4346 /* Test the case that the target I/O was successfully aborted. */ 4347 g_io_done = false; 4348 4349 rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, &io_ctx1); 4350 CU_ASSERT(rc == 0); 4351 CU_ASSERT(g_io_done == false); 4352 4353 g_abort_done = false; 4354 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 4355 4356 rc = spdk_bdev_abort(desc, io_ch, &io_ctx1, abort_done, NULL); 4357 CU_ASSERT(rc == 0); 4358 CU_ASSERT(g_io_done == true); 4359 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED); 4360 stub_complete_io(1); 4361 CU_ASSERT(g_abort_done == true); 4362 CU_ASSERT(g_abort_status == SPDK_BDEV_IO_STATUS_SUCCESS); 4363 4364 /* Test the case that the target I/O was not aborted because it completed 4365 * while the abort was being executed. 4366 */ 4367 g_io_done = false; 4368 4369 rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, &io_ctx1); 4370 CU_ASSERT(rc == 0); 4371 CU_ASSERT(g_io_done == false); 4372 4373 g_abort_done = false; 4374 g_io_exp_status = SPDK_BDEV_IO_STATUS_FAILED; 4375 4376 rc = spdk_bdev_abort(desc, io_ch, &io_ctx1, abort_done, NULL); 4377 CU_ASSERT(rc == 0); 4378 CU_ASSERT(g_io_done == false); 4379 4380 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 4381 stub_complete_io(1); 4382 CU_ASSERT(g_io_done == true); 4383 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS); 4384 4385 g_io_exp_status = SPDK_BDEV_IO_STATUS_FAILED; 4386 stub_complete_io(1); 4387 CU_ASSERT(g_abort_done == true); 4388 CU_ASSERT(g_abort_status == SPDK_BDEV_IO_STATUS_SUCCESS); 4389 4390 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 4391 4392 bdev->optimal_io_boundary = 16; 4393 bdev->split_on_optimal_io_boundary = true; 4394 4395 /* Test that a single-vector command which is split is aborted correctly. 4396 * Offset 14, length 8, payload 0xF000 4397 * Child - Offset 14, length 2, payload 0xF000 4398 * Child - Offset 16, length 6, payload 0xF000 + 2 * 512 4399 */ 4400 g_io_done = false; 4401 4402 rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 8, io_done, &io_ctx1); 4403 CU_ASSERT(rc == 0); 4404 CU_ASSERT(g_io_done == false); 4405 4406 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 4407 4408 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 4409 4410 rc = spdk_bdev_abort(desc, io_ch, &io_ctx1, abort_done, NULL); 4411 CU_ASSERT(rc == 0); 4412 CU_ASSERT(g_io_done == true); 4413 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED); 4414 stub_complete_io(2); 4415 CU_ASSERT(g_abort_done == true); 4416 CU_ASSERT(g_abort_status == SPDK_BDEV_IO_STATUS_SUCCESS); 4417 4418 /* Test that a multi-vector command that needs to be split by stripe and then 4419 * needs to be split further is aborted correctly. The abort is requested before 4420 * the second child I/O is submitted. The parent I/O should complete with failure 4421 * without submitting the second child I/O.
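 * Only the first child is outstanding here, so completing one stub I/O finishes the whole abort sequence.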
4422 */ 4423 for (i = 0; i < BDEV_IO_NUM_CHILD_IOV * 2; i++) { 4424 iov[i].iov_base = (void *)((i + 1) * 0x10000); 4425 iov[i].iov_len = 512; 4426 } 4427 4428 bdev->optimal_io_boundary = BDEV_IO_NUM_CHILD_IOV; 4429 g_io_done = false; 4430 rc = spdk_bdev_readv_blocks(desc, io_ch, iov, BDEV_IO_NUM_CHILD_IOV * 2, 0, 4431 BDEV_IO_NUM_CHILD_IOV * 2, io_done, &io_ctx1); 4432 CU_ASSERT(rc == 0); 4433 CU_ASSERT(g_io_done == false); 4434 4435 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 4436 4437 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 4438 4439 rc = spdk_bdev_abort(desc, io_ch, &io_ctx1, abort_done, NULL); 4440 CU_ASSERT(rc == 0); 4441 CU_ASSERT(g_io_done == true); 4442 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED); 4443 stub_complete_io(1); 4444 CU_ASSERT(g_abort_done == true); 4445 CU_ASSERT(g_abort_status == SPDK_BDEV_IO_STATUS_SUCCESS); 4446 4447 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 4448 4449 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 4450 4451 bdev->optimal_io_boundary = 16; 4452 g_io_done = false; 4453 4454 /* Test that a single-vector command which is split is aborted correctly. 4455 * Unlike the cases above, the child abort requests are submitted 4456 * sequentially due to the limited capacity of the spdk_bdev_io pool. 4457 */ 4458 rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 50, io_done, &io_ctx1); 4459 CU_ASSERT(rc == 0); 4460 CU_ASSERT(g_io_done == false); 4461 4462 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 4); 4463 4464 g_abort_done = false; 4465 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 4466 4467 rc = spdk_bdev_abort(desc, io_ch, &io_ctx1, abort_done, NULL); 4468 CU_ASSERT(rc == 0); 4469 CU_ASSERT(!TAILQ_EMPTY(&mgmt_ch->io_wait_queue)); 4470 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 4); 4471 4472 stub_complete_io(1); 4473 CU_ASSERT(g_io_done == true); 4474 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED); 4475 stub_complete_io(3); 4476 CU_ASSERT(g_abort_done == true); 4477 CU_ASSERT(g_abort_status == SPDK_BDEV_IO_STATUS_SUCCESS); 4478 4479 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 4480 4481 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 4482 4483 spdk_put_io_channel(io_ch); 4484 spdk_bdev_close(desc); 4485 free_bdev(bdev); 4486 spdk_bdev_finish(bdev_fini_cb, NULL); 4487 poll_threads(); 4488 } 4489 4490 static void 4491 bdev_unmap(void) 4492 { 4493 struct spdk_bdev *bdev; 4494 struct spdk_bdev_desc *desc = NULL; 4495 struct spdk_io_channel *ioch; 4496 struct spdk_bdev_channel *bdev_ch; 4497 struct ut_expected_io *expected_io; 4498 struct spdk_bdev_opts bdev_opts = {}; 4499 uint32_t i, num_outstanding; 4500 uint64_t offset, num_blocks, max_unmap_blocks, num_children; 4501 int rc; 4502 4503 spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts)); 4504 bdev_opts.bdev_io_pool_size = 512; 4505 bdev_opts.bdev_io_cache_size = 64; 4506 rc = spdk_bdev_set_opts(&bdev_opts); 4507 CU_ASSERT(rc == 0); 4508 4509 spdk_bdev_initialize(bdev_init_cb, NULL); 4510 bdev = allocate_bdev("bdev"); 4511 4512 rc = spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc); 4513 CU_ASSERT_EQUAL(rc, 0); 4514 SPDK_CU_ASSERT_FATAL(desc != NULL); 4515 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 4516 ioch = spdk_bdev_get_io_channel(desc); 4517 SPDK_CU_ASSERT_FATAL(ioch != NULL); 4518 bdev_ch = spdk_io_channel_get_ctx(ioch); 4519 CU_ASSERT(TAILQ_EMPTY(&bdev_ch->io_submitted)); 4520 4521 fn_table.submit_request = stub_submit_request; 4522 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 4523 4524 /* Case 1: First, test that
the request won't be split */ 4525 num_blocks = 32; 4526 4527 g_io_done = false; 4528 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_UNMAP, 0, num_blocks, 0); 4529 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 4530 rc = spdk_bdev_unmap_blocks(desc, ioch, 0, num_blocks, io_done, NULL); 4531 CU_ASSERT_EQUAL(rc, 0); 4532 CU_ASSERT(g_io_done == false); 4533 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 4534 stub_complete_io(1); 4535 CU_ASSERT(g_io_done == true); 4536 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 4537 4538 /* Case 2: Test the split with 2 child requests */ 4539 bdev->max_unmap = 8; 4540 bdev->max_unmap_segments = 2; 4541 max_unmap_blocks = bdev->max_unmap * bdev->max_unmap_segments; 4542 num_blocks = max_unmap_blocks * 2; 4543 offset = 0; 4544 4545 g_io_done = false; 4546 for (i = 0; i < 2; i++) { 4547 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_UNMAP, offset, max_unmap_blocks, 0); 4548 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 4549 offset += max_unmap_blocks; 4550 } 4551 4552 rc = spdk_bdev_unmap_blocks(desc, ioch, 0, num_blocks, io_done, NULL); 4553 CU_ASSERT_EQUAL(rc, 0); 4554 CU_ASSERT(g_io_done == false); 4555 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 4556 stub_complete_io(2); 4557 CU_ASSERT(g_io_done == true); 4558 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 4559 4560 /* Case 3: Test the split with 15 child requests; the first 8 finish before the rest are submitted */ 4561 num_children = 15; 4562 num_blocks = max_unmap_blocks * num_children; 4563 g_io_done = false; 4564 offset = 0; 4565 for (i = 0; i < num_children; i++) { 4566 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_UNMAP, offset, max_unmap_blocks, 0); 4567 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 4568 offset += max_unmap_blocks; 4569 } 4570 4571 rc = spdk_bdev_unmap_blocks(desc, ioch, 0, num_blocks, io_done, NULL); 4572 CU_ASSERT_EQUAL(rc, 0); 4573 CU_ASSERT(g_io_done == false); 4574 4575 while (num_children > 0) { 4576 num_outstanding = spdk_min(num_children, SPDK_BDEV_MAX_CHILDREN_UNMAP_WRITE_ZEROES_REQS); 4577 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == num_outstanding); 4578 stub_complete_io(num_outstanding); 4579 num_children -= num_outstanding; 4580 } 4581 CU_ASSERT(g_io_done == true); 4582 4583 spdk_put_io_channel(ioch); 4584 spdk_bdev_close(desc); 4585 free_bdev(bdev); 4586 spdk_bdev_finish(bdev_fini_cb, NULL); 4587 poll_threads(); 4588 } 4589 4590 static void 4591 bdev_write_zeroes_split_test(void) 4592 { 4593 struct spdk_bdev *bdev; 4594 struct spdk_bdev_desc *desc = NULL; 4595 struct spdk_io_channel *ioch; 4596 struct spdk_bdev_channel *bdev_ch; 4597 struct ut_expected_io *expected_io; 4598 struct spdk_bdev_opts bdev_opts = {}; 4599 uint32_t i, num_outstanding; 4600 uint64_t offset, num_blocks, max_write_zeroes_blocks, num_children; 4601 int rc; 4602 4603 spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts)); 4604 bdev_opts.bdev_io_pool_size = 512; 4605 bdev_opts.bdev_io_cache_size = 64; 4606 rc = spdk_bdev_set_opts(&bdev_opts); 4607 CU_ASSERT(rc == 0); 4608 4609 spdk_bdev_initialize(bdev_init_cb, NULL); 4610 bdev = allocate_bdev("bdev"); 4611 4612 rc = spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc); 4613 CU_ASSERT_EQUAL(rc, 0); 4614 SPDK_CU_ASSERT_FATAL(desc != NULL); 4615 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 4616 ioch = spdk_bdev_get_io_channel(desc); 4617 SPDK_CU_ASSERT_FATAL(ioch != NULL); 4618 bdev_ch =
spdk_io_channel_get_ctx(ioch); 4619 CU_ASSERT(TAILQ_EMPTY(&bdev_ch->io_submitted)); 4620 4621 fn_table.submit_request = stub_submit_request; 4622 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 4623 4624 /* Case 1: First, test that the request won't be split */ 4625 num_blocks = 32; 4626 4627 g_io_done = false; 4628 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, 0, num_blocks, 0); 4629 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 4630 rc = spdk_bdev_write_zeroes_blocks(desc, ioch, 0, num_blocks, io_done, NULL); 4631 CU_ASSERT_EQUAL(rc, 0); 4632 CU_ASSERT(g_io_done == false); 4633 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 4634 stub_complete_io(1); 4635 CU_ASSERT(g_io_done == true); 4636 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 4637 4638 /* Case 2: Test the split with 2 child requests */ 4639 max_write_zeroes_blocks = 8; 4640 bdev->max_write_zeroes = max_write_zeroes_blocks; 4641 num_blocks = max_write_zeroes_blocks * 2; 4642 offset = 0; 4643 4644 g_io_done = false; 4645 for (i = 0; i < 2; i++) { 4646 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, offset, max_write_zeroes_blocks, 4647 0); 4648 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 4649 offset += max_write_zeroes_blocks; 4650 } 4651 4652 rc = spdk_bdev_write_zeroes_blocks(desc, ioch, 0, num_blocks, io_done, NULL); 4653 CU_ASSERT_EQUAL(rc, 0); 4654 CU_ASSERT(g_io_done == false); 4655 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 4656 stub_complete_io(2); 4657 CU_ASSERT(g_io_done == true); 4658 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 4659 4660 /* Case 3: Test the split with 15 child requests; the first 8 finish before the rest are submitted */ 4661 num_children = 15; 4662 num_blocks = max_write_zeroes_blocks * num_children; 4663 g_io_done = false; 4664 offset = 0; 4665 for (i = 0; i < num_children; i++) { 4666 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, offset, max_write_zeroes_blocks, 4667 0); 4668 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 4669 offset += max_write_zeroes_blocks; 4670 } 4671 4672 rc = spdk_bdev_write_zeroes_blocks(desc, ioch, 0, num_blocks, io_done, NULL); 4673 CU_ASSERT_EQUAL(rc, 0); 4674 CU_ASSERT(g_io_done == false); 4675 4676 while (num_children > 0) { 4677 num_outstanding = spdk_min(num_children, SPDK_BDEV_MAX_CHILDREN_UNMAP_WRITE_ZEROES_REQS); 4678 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == num_outstanding); 4679 stub_complete_io(num_outstanding); 4680 num_children -= num_outstanding; 4681 } 4682 CU_ASSERT(g_io_done == true); 4683 4684 spdk_put_io_channel(ioch); 4685 spdk_bdev_close(desc); 4686 free_bdev(bdev); 4687 spdk_bdev_finish(bdev_fini_cb, NULL); 4688 poll_threads(); 4689 } 4690 4691 static void 4692 bdev_set_options_test(void) 4693 { 4694 struct spdk_bdev_opts bdev_opts = {}; 4695 int rc; 4696 4697 /* Case 1: Do not set opts_size */ 4698 rc = spdk_bdev_set_opts(&bdev_opts); 4699 CU_ASSERT(rc == -1); 4700 4701 spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts)); 4702 bdev_opts.bdev_io_pool_size = 4; 4703 bdev_opts.bdev_io_cache_size = 2; 4704 bdev_opts.small_buf_pool_size = 4; 4705 4706 /* Case 2: Do not set a valid small_buf_pool_size or large_buf_pool_size */ 4707 rc = spdk_bdev_set_opts(&bdev_opts); 4708 CU_ASSERT(rc == -1); 4709 4710 /* Case 3: Do not set a valid large_buf_pool_size */ 4711 bdev_opts.small_buf_pool_size = BUF_SMALL_POOL_SIZE; 4712 bdev_opts.large_buf_pool_size = BUF_LARGE_POOL_SIZE - 1; 4713 rc =
spdk_bdev_set_opts(&bdev_opts); 4714 CU_ASSERT(rc == -1); 4715 4716 /* Case 4: Set a valid large_buf_pool_size */ 4717 bdev_opts.large_buf_pool_size = BUF_LARGE_POOL_SIZE; 4718 rc = spdk_bdev_set_opts(&bdev_opts); 4719 CU_ASSERT(rc == 0); 4720 4721 /* Case 5: Set different valid values for the small and large buf pools */ 4722 bdev_opts.small_buf_pool_size = BUF_SMALL_POOL_SIZE + 3; 4723 bdev_opts.large_buf_pool_size = BUF_LARGE_POOL_SIZE + 3; 4724 rc = spdk_bdev_set_opts(&bdev_opts); 4725 CU_ASSERT(rc == 0); 4726 } 4727 4728 static uint64_t 4729 get_ns_time(void) 4730 { 4731 int rc; 4732 struct timespec ts; 4733 4734 rc = clock_gettime(CLOCK_MONOTONIC, &ts); 4735 CU_ASSERT(rc == 0); 4736 return ts.tv_sec * 1000 * 1000 * 1000 + ts.tv_nsec; 4737 } 4738 4739 static int 4740 rb_tree_get_height(struct spdk_bdev_name *bdev_name) 4741 { 4742 int h1, h2; 4743 4744 if (bdev_name == NULL) { 4745 return -1; 4746 } else { 4747 h1 = rb_tree_get_height(RB_LEFT(bdev_name, node)); 4748 h2 = rb_tree_get_height(RB_RIGHT(bdev_name, node)); 4749 4750 return spdk_max(h1, h2) + 1; 4751 } 4752 } 4753 4754 static void 4755 bdev_multi_allocation(void) 4756 { 4757 const int max_bdev_num = 1024 * 16; 4758 char name[max_bdev_num][10]; 4759 char noexist_name[] = "invalid_bdev"; 4760 struct spdk_bdev *bdev[max_bdev_num]; 4761 int i, j; 4762 uint64_t last_time; 4763 int bdev_num; 4764 int height; 4765 4766 for (j = 0; j < max_bdev_num; j++) { 4767 snprintf(name[j], sizeof(name[j]), "bdev%d", j); 4768 } 4769 4770 for (i = 0; i < 16; i++) { 4771 last_time = get_ns_time(); 4772 bdev_num = 1024 * (i + 1); 4773 for (j = 0; j < bdev_num; j++) { 4774 bdev[j] = allocate_bdev(name[j]); 4775 height = rb_tree_get_height(&bdev[j]->internal.bdev_name); 4776 CU_ASSERT(height <= (int)(spdk_u32log2(j + 1))); 4777 } 4778 SPDK_NOTICELOG("alloc bdev num %d takes %" PRIu64 " ms\n", bdev_num, 4779 (get_ns_time() - last_time) / 1000 / 1000); 4780 for (j = 0; j < bdev_num; j++) { 4781 CU_ASSERT(spdk_bdev_get_by_name(name[j]) != NULL); 4782 } 4783 CU_ASSERT(spdk_bdev_get_by_name(noexist_name) == NULL); 4784 4785 for (j = 0; j < bdev_num; j++) { 4786 free_bdev(bdev[j]); 4787 } 4788 for (j = 0; j < bdev_num; j++) { 4789 CU_ASSERT(spdk_bdev_get_by_name(name[j]) == NULL); 4790 } 4791 } 4792 } 4793 4794 static struct spdk_memory_domain *g_bdev_memory_domain = (struct spdk_memory_domain *) 0xf00df00d; 4795 4796 static int 4797 test_bdev_get_supported_dma_device_types_op(void *ctx, struct spdk_memory_domain **domains, 4798 int array_size) 4799 { 4800 if (array_size > 0 && domains) { 4801 domains[0] = g_bdev_memory_domain; 4802 } 4803 4804 return 1; 4805 } 4806 4807 static void 4808 bdev_get_memory_domains(void) 4809 { 4810 struct spdk_bdev_fn_table fn_table = { 4811 .get_memory_domains = test_bdev_get_supported_dma_device_types_op 4812 }; 4813 struct spdk_bdev bdev = { .fn_table = &fn_table }; 4814 struct spdk_memory_domain *domains[2] = {}; 4815 int rc; 4816 4817 /* bdev is NULL */ 4818 rc = spdk_bdev_get_memory_domains(NULL, domains, 2); 4819 CU_ASSERT(rc == -EINVAL); 4820 4821 /* domains is NULL */ 4822 rc = spdk_bdev_get_memory_domains(&bdev, NULL, 2); 4823 CU_ASSERT(rc == 1); 4824 4825 /* array size is 0 */ 4826 rc = spdk_bdev_get_memory_domains(&bdev, domains, 0); 4827 CU_ASSERT(rc == 1); 4828 4829 /* get_supported_dma_device_types op is set */ 4830 rc = spdk_bdev_get_memory_domains(&bdev, domains, 2); 4831 CU_ASSERT(rc == 1); 4832 CU_ASSERT(domains[0] == g_bdev_memory_domain); 4833 4834 /* get_supported_dma_device_types op is not set */ 4835
static void
bdev_writev_readv_ext(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc = NULL;
	struct spdk_io_channel *io_ch;
	struct iovec iov = { .iov_base = (void *)0xbaaddead, .iov_len = 0x1000 };
	struct ut_expected_io *expected_io;
	struct spdk_bdev_ext_io_opts ext_io_opts = {
		.metadata = (void *)0xFF000000
	};
	int rc;

	spdk_bdev_initialize(bdev_init_cb, NULL);

	bdev = allocate_bdev("bdev0");
	bdev->md_interleave = false;
	bdev->md_len = 8;

	rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(desc != NULL);
	CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc));
	io_ch = spdk_bdev_get_io_channel(desc);
	CU_ASSERT(io_ch != NULL);

	g_io_done = false;
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 32, 14, 1);
	expected_io->md_buf = ext_io_opts.metadata;
	expected_io->ext_io_opts = &ext_io_opts;
	ut_expected_io_set_iov(expected_io, 0, iov.iov_base, iov.iov_len);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	rc = spdk_bdev_readv_blocks_ext(desc, io_ch, &iov, 1, 32, 14, io_done, NULL, &ext_io_opts);

	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == true);

	g_io_done = false;
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 32, 14, 1);
	expected_io->md_buf = ext_io_opts.metadata;
	expected_io->ext_io_opts = &ext_io_opts;
	ut_expected_io_set_iov(expected_io, 0, iov.iov_base, iov.iov_len);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	rc = spdk_bdev_writev_blocks_ext(desc, io_ch, &iov, 1, 32, 14, io_done, NULL, &ext_io_opts);

	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == true);

	spdk_put_io_channel(io_ch);
	spdk_bdev_close(desc);
	free_bdev(bdev);
	spdk_bdev_finish(bdev_fini_cb, NULL);
	poll_threads();
}
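
/*
 * Illustrative sketch only (ut_example_readv_with_md is a hypothetical helper,
 * not registered with the suite): an ext I/O submitter fills a struct
 * spdk_bdev_ext_io_opts and hands it to the *_ext variants; bdev_writev_readv_ext
 * above checks that the opts pointer and its metadata reach the module's
 * submit_request unmodified.
 */
static inline int
ut_example_readv_with_md(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
			 struct iovec *iov, struct spdk_bdev_ext_io_opts *opts,
			 void *md_buf, spdk_bdev_io_completion_cb cb, void *cb_arg)
{
	/*
	 * opts is referenced by pointer, so the caller owns its storage and keeps
	 * it valid until the completion callback runs, just as bdev_writev_readv_ext
	 * keeps ext_io_opts alive across stub_complete_io().
	 */
	opts->metadata = md_buf;

	/* One iovec reading one block at offset 0, with separate metadata. */
	return spdk_bdev_readv_blocks_ext(desc, ch, iov, 1, 0, 1, cb, cb_arg, opts);
}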
int
main(int argc, char **argv)
{
	CU_pSuite suite = NULL;
	unsigned int num_failures;

	CU_set_error_action(CUEA_ABORT);
	CU_initialize_registry();

	suite = CU_add_suite("bdev", null_init, null_clean);

	CU_ADD_TEST(suite, bytes_to_blocks_test);
	CU_ADD_TEST(suite, num_blocks_test);
	CU_ADD_TEST(suite, io_valid_test);
	CU_ADD_TEST(suite, open_write_test);
	CU_ADD_TEST(suite, alias_add_del_test);
	CU_ADD_TEST(suite, get_device_stat_test);
	CU_ADD_TEST(suite, bdev_io_types_test);
	CU_ADD_TEST(suite, bdev_io_wait_test);
	CU_ADD_TEST(suite, bdev_io_spans_split_test);
	CU_ADD_TEST(suite, bdev_io_boundary_split_test);
	CU_ADD_TEST(suite, bdev_io_max_size_and_segment_split_test);
	CU_ADD_TEST(suite, bdev_io_mix_split_test);
	CU_ADD_TEST(suite, bdev_io_split_with_io_wait);
	CU_ADD_TEST(suite, bdev_io_alignment_with_boundary);
	CU_ADD_TEST(suite, bdev_io_alignment);
	CU_ADD_TEST(suite, bdev_histograms);
	CU_ADD_TEST(suite, bdev_write_zeroes);
	CU_ADD_TEST(suite, bdev_compare_and_write);
	CU_ADD_TEST(suite, bdev_compare);
	CU_ADD_TEST(suite, bdev_zcopy_write);
	CU_ADD_TEST(suite, bdev_zcopy_read);
	CU_ADD_TEST(suite, bdev_open_while_hotremove);
	CU_ADD_TEST(suite, bdev_close_while_hotremove);
	CU_ADD_TEST(suite, bdev_open_ext);
	CU_ADD_TEST(suite, bdev_set_io_timeout);
	CU_ADD_TEST(suite, lba_range_overlap);
	CU_ADD_TEST(suite, lock_lba_range_check_ranges);
	CU_ADD_TEST(suite, lock_lba_range_with_io_outstanding);
	CU_ADD_TEST(suite, lock_lba_range_overlapped);
	CU_ADD_TEST(suite, bdev_io_abort);
	CU_ADD_TEST(suite, bdev_unmap);
	CU_ADD_TEST(suite, bdev_write_zeroes_split_test);
	CU_ADD_TEST(suite, bdev_set_options_test);
	CU_ADD_TEST(suite, bdev_multi_allocation);
	CU_ADD_TEST(suite, bdev_get_memory_domains);
	CU_ADD_TEST(suite, bdev_writev_readv_ext);

	allocate_cores(1);
	allocate_threads(1);
	set_thread(0);

	CU_basic_set_mode(CU_BRM_VERBOSE);
	CU_basic_run_tests();
	num_failures = CU_get_number_of_failures();
	CU_cleanup_registry();

	free_threads();
	free_cores();

	return num_failures;
}