/*-
 *   BSD LICENSE
 *
 *   Copyright (c) Intel Corporation. All rights reserved.
 *   Copyright (c) 2019 Mellanox Technologies LTD. All rights reserved.
 *   Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "spdk_cunit.h"

#include "common/lib/ut_multithread.c"
#include "unit/lib/json_mock.c"

#include "spdk/config.h"
/* HACK: disable VTune integration so the unit test doesn't need VTune headers and libs to build */
#undef SPDK_CONFIG_VTUNE

#include "bdev/bdev.c"

DEFINE_STUB(spdk_notify_send, uint64_t, (const char *type, const char *ctx), 0);
DEFINE_STUB(spdk_notify_type_register, struct spdk_notify_type *, (const char *type), NULL);

int g_status;
int g_count;
enum spdk_bdev_event_type g_event_type1;
enum spdk_bdev_event_type g_event_type2;
struct spdk_histogram_data *g_histogram;
void *g_unregister_arg;
int g_unregister_rc;

void
spdk_scsi_nvme_translate(const struct spdk_bdev_io *bdev_io,
			 int *sc, int *sk, int *asc, int *ascq)
{
}

static int
null_init(void)
{
	return 0;
}

static int
null_clean(void)
{
	return 0;
}

static int
stub_destruct(void *ctx)
{
	return 0;
}

struct ut_expected_io {
	uint8_t				type;
	uint64_t			offset;
	uint64_t			length;
	int				iovcnt;
	struct iovec			iov[BDEV_IO_NUM_CHILD_IOV];
	void				*md_buf;
	TAILQ_ENTRY(ut_expected_io)	link;
};

struct bdev_ut_channel {
	TAILQ_HEAD(, spdk_bdev_io)	outstanding_io;
	uint32_t			outstanding_io_count;
	TAILQ_HEAD(, ut_expected_io)	expected_io;
};

static bool g_io_done;
static struct spdk_bdev_io *g_bdev_io;
static enum spdk_bdev_io_status g_io_status;
static enum spdk_bdev_io_status g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS;
static uint32_t g_bdev_ut_io_device;
static struct bdev_ut_channel *g_bdev_ut_channel;
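
/* Buffers and status values used by stub_submit_request() below to emulate
 * COMPARE, ABORT and ZCOPY I/O without a real backing device.
 */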
static void *g_compare_read_buf;
static uint32_t g_compare_read_buf_len;
static void *g_compare_write_buf;
static uint32_t g_compare_write_buf_len;
static bool g_abort_done;
static enum spdk_bdev_io_status g_abort_status;
static void *g_zcopy_read_buf;
static uint32_t g_zcopy_read_buf_len;
static void *g_zcopy_write_buf;
static uint32_t g_zcopy_write_buf_len;
static struct spdk_bdev_io *g_zcopy_bdev_io;

static struct ut_expected_io *
ut_alloc_expected_io(uint8_t type, uint64_t offset, uint64_t length, int iovcnt)
{
	struct ut_expected_io *expected_io;

	expected_io = calloc(1, sizeof(*expected_io));
	SPDK_CU_ASSERT_FATAL(expected_io != NULL);

	expected_io->type = type;
	expected_io->offset = offset;
	expected_io->length = length;
	expected_io->iovcnt = iovcnt;

	return expected_io;
}

static void
ut_expected_io_set_iov(struct ut_expected_io *expected_io, int pos, void *base, size_t len)
{
	expected_io->iov[pos].iov_base = base;
	expected_io->iov[pos].iov_len = len;
}

static void
stub_submit_request(struct spdk_io_channel *_ch, struct spdk_bdev_io *bdev_io)
{
	struct bdev_ut_channel *ch = spdk_io_channel_get_ctx(_ch);
	struct ut_expected_io *expected_io;
	struct iovec *iov, *expected_iov;
	struct spdk_bdev_io *bio_to_abort;
	int i;

	g_bdev_io = bdev_io;

	if (g_compare_read_buf && bdev_io->type == SPDK_BDEV_IO_TYPE_READ) {
		uint32_t len = bdev_io->u.bdev.iovs[0].iov_len;

		CU_ASSERT(bdev_io->u.bdev.iovcnt == 1);
		CU_ASSERT(g_compare_read_buf_len == len);
		memcpy(bdev_io->u.bdev.iovs[0].iov_base, g_compare_read_buf, len);
	}

	if (g_compare_write_buf && bdev_io->type == SPDK_BDEV_IO_TYPE_WRITE) {
		uint32_t len = bdev_io->u.bdev.iovs[0].iov_len;

		CU_ASSERT(bdev_io->u.bdev.iovcnt == 1);
		CU_ASSERT(g_compare_write_buf_len == len);
		memcpy(g_compare_write_buf, bdev_io->u.bdev.iovs[0].iov_base, len);
	}

	if (g_compare_read_buf && bdev_io->type == SPDK_BDEV_IO_TYPE_COMPARE) {
		uint32_t len = bdev_io->u.bdev.iovs[0].iov_len;

		CU_ASSERT(bdev_io->u.bdev.iovcnt == 1);
		CU_ASSERT(g_compare_read_buf_len == len);
		if (memcmp(bdev_io->u.bdev.iovs[0].iov_base, g_compare_read_buf, len)) {
			g_io_exp_status = SPDK_BDEV_IO_STATUS_MISCOMPARE;
		}
	}

	if (bdev_io->type == SPDK_BDEV_IO_TYPE_ABORT) {
		if (g_io_exp_status == SPDK_BDEV_IO_STATUS_SUCCESS) {
			TAILQ_FOREACH(bio_to_abort, &ch->outstanding_io, module_link) {
				if (bio_to_abort == bdev_io->u.abort.bio_to_abort) {
					TAILQ_REMOVE(&ch->outstanding_io, bio_to_abort, module_link);
					ch->outstanding_io_count--;
					spdk_bdev_io_complete(bio_to_abort, SPDK_BDEV_IO_STATUS_FAILED);
					break;
				}
			}
		}
	}
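
	/* ZCOPY start hands the test-provided buffer to the caller; ZCOPY end
	 * verifies that the same buffer comes back and clears the bookkeeping.
	 */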
	if (bdev_io->type == SPDK_BDEV_IO_TYPE_ZCOPY) {
		if (bdev_io->u.bdev.zcopy.start) {
			g_zcopy_bdev_io = bdev_io;
			if (bdev_io->u.bdev.zcopy.populate) {
				/* Start of a read */
				CU_ASSERT(g_zcopy_read_buf != NULL);
				CU_ASSERT(g_zcopy_read_buf_len > 0);
				bdev_io->u.bdev.iovs[0].iov_base = g_zcopy_read_buf;
				bdev_io->u.bdev.iovs[0].iov_len = g_zcopy_read_buf_len;
				bdev_io->u.bdev.iovcnt = 1;
			} else {
				/* Start of a write */
				CU_ASSERT(g_zcopy_write_buf != NULL);
				CU_ASSERT(g_zcopy_write_buf_len > 0);
				bdev_io->u.bdev.iovs[0].iov_base = g_zcopy_write_buf;
				bdev_io->u.bdev.iovs[0].iov_len = g_zcopy_write_buf_len;
				bdev_io->u.bdev.iovcnt = 1;
			}
		} else {
			if (bdev_io->u.bdev.zcopy.commit) {
				/* End of write */
				CU_ASSERT(bdev_io->u.bdev.iovs[0].iov_base == g_zcopy_write_buf);
				CU_ASSERT(bdev_io->u.bdev.iovs[0].iov_len == g_zcopy_write_buf_len);
				CU_ASSERT(bdev_io->u.bdev.iovcnt == 1);
				g_zcopy_write_buf = NULL;
				g_zcopy_write_buf_len = 0;
			} else {
				/* End of read */
				CU_ASSERT(bdev_io->u.bdev.iovs[0].iov_base == g_zcopy_read_buf);
				CU_ASSERT(bdev_io->u.bdev.iovs[0].iov_len == g_zcopy_read_buf_len);
				CU_ASSERT(bdev_io->u.bdev.iovcnt == 1);
				g_zcopy_read_buf = NULL;
				g_zcopy_read_buf_len = 0;
			}
		}
	}

	TAILQ_INSERT_TAIL(&ch->outstanding_io, bdev_io, module_link);
	ch->outstanding_io_count++;

	expected_io = TAILQ_FIRST(&ch->expected_io);
	if (expected_io == NULL) {
		return;
	}
	TAILQ_REMOVE(&ch->expected_io, expected_io, link);

	if (expected_io->type != SPDK_BDEV_IO_TYPE_INVALID) {
		CU_ASSERT(bdev_io->type == expected_io->type);
	}

	if (expected_io->md_buf != NULL) {
		CU_ASSERT(expected_io->md_buf == bdev_io->u.bdev.md_buf);
	}

	if (expected_io->length == 0) {
		free(expected_io);
		return;
	}

	CU_ASSERT(expected_io->offset == bdev_io->u.bdev.offset_blocks);
	CU_ASSERT(expected_io->length == bdev_io->u.bdev.num_blocks);

	if (expected_io->iovcnt == 0) {
		free(expected_io);
		/* UNMAP, WRITE_ZEROES and FLUSH don't have iovs, so we can just return now. */
		return;
	}

	CU_ASSERT(expected_io->iovcnt == bdev_io->u.bdev.iovcnt);
	for (i = 0; i < expected_io->iovcnt; i++) {
		iov = &bdev_io->u.bdev.iovs[i];
		expected_iov = &expected_io->iov[i];
		CU_ASSERT(iov->iov_len == expected_iov->iov_len);
		CU_ASSERT(iov->iov_base == expected_iov->iov_base);
	}

	free(expected_io);
}

static void
stub_submit_request_get_buf_cb(struct spdk_io_channel *_ch,
			       struct spdk_bdev_io *bdev_io, bool success)
{
	CU_ASSERT(success == true);

	stub_submit_request(_ch, bdev_io);
}

static void
stub_submit_request_get_buf(struct spdk_io_channel *_ch, struct spdk_bdev_io *bdev_io)
{
	spdk_bdev_io_get_buf(bdev_io, stub_submit_request_get_buf_cb,
			     bdev_io->u.bdev.num_blocks * bdev_io->bdev->blocklen);
}
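
/* Complete up to num_to_complete outstanding stub I/Os in FIFO order, using
 * g_io_exp_status as the completion status. Returns the number completed.
 */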
static uint32_t
stub_complete_io(uint32_t num_to_complete)
{
	struct bdev_ut_channel *ch = g_bdev_ut_channel;
	struct spdk_bdev_io *bdev_io;
	static enum spdk_bdev_io_status io_status;
	uint32_t num_completed = 0;

	while (num_completed < num_to_complete) {
		if (TAILQ_EMPTY(&ch->outstanding_io)) {
			break;
		}
		bdev_io = TAILQ_FIRST(&ch->outstanding_io);
		TAILQ_REMOVE(&ch->outstanding_io, bdev_io, module_link);
		ch->outstanding_io_count--;
		io_status = g_io_exp_status == SPDK_BDEV_IO_STATUS_SUCCESS ? SPDK_BDEV_IO_STATUS_SUCCESS :
			    g_io_exp_status;
		spdk_bdev_io_complete(bdev_io, io_status);
		num_completed++;
	}

	return num_completed;
}

static struct spdk_io_channel *
bdev_ut_get_io_channel(void *ctx)
{
	return spdk_get_io_channel(&g_bdev_ut_io_device);
}

static bool g_io_types_supported[SPDK_BDEV_NUM_IO_TYPES] = {
	[SPDK_BDEV_IO_TYPE_READ]		= true,
	[SPDK_BDEV_IO_TYPE_WRITE]		= true,
	[SPDK_BDEV_IO_TYPE_COMPARE]		= true,
	[SPDK_BDEV_IO_TYPE_UNMAP]		= true,
	[SPDK_BDEV_IO_TYPE_FLUSH]		= true,
	[SPDK_BDEV_IO_TYPE_RESET]		= true,
	[SPDK_BDEV_IO_TYPE_NVME_ADMIN]		= true,
	[SPDK_BDEV_IO_TYPE_NVME_IO]		= true,
	[SPDK_BDEV_IO_TYPE_NVME_IO_MD]		= true,
	[SPDK_BDEV_IO_TYPE_WRITE_ZEROES]	= true,
	[SPDK_BDEV_IO_TYPE_ZCOPY]		= true,
	[SPDK_BDEV_IO_TYPE_ABORT]		= true,
};

static void
ut_enable_io_type(enum spdk_bdev_io_type io_type, bool enable)
{
	g_io_types_supported[io_type] = enable;
}

static bool
stub_io_type_supported(void *_bdev, enum spdk_bdev_io_type io_type)
{
	return g_io_types_supported[io_type];
}

static struct spdk_bdev_fn_table fn_table = {
	.destruct = stub_destruct,
	.submit_request = stub_submit_request,
	.get_io_channel = bdev_ut_get_io_channel,
	.io_type_supported = stub_io_type_supported,
};

static int
bdev_ut_create_ch(void *io_device, void *ctx_buf)
{
	struct bdev_ut_channel *ch = ctx_buf;

	CU_ASSERT(g_bdev_ut_channel == NULL);
	g_bdev_ut_channel = ch;

	TAILQ_INIT(&ch->outstanding_io);
	ch->outstanding_io_count = 0;
	TAILQ_INIT(&ch->expected_io);
	return 0;
}

static void
bdev_ut_destroy_ch(void *io_device, void *ctx_buf)
{
	CU_ASSERT(g_bdev_ut_channel != NULL);
	g_bdev_ut_channel = NULL;
}

struct spdk_bdev_module bdev_ut_if;

static int
bdev_ut_module_init(void)
{
	spdk_io_device_register(&g_bdev_ut_io_device, bdev_ut_create_ch, bdev_ut_destroy_ch,
				sizeof(struct bdev_ut_channel), NULL);
	spdk_bdev_module_init_done(&bdev_ut_if);
	return 0;
}

static void
bdev_ut_module_fini(void)
{
	spdk_io_device_unregister(&g_bdev_ut_io_device, NULL);
}

struct spdk_bdev_module bdev_ut_if = {
	.name = "bdev_ut",
	.module_init = bdev_ut_module_init,
	.module_fini = bdev_ut_module_fini,
	.async_init = true,
};

static void vbdev_ut_examine(struct spdk_bdev *bdev);

static int
vbdev_ut_module_init(void)
{
	return 0;
}

static void
vbdev_ut_module_fini(void)
{
}

struct spdk_bdev_module vbdev_ut_if = {
	.name = "vbdev_ut",
	.module_init = vbdev_ut_module_init,
	.module_fini = vbdev_ut_module_fini,
	.examine_config = vbdev_ut_examine,
};

SPDK_BDEV_MODULE_REGISTER(bdev_ut, &bdev_ut_if)
SPDK_BDEV_MODULE_REGISTER(vbdev_ut, &vbdev_ut_if)

static void
vbdev_ut_examine(struct spdk_bdev *bdev)
{
	spdk_bdev_module_examine_done(&vbdev_ut_if);
}

static struct spdk_bdev *
allocate_bdev(char *name)
{
	struct spdk_bdev *bdev;
	int rc;

	bdev = calloc(1, sizeof(*bdev));
	SPDK_CU_ASSERT_FATAL(bdev != NULL);

	bdev->name = name;
	bdev->fn_table = &fn_table;
	bdev->module = &bdev_ut_if;
	bdev->blockcnt = 1024;
	bdev->blocklen = 512;

	rc = spdk_bdev_register(bdev);
	CU_ASSERT(rc == 0);

	return bdev;
}
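
/* Like allocate_bdev(), but the bdev is owned by the vbdev_ut module and is
 * given no block geometry; the tests only use it for claim/open checks.
 */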
static struct spdk_bdev *
allocate_vbdev(char *name)
{
	struct spdk_bdev *bdev;
	int rc;

	bdev = calloc(1, sizeof(*bdev));
	SPDK_CU_ASSERT_FATAL(bdev != NULL);

	bdev->name = name;
	bdev->fn_table = &fn_table;
	bdev->module = &vbdev_ut_if;

	rc = spdk_bdev_register(bdev);
	CU_ASSERT(rc == 0);

	return bdev;
}

static void
free_bdev(struct spdk_bdev *bdev)
{
	spdk_bdev_unregister(bdev, NULL, NULL);
	poll_threads();
	memset(bdev, 0xFF, sizeof(*bdev));
	free(bdev);
}

static void
free_vbdev(struct spdk_bdev *bdev)
{
	spdk_bdev_unregister(bdev, NULL, NULL);
	poll_threads();
	memset(bdev, 0xFF, sizeof(*bdev));
	free(bdev);
}

static void
get_device_stat_cb(struct spdk_bdev *bdev, struct spdk_bdev_io_stat *stat, void *cb_arg, int rc)
{
	const char *bdev_name;

	CU_ASSERT(bdev != NULL);
	CU_ASSERT(rc == 0);
	bdev_name = spdk_bdev_get_name(bdev);
	CU_ASSERT_STRING_EQUAL(bdev_name, "bdev0");

	free(stat);

	*(bool *)cb_arg = true;
}

static void
bdev_unregister_cb(void *cb_arg, int rc)
{
	g_unregister_arg = cb_arg;
	g_unregister_rc = rc;
}

static void
bdev_ut_event_cb(enum spdk_bdev_event_type type, struct spdk_bdev *bdev, void *event_ctx)
{
}

static void
bdev_open_cb1(enum spdk_bdev_event_type type, struct spdk_bdev *bdev, void *event_ctx)
{
	struct spdk_bdev_desc *desc = *(struct spdk_bdev_desc **)event_ctx;

	g_event_type1 = type;
	if (SPDK_BDEV_EVENT_REMOVE == type) {
		spdk_bdev_close(desc);
	}
}

static void
bdev_open_cb2(enum spdk_bdev_event_type type, struct spdk_bdev *bdev, void *event_ctx)
{
	struct spdk_bdev_desc *desc = *(struct spdk_bdev_desc **)event_ctx;

	g_event_type2 = type;
	if (SPDK_BDEV_EVENT_REMOVE == type) {
		spdk_bdev_close(desc);
	}
}

static void
get_device_stat_test(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_io_stat *stat;
	bool done;

	bdev = allocate_bdev("bdev0");
	stat = calloc(1, sizeof(struct spdk_bdev_io_stat));
	if (stat == NULL) {
		free_bdev(bdev);
		return;
	}

	done = false;
	spdk_bdev_get_device_stat(bdev, stat, get_device_stat_cb, &done);
	while (!done) {
		poll_threads();
	}

	free_bdev(bdev);
}
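
/* A module claim (spdk_bdev_module_claim_bdev()) marks a bdev as owned by a
 * (virtual) bdev module: a claimed bdev can still be opened read-only, but
 * opening it for writing fails with -EPERM. The test below builds a small
 * claim tree and checks both directions.
 */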
static void
open_write_test(void)
{
	struct spdk_bdev *bdev[9];
	struct spdk_bdev_desc *desc[9] = {};
	int rc;

	/*
	 * Create a tree of bdevs to test various open w/ write cases.
	 *
	 * bdev0 through bdev3 are physical block devices, such as NVMe
	 * namespaces or Ceph block devices.
	 *
	 * bdev4 is a virtual bdev with multiple base bdevs.  This models
	 * caching or RAID use cases.
	 *
	 * bdev5 through bdev7 all have bdev2 as a base bdev (bdev7 has a
	 * second base as well).  This models partitioning or logical volume
	 * use cases.
	 *
	 * bdev7 is a virtual bdev with multiple base bdevs.  One of its base
	 * bdevs (bdev2) is shared with the other virtual bdevs bdev5 and
	 * bdev6.  This models caching, RAID, partitioning or logical volume
	 * use cases.
	 *
	 * bdev8 is a virtual bdev with multiple base bdevs, but these
	 * base bdevs are themselves virtual bdevs.
	 *
	 *                bdev8
	 *                  |
	 *            +----------+
	 *            |          |
	 *          bdev4      bdev5   bdev6   bdev7
	 *            |          |       |       |
	 *        +---+---+      +---+   +   +---+---+
	 *        |       |           \  |  /         \
	 *      bdev0   bdev1          bdev2          bdev3
	 */

	bdev[0] = allocate_bdev("bdev0");
	rc = spdk_bdev_module_claim_bdev(bdev[0], NULL, &bdev_ut_if);
	CU_ASSERT(rc == 0);

	bdev[1] = allocate_bdev("bdev1");
	rc = spdk_bdev_module_claim_bdev(bdev[1], NULL, &bdev_ut_if);
	CU_ASSERT(rc == 0);

	bdev[2] = allocate_bdev("bdev2");
	rc = spdk_bdev_module_claim_bdev(bdev[2], NULL, &bdev_ut_if);
	CU_ASSERT(rc == 0);

	bdev[3] = allocate_bdev("bdev3");
	rc = spdk_bdev_module_claim_bdev(bdev[3], NULL, &bdev_ut_if);
	CU_ASSERT(rc == 0);

	bdev[4] = allocate_vbdev("bdev4");
	rc = spdk_bdev_module_claim_bdev(bdev[4], NULL, &bdev_ut_if);
	CU_ASSERT(rc == 0);

	bdev[5] = allocate_vbdev("bdev5");
	rc = spdk_bdev_module_claim_bdev(bdev[5], NULL, &bdev_ut_if);
	CU_ASSERT(rc == 0);

	bdev[6] = allocate_vbdev("bdev6");

	bdev[7] = allocate_vbdev("bdev7");

	bdev[8] = allocate_vbdev("bdev8");

	/* Open bdev0 read-only.  This should succeed. */
	rc = spdk_bdev_open_ext("bdev0", false, bdev_ut_event_cb, NULL, &desc[0]);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(desc[0] != NULL);
	CU_ASSERT(bdev[0] == spdk_bdev_desc_get_bdev(desc[0]));
	spdk_bdev_close(desc[0]);

	/*
	 * Open bdev1 read/write.  This should fail since bdev1 has been claimed
	 * by a vbdev module.
	 */
	rc = spdk_bdev_open_ext("bdev1", true, bdev_ut_event_cb, NULL, &desc[1]);
	CU_ASSERT(rc == -EPERM);

	/*
	 * Open bdev4 read/write.  This should fail since bdev4 has been claimed
	 * by a vbdev module.
	 */
	rc = spdk_bdev_open_ext("bdev4", true, bdev_ut_event_cb, NULL, &desc[4]);
	CU_ASSERT(rc == -EPERM);

	/* Open bdev4 read-only.  This should succeed. */
	rc = spdk_bdev_open_ext("bdev4", false, bdev_ut_event_cb, NULL, &desc[4]);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(desc[4] != NULL);
	CU_ASSERT(bdev[4] == spdk_bdev_desc_get_bdev(desc[4]));
	spdk_bdev_close(desc[4]);

	/*
	 * Open bdev8 read/write.  This should succeed since nothing has
	 * claimed it.
	 */
	rc = spdk_bdev_open_ext("bdev8", true, bdev_ut_event_cb, NULL, &desc[8]);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(desc[8] != NULL);
	CU_ASSERT(bdev[8] == spdk_bdev_desc_get_bdev(desc[8]));
	spdk_bdev_close(desc[8]);

	/*
	 * Open bdev5 read/write.  This should fail since bdev5 has been claimed
	 * by a vbdev module.
	 */
	rc = spdk_bdev_open_ext("bdev5", true, bdev_ut_event_cb, NULL, &desc[5]);
	CU_ASSERT(rc == -EPERM);

	/* Open bdev5 read-only.  This should succeed. */
	rc = spdk_bdev_open_ext("bdev5", false, bdev_ut_event_cb, NULL, &desc[5]);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(desc[5] != NULL);
	CU_ASSERT(bdev[5] == spdk_bdev_desc_get_bdev(desc[5]));
	spdk_bdev_close(desc[5]);

	free_vbdev(bdev[8]);

	free_vbdev(bdev[5]);
	free_vbdev(bdev[6]);
	free_vbdev(bdev[7]);

	free_vbdev(bdev[4]);

	free_bdev(bdev[0]);
	free_bdev(bdev[1]);
	free_bdev(bdev[2]);
	free_bdev(bdev[3]);
}

static void
bytes_to_blocks_test(void)
{
	struct spdk_bdev bdev;
	uint64_t offset_blocks, num_blocks;

	memset(&bdev, 0, sizeof(bdev));

	bdev.blocklen = 512;

	/* All parameters valid */
	offset_blocks = 0;
	num_blocks = 0;
	CU_ASSERT(bdev_bytes_to_blocks(&bdev, 512, &offset_blocks, 1024, &num_blocks) == 0);
	CU_ASSERT(offset_blocks == 1);
	CU_ASSERT(num_blocks == 2);

	/* Offset not a block multiple */
	CU_ASSERT(bdev_bytes_to_blocks(&bdev, 3, &offset_blocks, 512, &num_blocks) != 0);

	/* Length not a block multiple */
	CU_ASSERT(bdev_bytes_to_blocks(&bdev, 512, &offset_blocks, 3, &num_blocks) != 0);

	/* Same checks when blocklen is not a power of two */
	bdev.blocklen = 100;
	CU_ASSERT(bdev_bytes_to_blocks(&bdev, 100, &offset_blocks, 200, &num_blocks) == 0);
	CU_ASSERT(offset_blocks == 1);
	CU_ASSERT(num_blocks == 2);

	/* Offset not a block multiple */
	CU_ASSERT(bdev_bytes_to_blocks(&bdev, 3, &offset_blocks, 100, &num_blocks) != 0);

	/* Length not a block multiple */
	CU_ASSERT(bdev_bytes_to_blocks(&bdev, 100, &offset_blocks, 3, &num_blocks) != 0);
}

static void
num_blocks_test(void)
{
	struct spdk_bdev bdev;
	struct spdk_bdev_desc *desc = NULL;
	int rc;

	memset(&bdev, 0, sizeof(bdev));
	bdev.name = "num_blocks";
	bdev.fn_table = &fn_table;
	bdev.module = &bdev_ut_if;
	spdk_bdev_register(&bdev);
	spdk_bdev_notify_blockcnt_change(&bdev, 50);

	/* Growing block number */
	CU_ASSERT(spdk_bdev_notify_blockcnt_change(&bdev, 70) == 0);
	/* Shrinking block number */
	CU_ASSERT(spdk_bdev_notify_blockcnt_change(&bdev, 30) == 0);

	rc = spdk_bdev_open_ext("num_blocks", false, bdev_open_cb1, &desc, &desc);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(desc != NULL);
	CU_ASSERT(&bdev == spdk_bdev_desc_get_bdev(desc));

	/* Growing block number */
	CU_ASSERT(spdk_bdev_notify_blockcnt_change(&bdev, 80) == 0);
	/* Shrinking block number is not allowed while the bdev is open */
	CU_ASSERT(spdk_bdev_notify_blockcnt_change(&bdev, 20) != 0);

	g_event_type1 = 0xFF;
	/* Growing block number */
	CU_ASSERT(spdk_bdev_notify_blockcnt_change(&bdev, 90) == 0);

	poll_threads();
	CU_ASSERT_EQUAL(g_event_type1, SPDK_BDEV_EVENT_RESIZE);

	g_event_type1 = 0xFF;
	/* Growing block number and closing */
	CU_ASSERT(spdk_bdev_notify_blockcnt_change(&bdev, 100) == 0);

	spdk_bdev_close(desc);
	spdk_bdev_unregister(&bdev, NULL, NULL);

	poll_threads();

	/* Callback is not called for closed device */
	CU_ASSERT_EQUAL(g_event_type1, 0xFF);
}
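
/* bdev_io_valid_blocks() must reject any range that starts past the end of
 * the bdev, runs past the end, or overflows uint64_t arithmetic.
 */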
static void
io_valid_test(void)
{
	struct spdk_bdev bdev;

	memset(&bdev, 0, sizeof(bdev));

	bdev.blocklen = 512;
	CU_ASSERT(pthread_mutex_init(&bdev.internal.mutex, NULL) == 0);

	spdk_bdev_notify_blockcnt_change(&bdev, 100);

	/* All parameters valid */
	CU_ASSERT(bdev_io_valid_blocks(&bdev, 1, 2) == true);

	/* Last valid block */
	CU_ASSERT(bdev_io_valid_blocks(&bdev, 99, 1) == true);

	/* Offset past end of bdev */
	CU_ASSERT(bdev_io_valid_blocks(&bdev, 100, 1) == false);

	/* Offset + length past end of bdev */
	CU_ASSERT(bdev_io_valid_blocks(&bdev, 99, 2) == false);

	/* Offset near end of uint64_t range (2^64 - 1) */
	CU_ASSERT(bdev_io_valid_blocks(&bdev, 18446744073709551615ULL, 1) == false);

	CU_ASSERT(pthread_mutex_destroy(&bdev.internal.mutex) == 0);
}

static void
alias_add_del_test(void)
{
	struct spdk_bdev *bdev[3];
	int rc;

	/* Creating and registering bdevs */
	bdev[0] = allocate_bdev("bdev0");
	SPDK_CU_ASSERT_FATAL(bdev[0] != 0);

	bdev[1] = allocate_bdev("bdev1");
	SPDK_CU_ASSERT_FATAL(bdev[1] != 0);

	bdev[2] = allocate_bdev("bdev2");
	SPDK_CU_ASSERT_FATAL(bdev[2] != 0);

	poll_threads();

	/*
	 * Try adding an alias identical to the name.  Since the alias matches
	 * the name, it cannot be added to the aliases list.
	 */
	rc = spdk_bdev_alias_add(bdev[0], bdev[0]->name);
	CU_ASSERT(rc == -EEXIST);

	/* Trying to add an empty alias should fail. */
	rc = spdk_bdev_alias_add(bdev[0], NULL);
	CU_ASSERT(rc == -EINVAL);

	/* Try adding the same alias to two different registered bdevs. */

	/* Alias is used for the first time, so this one should pass. */
	rc = spdk_bdev_alias_add(bdev[0], "proper alias 0");
	CU_ASSERT(rc == 0);

	/* Alias was already added to another bdev, so this one should fail. */
	rc = spdk_bdev_alias_add(bdev[1], "proper alias 0");
	CU_ASSERT(rc == -EEXIST);

	/* Alias is used for the first time, so this one should pass. */
	rc = spdk_bdev_alias_add(bdev[1], "proper alias 1");
	CU_ASSERT(rc == 0);

	/* Try removing aliases from registered bdevs. */

	/* Alias is not on the bdev's aliases list, so this one should fail. */
	rc = spdk_bdev_alias_del(bdev[0], "not existing");
	CU_ASSERT(rc == -ENOENT);

	/* Alias is present on the bdev's aliases list, so this one should pass. */
	rc = spdk_bdev_alias_del(bdev[0], "proper alias 0");
	CU_ASSERT(rc == 0);

	/* Alias is present on the bdev's aliases list, so this one should pass. */
	rc = spdk_bdev_alias_del(bdev[1], "proper alias 1");
	CU_ASSERT(rc == 0);

	/* Trying to remove the name instead of an alias should fail; the name
	 * cannot be changed or removed.
	 */
	rc = spdk_bdev_alias_del(bdev[0], bdev[0]->name);
	CU_ASSERT(rc != 0);

	/* Delete all aliases from an empty alias list. */
	spdk_bdev_alias_del_all(bdev[2]);
	SPDK_CU_ASSERT_FATAL(TAILQ_EMPTY(&bdev[2]->aliases));

	/* Delete all aliases from a non-empty alias list. */
	rc = spdk_bdev_alias_add(bdev[2], "alias0");
	CU_ASSERT(rc == 0);
	rc = spdk_bdev_alias_add(bdev[2], "alias1");
	CU_ASSERT(rc == 0);
	spdk_bdev_alias_del_all(bdev[2]);
	CU_ASSERT(TAILQ_EMPTY(&bdev[2]->aliases));

	/* Unregister and free bdevs */
	spdk_bdev_unregister(bdev[0], NULL, NULL);
	spdk_bdev_unregister(bdev[1], NULL, NULL);
	spdk_bdev_unregister(bdev[2], NULL, NULL);

	poll_threads();

	free(bdev[0]);
	free(bdev[1]);
	free(bdev[2]);
}
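
/* Generic completion callback: record the status and, for a ZCOPY start I/O,
 * keep the bdev_io alive in g_zcopy_bdev_io so the test can commit or abort
 * it later.
 */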
static void
io_done(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
{
	g_io_done = true;
	g_io_status = bdev_io->internal.status;
	if ((bdev_io->type == SPDK_BDEV_IO_TYPE_ZCOPY) &&
	    (bdev_io->u.bdev.zcopy.start)) {
		g_zcopy_bdev_io = bdev_io;
	} else {
		spdk_bdev_free_io(bdev_io);
		g_zcopy_bdev_io = NULL;
	}
}

static void
bdev_init_cb(void *arg, int rc)
{
	CU_ASSERT(rc == 0);
}

static void
bdev_fini_cb(void *arg)
{
}

struct bdev_ut_io_wait_entry {
	struct spdk_bdev_io_wait_entry	entry;
	struct spdk_io_channel		*io_ch;
	struct spdk_bdev_desc		*desc;
	bool				submitted;
};

static void
io_wait_cb(void *arg)
{
	struct bdev_ut_io_wait_entry *entry = arg;
	int rc;

	rc = spdk_bdev_read_blocks(entry->desc, entry->io_ch, NULL, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	entry->submitted = true;
}

static void
bdev_io_types_test(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc = NULL;
	struct spdk_io_channel *io_ch;
	struct spdk_bdev_opts bdev_opts = {};
	int rc;

	spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts));
	bdev_opts.bdev_io_pool_size = 4;
	bdev_opts.bdev_io_cache_size = 2;

	rc = spdk_bdev_set_opts(&bdev_opts);
	CU_ASSERT(rc == 0);
	spdk_bdev_initialize(bdev_init_cb, NULL);
	poll_threads();

	bdev = allocate_bdev("bdev0");

	rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc);
	CU_ASSERT(rc == 0);
	poll_threads();
	SPDK_CU_ASSERT_FATAL(desc != NULL);
	CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc));
	io_ch = spdk_bdev_get_io_channel(desc);
	CU_ASSERT(io_ch != NULL);

	/* WRITE and WRITE ZEROES are not supported */
	ut_enable_io_type(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, false);
	ut_enable_io_type(SPDK_BDEV_IO_TYPE_WRITE, false);
	rc = spdk_bdev_write_zeroes_blocks(desc, io_ch, 0, 128, io_done, NULL);
	CU_ASSERT(rc == -ENOTSUP);
	ut_enable_io_type(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, true);
	ut_enable_io_type(SPDK_BDEV_IO_TYPE_WRITE, true);

	spdk_put_io_channel(io_ch);
	spdk_bdev_close(desc);
	free_bdev(bdev);
	spdk_bdev_finish(bdev_fini_cb, NULL);
	poll_threads();
}

static void
bdev_io_wait_test(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc = NULL;
	struct spdk_io_channel *io_ch;
	struct spdk_bdev_opts bdev_opts = {};
	struct bdev_ut_io_wait_entry io_wait_entry;
	struct bdev_ut_io_wait_entry io_wait_entry2;
	int rc;

	spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts));
	bdev_opts.bdev_io_pool_size = 4;
	bdev_opts.bdev_io_cache_size = 2;

	rc = spdk_bdev_set_opts(&bdev_opts);
	CU_ASSERT(rc == 0);
	spdk_bdev_initialize(bdev_init_cb, NULL);
	poll_threads();

	bdev = allocate_bdev("bdev0");

	rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc);
	CU_ASSERT(rc == 0);
	poll_threads();
	SPDK_CU_ASSERT_FATAL(desc != NULL);
	CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc));
	io_ch = spdk_bdev_get_io_channel(desc);
	CU_ASSERT(io_ch != NULL);

	rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 4);

	rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, NULL);
	CU_ASSERT(rc == -ENOMEM);
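
	/* The 4-entry bdev_io pool is now exhausted, so queue wait entries;
	 * io_wait_cb() resubmits a read as soon as an outstanding I/O completes.
	 */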
	io_wait_entry.entry.bdev = bdev;
	io_wait_entry.entry.cb_fn = io_wait_cb;
	io_wait_entry.entry.cb_arg = &io_wait_entry;
	io_wait_entry.io_ch = io_ch;
	io_wait_entry.desc = desc;
	io_wait_entry.submitted = false;
	/* Cannot use the same io_wait_entry for two different calls. */
	memcpy(&io_wait_entry2, &io_wait_entry, sizeof(io_wait_entry));
	io_wait_entry2.entry.cb_arg = &io_wait_entry2;

	/* Queue two I/O waits. */
	rc = spdk_bdev_queue_io_wait(bdev, io_ch, &io_wait_entry.entry);
	CU_ASSERT(rc == 0);
	CU_ASSERT(io_wait_entry.submitted == false);
	rc = spdk_bdev_queue_io_wait(bdev, io_ch, &io_wait_entry2.entry);
	CU_ASSERT(rc == 0);
	CU_ASSERT(io_wait_entry2.submitted == false);

	stub_complete_io(1);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 4);
	CU_ASSERT(io_wait_entry.submitted == true);
	CU_ASSERT(io_wait_entry2.submitted == false);

	stub_complete_io(1);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 4);
	CU_ASSERT(io_wait_entry2.submitted == true);

	stub_complete_io(4);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);

	spdk_put_io_channel(io_ch);
	spdk_bdev_close(desc);
	free_bdev(bdev);
	spdk_bdev_finish(bdev_fini_cb, NULL);
	poll_threads();
}

static void
bdev_io_spans_split_test(void)
{
	struct spdk_bdev bdev;
	struct spdk_bdev_io bdev_io;
	struct iovec iov[BDEV_IO_NUM_CHILD_IOV];

	memset(&bdev, 0, sizeof(bdev));
	bdev_io.u.bdev.iovs = iov;

	bdev_io.type = SPDK_BDEV_IO_TYPE_READ;
	bdev.optimal_io_boundary = 0;
	bdev.max_segment_size = 0;
	bdev.max_num_segments = 0;
	bdev_io.bdev = &bdev;

	/* bdev has no optimal_io_boundary and no max segment size/count set, so
	 * this should return false. */
	CU_ASSERT(bdev_io_should_split(&bdev_io) == false);

	bdev.split_on_optimal_io_boundary = true;
	bdev.optimal_io_boundary = 32;
	bdev_io.type = SPDK_BDEV_IO_TYPE_RESET;

	/* RESETs are not based on LBAs, so this should return false. */
	CU_ASSERT(bdev_io_should_split(&bdev_io) == false);

	bdev_io.type = SPDK_BDEV_IO_TYPE_READ;
	bdev_io.u.bdev.offset_blocks = 0;
	bdev_io.u.bdev.num_blocks = 32;

	/* This I/O runs right up to, but does not cross, the boundary, so this
	 * should return false. */
	CU_ASSERT(bdev_io_should_split(&bdev_io) == false);

	bdev_io.u.bdev.num_blocks = 33;

	/* This I/O spans a boundary. */
	CU_ASSERT(bdev_io_should_split(&bdev_io) == true);
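
	/* Next exercise the segment-based split criteria, max_segment_size and
	 * max_num_segments, first with the boundary check still enabled and
	 * then with it disabled.
	 */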
	bdev_io.u.bdev.num_blocks = 32;
	bdev.max_segment_size = 512 * 32;
	bdev.max_num_segments = 1;
	bdev_io.u.bdev.iovcnt = 1;
	iov[0].iov_len = 512;

	/* Does not cross a boundary and does not exceed max_segment_size or max_num_segments */
	CU_ASSERT(bdev_io_should_split(&bdev_io) == false);

	bdev.split_on_optimal_io_boundary = false;
	bdev.max_segment_size = 512;
	bdev.max_num_segments = 1;
	bdev_io.u.bdev.iovcnt = 2;

	/* Exceeds max_num_segments */
	CU_ASSERT(bdev_io_should_split(&bdev_io) == true);

	bdev.max_num_segments = 2;
	iov[0].iov_len = 513;
	iov[1].iov_len = 512;

	/* Exceeds max_segment_size */
	CU_ASSERT(bdev_io_should_split(&bdev_io) == true);
}

static void
bdev_io_boundary_split_test(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc = NULL;
	struct spdk_io_channel *io_ch;
	struct spdk_bdev_opts bdev_opts = {};
	struct iovec iov[BDEV_IO_NUM_CHILD_IOV * 2];
	struct ut_expected_io *expected_io;
	void *md_buf = (void *)0xFF000000;
	uint64_t i;
	int rc;

	spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts));
	bdev_opts.bdev_io_pool_size = 512;
	bdev_opts.bdev_io_cache_size = 64;

	rc = spdk_bdev_set_opts(&bdev_opts);
	CU_ASSERT(rc == 0);
	spdk_bdev_initialize(bdev_init_cb, NULL);

	bdev = allocate_bdev("bdev0");

	rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(desc != NULL);
	CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc));
	io_ch = spdk_bdev_get_io_channel(desc);
	CU_ASSERT(io_ch != NULL);

	bdev->optimal_io_boundary = 16;
	bdev->split_on_optimal_io_boundary = false;

	g_io_done = false;

	/* First test that the I/O does not get split if split_on_optimal_io_boundary == false. */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 14, 8, 1);
	ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 8 * 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 8, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);

	bdev->split_on_optimal_io_boundary = true;
	bdev->md_interleave = false;
	bdev->md_len = 8;

	/* Now test that a single-vector command is split correctly.
	 * Offset 14, length 8, payload 0xF000
	 *  Child - Offset 14, length 2, payload 0xF000
	 *  Child - Offset 16, length 6, payload 0xF000 + 2 * 512
	 *
	 * Set up the expected values before calling spdk_bdev_read_blocks
	 */
	g_io_done = false;
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 14, 2, 1);
	expected_io->md_buf = md_buf;
	ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 2 * 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 16, 6, 1);
	expected_io->md_buf = md_buf + 2 * 8;
	ut_expected_io_set_iov(expected_io, 0, (void *)(0xF000 + 2 * 512), 6 * 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	/* spdk_bdev_read_blocks will submit the first child immediately. */
	rc = spdk_bdev_read_blocks_with_md(desc, io_ch, (void *)0xF000, md_buf,
					   14, 8, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2);
	stub_complete_io(2);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);

	/* Now set up a more complex, multi-vector command that needs to be split,
	 * including splitting iovecs.
	 */
	iov[0].iov_base = (void *)0x10000;
	iov[0].iov_len = 512;
	iov[1].iov_base = (void *)0x20000;
	iov[1].iov_len = 20 * 512;
	iov[2].iov_base = (void *)0x30000;
	iov[2].iov_len = 11 * 512;

	g_io_done = false;
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 14, 2, 2);
	expected_io->md_buf = md_buf;
	ut_expected_io_set_iov(expected_io, 0, (void *)0x10000, 512);
	ut_expected_io_set_iov(expected_io, 1, (void *)0x20000, 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 16, 16, 1);
	expected_io->md_buf = md_buf + 2 * 8;
	ut_expected_io_set_iov(expected_io, 0, (void *)(0x20000 + 512), 16 * 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 32, 14, 2);
	expected_io->md_buf = md_buf + 18 * 8;
	ut_expected_io_set_iov(expected_io, 0, (void *)(0x20000 + 17 * 512), 3 * 512);
	ut_expected_io_set_iov(expected_io, 1, (void *)0x30000, 11 * 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	rc = spdk_bdev_writev_blocks_with_md(desc, io_ch, iov, 3, md_buf,
					     14, 32, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 3);
	stub_complete_io(3);
	CU_ASSERT(g_io_done == true);

	/* Test multi vector command that needs to be split by strip and then needs to be
	 * split further due to the capacity of child iovs.
	 */
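	/* BDEV_IO_NUM_CHILD_IOV is the per-child iov capacity defined in bdev.c
	 * (32), so 2 * BDEV_IO_NUM_CHILD_IOV single-block iovs split into exactly
	 * two full children.
	 */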
	for (i = 0; i < BDEV_IO_NUM_CHILD_IOV * 2; i++) {
		iov[i].iov_base = (void *)((i + 1) * 0x10000);
		iov[i].iov_len = 512;
	}

	bdev->optimal_io_boundary = BDEV_IO_NUM_CHILD_IOV;
	g_io_done = false;
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0, BDEV_IO_NUM_CHILD_IOV,
					   BDEV_IO_NUM_CHILD_IOV);
	expected_io->md_buf = md_buf;
	for (i = 0; i < BDEV_IO_NUM_CHILD_IOV; i++) {
		ut_expected_io_set_iov(expected_io, i, (void *)((i + 1) * 0x10000), 512);
	}
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, BDEV_IO_NUM_CHILD_IOV,
					   BDEV_IO_NUM_CHILD_IOV, BDEV_IO_NUM_CHILD_IOV);
	expected_io->md_buf = md_buf + BDEV_IO_NUM_CHILD_IOV * 8;
	for (i = 0; i < BDEV_IO_NUM_CHILD_IOV; i++) {
		ut_expected_io_set_iov(expected_io, i,
				       (void *)((i + 1 + BDEV_IO_NUM_CHILD_IOV) * 0x10000), 512);
	}
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	rc = spdk_bdev_readv_blocks_with_md(desc, io_ch, iov, BDEV_IO_NUM_CHILD_IOV * 2, md_buf,
					    0, BDEV_IO_NUM_CHILD_IOV * 2, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);

	/* Test multi vector command that needs to be split by strip and then needs to be
	 * split further due to the capacity of child iovs.  In this case, the length of
	 * the rest of the iovec array with an I/O boundary is a multiple of the block size.
	 */

	/* Fill the iovec array for exactly one boundary.  The iovec count for this boundary
	 * is BDEV_IO_NUM_CHILD_IOV + 1, which exceeds the capacity of child iovs.
	 */
	for (i = 0; i < BDEV_IO_NUM_CHILD_IOV - 2; i++) {
		iov[i].iov_base = (void *)((i + 1) * 0x10000);
		iov[i].iov_len = 512;
	}
	for (i = BDEV_IO_NUM_CHILD_IOV - 2; i < BDEV_IO_NUM_CHILD_IOV; i++) {
		iov[i].iov_base = (void *)((i + 1) * 0x10000);
		iov[i].iov_len = 256;
	}
	iov[BDEV_IO_NUM_CHILD_IOV].iov_base = (void *)((BDEV_IO_NUM_CHILD_IOV + 1) * 0x10000);
	iov[BDEV_IO_NUM_CHILD_IOV].iov_len = 512;

	/* Add an extra iovec to trigger split */
	iov[BDEV_IO_NUM_CHILD_IOV + 1].iov_base = (void *)((BDEV_IO_NUM_CHILD_IOV + 2) * 0x10000);
	iov[BDEV_IO_NUM_CHILD_IOV + 1].iov_len = 512;

	bdev->optimal_io_boundary = BDEV_IO_NUM_CHILD_IOV;
	g_io_done = false;
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0,
					   BDEV_IO_NUM_CHILD_IOV - 1, BDEV_IO_NUM_CHILD_IOV);
	expected_io->md_buf = md_buf;
	for (i = 0; i < BDEV_IO_NUM_CHILD_IOV - 2; i++) {
		ut_expected_io_set_iov(expected_io, i,
				       (void *)((i + 1) * 0x10000), 512);
	}
	for (i = BDEV_IO_NUM_CHILD_IOV - 2; i < BDEV_IO_NUM_CHILD_IOV; i++) {
		ut_expected_io_set_iov(expected_io, i,
				       (void *)((i + 1) * 0x10000), 256);
	}
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, BDEV_IO_NUM_CHILD_IOV - 1,
					   1, 1);
	expected_io->md_buf = md_buf + (BDEV_IO_NUM_CHILD_IOV - 1) * 8;
	ut_expected_io_set_iov(expected_io, 0,
			       (void *)((BDEV_IO_NUM_CHILD_IOV + 1) * 0x10000), 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, BDEV_IO_NUM_CHILD_IOV,
					   1, 1);
	expected_io->md_buf = md_buf + BDEV_IO_NUM_CHILD_IOV * 8;
	ut_expected_io_set_iov(expected_io, 0,
			       (void *)((BDEV_IO_NUM_CHILD_IOV + 2) * 0x10000), 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	rc = spdk_bdev_readv_blocks_with_md(desc, io_ch, iov, BDEV_IO_NUM_CHILD_IOV + 2, md_buf,
					    0, BDEV_IO_NUM_CHILD_IOV + 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2);
	stub_complete_io(2);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);

	/* Test multi vector command that needs to be split by strip and then needs to be
	 * split further due to the capacity of child iovs.  In this case the child request
	 * offset should be rewound to the last aligned offset so that the request completes
	 * successfully.
	 */
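	/* The two 256-byte fragments at the capacity limit would leave the first
	 * child ending mid-block, so the split rewinds one block: the first child
	 * carries BDEV_IO_NUM_CHILD_IOV - 1 whole blocks and the second child
	 * reads the block assembled from the two 256-byte halves.
	 */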
	for (i = 0; i < BDEV_IO_NUM_CHILD_IOV - 1; i++) {
		iov[i].iov_base = (void *)((i + 1) * 0x10000);
		iov[i].iov_len = 512;
	}
	iov[BDEV_IO_NUM_CHILD_IOV - 1].iov_base = (void *)(BDEV_IO_NUM_CHILD_IOV * 0x10000);
	iov[BDEV_IO_NUM_CHILD_IOV - 1].iov_len = 256;

	iov[BDEV_IO_NUM_CHILD_IOV].iov_base = (void *)((BDEV_IO_NUM_CHILD_IOV + 1) * 0x10000);
	iov[BDEV_IO_NUM_CHILD_IOV].iov_len = 256;

	iov[BDEV_IO_NUM_CHILD_IOV + 1].iov_base = (void *)((BDEV_IO_NUM_CHILD_IOV + 2) * 0x10000);
	iov[BDEV_IO_NUM_CHILD_IOV + 1].iov_len = 512;

	bdev->optimal_io_boundary = BDEV_IO_NUM_CHILD_IOV;
	g_io_done = false;
	g_io_status = 0;
	/* The first expected I/O spans blocks [0, BDEV_IO_NUM_CHILD_IOV - 1). */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0,
					   BDEV_IO_NUM_CHILD_IOV - 1, BDEV_IO_NUM_CHILD_IOV - 1);
	expected_io->md_buf = md_buf;
	for (i = 0; i < BDEV_IO_NUM_CHILD_IOV - 1; i++) {
		ut_expected_io_set_iov(expected_io, i,
				       (void *)((i + 1) * 0x10000), 512);
	}
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
	/* The second expected I/O spans blocks [BDEV_IO_NUM_CHILD_IOV - 1, BDEV_IO_NUM_CHILD_IOV). */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, BDEV_IO_NUM_CHILD_IOV - 1,
					   1, 2);
	expected_io->md_buf = md_buf + (BDEV_IO_NUM_CHILD_IOV - 1) * 8;
	ut_expected_io_set_iov(expected_io, 0,
			       (void *)(BDEV_IO_NUM_CHILD_IOV * 0x10000), 256);
	ut_expected_io_set_iov(expected_io, 1,
			       (void *)((BDEV_IO_NUM_CHILD_IOV + 1) * 0x10000), 256);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
	/* The third expected I/O spans blocks [BDEV_IO_NUM_CHILD_IOV, BDEV_IO_NUM_CHILD_IOV + 1). */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, BDEV_IO_NUM_CHILD_IOV,
					   1, 1);
	expected_io->md_buf = md_buf + BDEV_IO_NUM_CHILD_IOV * 8;
	ut_expected_io_set_iov(expected_io, 0,
			       (void *)((BDEV_IO_NUM_CHILD_IOV + 2) * 0x10000), 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	rc = spdk_bdev_readv_blocks_with_md(desc, io_ch, iov, BDEV_IO_NUM_CHILD_IOV * 2, md_buf,
					    0, BDEV_IO_NUM_CHILD_IOV + 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2);
	stub_complete_io(2);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);

	/* Test a multi-vector command that needs to be split due to the IO boundary and
	 * the capacity of child iovs.  In particular, test the case where the command is
	 * split due to the capacity of child iovs and the tail address is not aligned
	 * with the block size, so it is rewound to the aligned address.
	 *
	 * The iovecs used in the read request are complex but are based on the data
	 * collected in a real issue.  We change the base addresses but keep the lengths
	 * so as not to lose the credibility of the test.
	 */
	bdev->optimal_io_boundary = 128;
	g_io_done = false;
	g_io_status = 0;

	for (i = 0; i < 31; i++) {
		iov[i].iov_base = (void *)(0xFEED0000000 + (i << 20));
		iov[i].iov_len = 1024;
	}
	iov[31].iov_base = (void *)0xFEED1F00000;
	iov[31].iov_len = 32768;
	iov[32].iov_base = (void *)0xFEED2000000;
	iov[32].iov_len = 160;
	iov[33].iov_base = (void *)0xFEED2100000;
	iov[33].iov_len = 4096;
	iov[34].iov_base = (void *)0xFEED2200000;
	iov[34].iov_len = 4096;
	iov[35].iov_base = (void *)0xFEED2300000;
	iov[35].iov_len = 4096;
	iov[36].iov_base = (void *)0xFEED2400000;
	iov[36].iov_len = 4096;
	iov[37].iov_base = (void *)0xFEED2500000;
	iov[37].iov_len = 4096;
	iov[38].iov_base = (void *)0xFEED2600000;
	iov[38].iov_len = 4096;
	iov[39].iov_base = (void *)0xFEED2700000;
	iov[39].iov_len = 4096;
	iov[40].iov_base = (void *)0xFEED2800000;
	iov[40].iov_len = 4096;
	iov[41].iov_base = (void *)0xFEED2900000;
	iov[41].iov_len = 4096;
	iov[42].iov_base = (void *)0xFEED2A00000;
	iov[42].iov_len = 4096;
	iov[43].iov_base = (void *)0xFEED2B00000;
	iov[43].iov_len = 12288;
	iov[44].iov_base = (void *)0xFEED2C00000;
	iov[44].iov_len = 8192;
	iov[45].iov_base = (void *)0xFEED2F00000;
	iov[45].iov_len = 4096;
	iov[46].iov_base = (void *)0xFEED3000000;
	iov[46].iov_len = 4096;
	iov[47].iov_base = (void *)0xFEED3100000;
	iov[47].iov_len = 4096;
	iov[48].iov_base = (void *)0xFEED3200000;
	iov[48].iov_len = 24576;
	iov[49].iov_base = (void *)0xFEED3300000;
	iov[49].iov_len = 16384;
	iov[50].iov_base = (void *)0xFEED3400000;
	iov[50].iov_len = 12288;
	iov[51].iov_base = (void *)0xFEED3500000;
	iov[51].iov_len = 4096;
	iov[52].iov_base = (void *)0xFEED3600000;
	iov[52].iov_len = 4096;
	iov[53].iov_base = (void *)0xFEED3700000;
	iov[53].iov_len = 4096;
	iov[54].iov_base = (void *)0xFEED3800000;
	iov[54].iov_len = 28672;
	iov[55].iov_base = (void *)0xFEED3900000;
	iov[55].iov_len = 20480;
	iov[56].iov_base = (void *)0xFEED3A00000;
	iov[56].iov_len = 4096;
	iov[57].iov_base = (void *)0xFEED3B00000;
	iov[57].iov_len = 12288;
	iov[58].iov_base = (void *)0xFEED3C00000;
	iov[58].iov_len = 4096;
	iov[59].iov_base = (void *)0xFEED3D00000;
	iov[59].iov_len = 4096;
	iov[60].iov_base = (void *)0xFEED3E00000;
	iov[60].iov_len = 352;

	/* The 1st child IO must be from iov[0] to iov[31], split by the capacity
	 * of child iovs.
	 */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0, 126, 32);
	expected_io->md_buf = md_buf;
	for (i = 0; i < 32; i++) {
		ut_expected_io_set_iov(expected_io, i, iov[i].iov_base, iov[i].iov_len);
	}
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	/* The 2nd child IO must be from iov[32] to the first 864 bytes of iov[33],
	 * split by the IO boundary requirement.
	 */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 126, 2, 2);
	expected_io->md_buf = md_buf + 126 * 8;
	ut_expected_io_set_iov(expected_io, 0, iov[32].iov_base, iov[32].iov_len);
	ut_expected_io_set_iov(expected_io, 1, iov[33].iov_base, 864);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	/* The 3rd child IO must be from the remaining 3232 bytes of iov[33] to
	 * the first 864 bytes of iov[46], split by the IO boundary requirement.
	 */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 128, 128, 14);
	expected_io->md_buf = md_buf + 128 * 8;
	ut_expected_io_set_iov(expected_io, 0, (void *)((uintptr_t)iov[33].iov_base + 864),
			       iov[33].iov_len - 864);
	ut_expected_io_set_iov(expected_io, 1, iov[34].iov_base, iov[34].iov_len);
	ut_expected_io_set_iov(expected_io, 2, iov[35].iov_base, iov[35].iov_len);
	ut_expected_io_set_iov(expected_io, 3, iov[36].iov_base, iov[36].iov_len);
	ut_expected_io_set_iov(expected_io, 4, iov[37].iov_base, iov[37].iov_len);
	ut_expected_io_set_iov(expected_io, 5, iov[38].iov_base, iov[38].iov_len);
	ut_expected_io_set_iov(expected_io, 6, iov[39].iov_base, iov[39].iov_len);
	ut_expected_io_set_iov(expected_io, 7, iov[40].iov_base, iov[40].iov_len);
	ut_expected_io_set_iov(expected_io, 8, iov[41].iov_base, iov[41].iov_len);
	ut_expected_io_set_iov(expected_io, 9, iov[42].iov_base, iov[42].iov_len);
	ut_expected_io_set_iov(expected_io, 10, iov[43].iov_base, iov[43].iov_len);
	ut_expected_io_set_iov(expected_io, 11, iov[44].iov_base, iov[44].iov_len);
	ut_expected_io_set_iov(expected_io, 12, iov[45].iov_base, iov[45].iov_len);
	ut_expected_io_set_iov(expected_io, 13, iov[46].iov_base, 864);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	/* The 4th child IO must be from the remaining 3232 bytes of iov[46] to the
	 * first 864 bytes of iov[52], split by the IO boundary requirement.
	 */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 256, 128, 7);
	expected_io->md_buf = md_buf + 256 * 8;
	ut_expected_io_set_iov(expected_io, 0, (void *)((uintptr_t)iov[46].iov_base + 864),
			       iov[46].iov_len - 864);
	ut_expected_io_set_iov(expected_io, 1, iov[47].iov_base, iov[47].iov_len);
	ut_expected_io_set_iov(expected_io, 2, iov[48].iov_base, iov[48].iov_len);
	ut_expected_io_set_iov(expected_io, 3, iov[49].iov_base, iov[49].iov_len);
	ut_expected_io_set_iov(expected_io, 4, iov[50].iov_base, iov[50].iov_len);
	ut_expected_io_set_iov(expected_io, 5, iov[51].iov_base, iov[51].iov_len);
	ut_expected_io_set_iov(expected_io, 6, iov[52].iov_base, 864);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	/* The 5th child IO must be from the remaining 3232 bytes of iov[52] to
	 * the first 4960 bytes of iov[57], split by the IO boundary requirement.
	 */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 384, 128, 6);
	expected_io->md_buf = md_buf + 384 * 8;
	ut_expected_io_set_iov(expected_io, 0, (void *)((uintptr_t)iov[52].iov_base + 864),
			       iov[52].iov_len - 864);
	ut_expected_io_set_iov(expected_io, 1, iov[53].iov_base, iov[53].iov_len);
	ut_expected_io_set_iov(expected_io, 2, iov[54].iov_base, iov[54].iov_len);
	ut_expected_io_set_iov(expected_io, 3, iov[55].iov_base, iov[55].iov_len);
	ut_expected_io_set_iov(expected_io, 4, iov[56].iov_base, iov[56].iov_len);
	ut_expected_io_set_iov(expected_io, 5, iov[57].iov_base, 4960);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	/* The 6th child IO must be from the remaining 7328 bytes of iov[57],
	 * all of iov[58], and the first 3936 bytes of iov[59], split by the
	 * capacity of child iovs.
	 */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 512, 30, 3);
	expected_io->md_buf = md_buf + 512 * 8;
	ut_expected_io_set_iov(expected_io, 0, (void *)((uintptr_t)iov[57].iov_base + 4960),
			       iov[57].iov_len - 4960);
	ut_expected_io_set_iov(expected_io, 1, iov[58].iov_base, iov[58].iov_len);
	ut_expected_io_set_iov(expected_io, 2, iov[59].iov_base, 3936);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	/* The 7th child IO is from the remaining 160 bytes of iov[59] and iov[60]. */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 542, 1, 2);
	expected_io->md_buf = md_buf + 542 * 8;
	ut_expected_io_set_iov(expected_io, 0, (void *)((uintptr_t)iov[59].iov_base + 3936),
			       iov[59].iov_len - 3936);
	ut_expected_io_set_iov(expected_io, 1, iov[60].iov_base, iov[60].iov_len);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	rc = spdk_bdev_readv_blocks_with_md(desc, io_ch, iov, 61, md_buf,
					    0, 543, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 5);
	stub_complete_io(5);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
	CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS);

	/* Test a WRITE_ZEROES that would span an I/O boundary.  WRITE_ZEROES should not be
	 * split, so test that.
	 */
	bdev->optimal_io_boundary = 15;
	g_io_done = false;
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, 9, 36, 0);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	rc = spdk_bdev_write_zeroes_blocks(desc, io_ch, 9, 36, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == true);

	/* Test an UNMAP.  This should also not be split. */
	bdev->optimal_io_boundary = 16;
	g_io_done = false;
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_UNMAP, 15, 2, 0);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	rc = spdk_bdev_unmap_blocks(desc, io_ch, 15, 2, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == true);

	/* Test a FLUSH.  This should also not be split. */
	bdev->optimal_io_boundary = 16;
	g_io_done = false;
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_FLUSH, 15, 2, 0);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	rc = spdk_bdev_flush_blocks(desc, io_ch, 15, 2, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == true);

	CU_ASSERT(TAILQ_EMPTY(&g_bdev_ut_channel->expected_io));

	/* Child requests return an error status */
	bdev->optimal_io_boundary = 16;
	iov[0].iov_base = (void *)0x10000;
	iov[0].iov_len = 512 * 64;
	g_io_exp_status = SPDK_BDEV_IO_STATUS_FAILED;
	g_io_done = false;
	g_io_status = SPDK_BDEV_IO_STATUS_SUCCESS;

	rc = spdk_bdev_readv_blocks(desc, io_ch, iov, 1, 1, 64, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 5);
	stub_complete_io(4);
	CU_ASSERT(g_io_done == false);
	CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED);

	/* Test that a multi-vector command is terminated with failure, without
	 * continuing the splitting process, when one of its child I/Os fails.
	 * The multi-vector command is the same as the one above that needs to be
	 * split by strip and then split further due to the capacity of child iovs.
	 */
	for (i = 0; i < BDEV_IO_NUM_CHILD_IOV - 1; i++) {
		iov[i].iov_base = (void *)((i + 1) * 0x10000);
		iov[i].iov_len = 512;
	}
	iov[BDEV_IO_NUM_CHILD_IOV - 1].iov_base = (void *)(BDEV_IO_NUM_CHILD_IOV * 0x10000);
	iov[BDEV_IO_NUM_CHILD_IOV - 1].iov_len = 256;

	iov[BDEV_IO_NUM_CHILD_IOV].iov_base = (void *)((BDEV_IO_NUM_CHILD_IOV + 1) * 0x10000);
	iov[BDEV_IO_NUM_CHILD_IOV].iov_len = 256;

	iov[BDEV_IO_NUM_CHILD_IOV + 1].iov_base = (void *)((BDEV_IO_NUM_CHILD_IOV + 2) * 0x10000);
	iov[BDEV_IO_NUM_CHILD_IOV + 1].iov_len = 512;

	bdev->optimal_io_boundary = BDEV_IO_NUM_CHILD_IOV;

	g_io_exp_status = SPDK_BDEV_IO_STATUS_FAILED;
	g_io_done = false;
	g_io_status = SPDK_BDEV_IO_STATUS_SUCCESS;

	rc = spdk_bdev_readv_blocks(desc, io_ch, iov, BDEV_IO_NUM_CHILD_IOV * 2, 0,
				    BDEV_IO_NUM_CHILD_IOV + 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED);

	g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS;

	/* For this test we will create the following conditions to hit the code path
	 * where we are trying to send an IO following a split that has no iovs because
	 * we had to trim them for alignment reasons.
	bdev->optimal_io_boundary = 32;
	bdev->split_on_optimal_io_boundary = true;
	g_io_done = false;

	/* Init all parent IOVs to 0x212 */
	for (i = 0; i < BDEV_IO_NUM_CHILD_IOV + 2; i++) {
		iov[i].iov_base = (void *)((i + 1) * 0x10000);
		iov[i].iov_len = 0x212;
	}

	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0, BDEV_IO_NUM_CHILD_IOV,
					   BDEV_IO_NUM_CHILD_IOV - 1);
	/* expect 0-29 to be 1:1 with the parent iov */
	for (i = 0; i < BDEV_IO_NUM_CHILD_IOV - 2; i++) {
		ut_expected_io_set_iov(expected_io, i, iov[i].iov_base, iov[i].iov_len);
	}

	/* expect index 30 to be shortened to 0x1e4 (0x212 - 0x2e) because of the
	 * alignment, where 0x2e is the amount we overshot the 16K boundary
	 */
	ut_expected_io_set_iov(expected_io, BDEV_IO_NUM_CHILD_IOV - 2,
			       (void *)(iov[BDEV_IO_NUM_CHILD_IOV - 2].iov_base), 0x1e4);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	/* 2nd child IO will have 2 remaining vectors, one to pick up from the one
	 * that was shortened, taking it to the next boundary, and then a final one to
	 * get us to 0x4200 bytes for the IO.
	 */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, BDEV_IO_NUM_CHILD_IOV,
					   1, 2);
	/* position 30 picked up the remaining bytes to the next boundary */
	ut_expected_io_set_iov(expected_io, 0,
			       (void *)(iov[BDEV_IO_NUM_CHILD_IOV - 2].iov_base + 0x1e4), 0x2e);

	/* position 31 picked up the rest of the transfer to get us to 0x4200 */
	ut_expected_io_set_iov(expected_io, 1,
			       (void *)(iov[BDEV_IO_NUM_CHILD_IOV - 1].iov_base), 0x1d2);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	rc = spdk_bdev_readv_blocks(desc, io_ch, iov, BDEV_IO_NUM_CHILD_IOV + 1, 0,
				    BDEV_IO_NUM_CHILD_IOV + 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);

	spdk_put_io_channel(io_ch);
	spdk_bdev_close(desc);
	free_bdev(bdev);
	spdk_bdev_finish(bdev_fini_cb, NULL);
	poll_threads();
}

static void
bdev_io_max_size_and_segment_split_test(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc = NULL;
	struct spdk_io_channel *io_ch;
	struct spdk_bdev_opts bdev_opts = {};
	struct iovec iov[BDEV_IO_NUM_CHILD_IOV * 2];
	struct ut_expected_io *expected_io;
	uint64_t i;
	int rc;

	spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts));
	bdev_opts.bdev_io_pool_size = 512;
	bdev_opts.bdev_io_cache_size = 64;

	bdev_opts.opts_size = sizeof(bdev_opts);
	rc = spdk_bdev_set_opts(&bdev_opts);
	CU_ASSERT(rc == 0);
	spdk_bdev_initialize(bdev_init_cb, NULL);

	bdev = allocate_bdev("bdev0");

	rc = spdk_bdev_open_ext(bdev->name, true, bdev_ut_event_cb, NULL, &desc);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(desc != NULL);
	io_ch = spdk_bdev_get_io_channel(desc);
	CU_ASSERT(io_ch != NULL);

	bdev->split_on_optimal_io_boundary = false;
	bdev->optimal_io_boundary = 0;

	/* Case 0: max_num_segments == 0, but the segment size, 2 * 512, exceeds
	 * max_segment_size (512).
	 */
	bdev->max_segment_size = 512;
	bdev->max_num_segments = 0;
	g_io_done = false;

	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 14, 2, 2);
	ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 512);
	ut_expected_io_set_iov(expected_io, 1, (void *)(0xF000 + 512), 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 2, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);

	/* Case 1: max_segment_size == 0, but the iov count, 2, exceeds
	 * max_num_segments (1).
	 */
	bdev->max_segment_size = 0;
	bdev->max_num_segments = 1;
	g_io_done = false;

	iov[0].iov_base = (void *)0x10000;
	iov[0].iov_len = 512;
	iov[1].iov_base = (void *)0x20000;
	iov[1].iov_len = 8 * 512;

	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 14, 1, 1);
	ut_expected_io_set_iov(expected_io, 0, iov[0].iov_base, iov[0].iov_len);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 15, 8, 1);
	ut_expected_io_set_iov(expected_io, 0, iov[1].iov_base, iov[1].iov_len);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	rc = spdk_bdev_readv_blocks(desc, io_ch, iov, 2, 14, 9, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2);
	stub_complete_io(2);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
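
	/* A worked version of the two cases above (a sketch, blocklen 512): in Case 0
	 * only max_segment_size binds, so the 2-block read stays a single child IO
	 * but is carved into two 512-byte iov entries.  In Case 1 only
	 * max_num_segments binds, so each parent iov becomes its own single-iov
	 * child IO, one of 1 block and one of 8 blocks.
	 */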

	/* Test that a non-vector command is split correctly.
	 * Set up the expected values before calling spdk_bdev_read_blocks().
	 */
	bdev->max_segment_size = 512;
	bdev->max_num_segments = 1;
	g_io_done = false;

	/* Child IO 0 */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 14, 1, 1);
	ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	/* Child IO 1 */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 15, 1, 1);
	ut_expected_io_set_iov(expected_io, 0, (void *)(0xF000 + 1 * 512), 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	/* spdk_bdev_read_blocks will submit the first child immediately. */
	rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 2, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2);
	stub_complete_io(2);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);

	/* Now set up a more complex, multi-vector command that needs to be split,
	 * including splitting iovecs.
	 */
	bdev->max_segment_size = 2 * 512;
	bdev->max_num_segments = 1;
	g_io_done = false;

	iov[0].iov_base = (void *)0x10000;
	iov[0].iov_len = 2 * 512;
	iov[1].iov_base = (void *)0x20000;
	iov[1].iov_len = 4 * 512;
	iov[2].iov_base = (void *)0x30000;
	iov[2].iov_len = 6 * 512;

	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 14, 2, 1);
	ut_expected_io_set_iov(expected_io, 0, iov[0].iov_base, 512 * 2);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	/* Split iov[1] into 2 iov entries, then split the segments */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 16, 2, 1);
	ut_expected_io_set_iov(expected_io, 0, iov[1].iov_base, 512 * 2);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 18, 2, 1);
	ut_expected_io_set_iov(expected_io, 0, iov[1].iov_base + 512 * 2, 512 * 2);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	/* Split iov[2] into 3 iov entries, then split the segments */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 20, 2, 1);
	ut_expected_io_set_iov(expected_io, 0, iov[2].iov_base, 512 * 2);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 22, 2, 1);
	ut_expected_io_set_iov(expected_io, 0, iov[2].iov_base + 512 * 2, 512 * 2);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 24, 2, 1);
	ut_expected_io_set_iov(expected_io, 0, iov[2].iov_base + 512 * 4, 512 * 2);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	rc = spdk_bdev_writev_blocks(desc, io_ch, iov, 3, 14, 12, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 6);
	stub_complete_io(6);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);

	/* Test a multi vector command that needs to be split by strip and then needs
	 * to be split further due to the capacity of the parent IO child iovs.
	 */
	bdev->max_segment_size = 512;
	bdev->max_num_segments = 1;
	g_io_done = false;

	for (i = 0; i < BDEV_IO_NUM_CHILD_IOV; i++) {
		iov[i].iov_base = (void *)((i + 1) * 0x10000);
		iov[i].iov_len = 512 * 2;
	}

	/* Each input iov is split into 2 child iovs, so half of the input iovs fill
	 * all the child iov entries of a single child IO.
	 */
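
	/* Sizing the rounds (a sketch): 32 parent iovs of 1024 bytes give a 64-block
	 * parent IO, and with max_segment_size == 512 and max_num_segments == 1
	 * every 512-byte half becomes its own one-block child IO.  Each child
	 * consumes one of the parent's BDEV_IO_NUM_CHILD_IOV (32) child iov slots,
	 * so the 64 children go out in two rounds of 32, which is what the
	 * outstanding_io_count assertions below verify.
	 */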
	for (i = 0; i < BDEV_IO_NUM_CHILD_IOV / 2; i++) {
		expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 2 * i, 1, 1);
		ut_expected_io_set_iov(expected_io, 0, iov[i].iov_base, 512);
		TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

		expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 2 * i + 1, 1, 1);
		ut_expected_io_set_iov(expected_io, 0, iov[i].iov_base + 512, 512);
		TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
	}

	/* The remaining iovs are split in the second round */
	for (i = BDEV_IO_NUM_CHILD_IOV / 2; i < BDEV_IO_NUM_CHILD_IOV; i++) {
		expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, i * 2, 1, 1);
		ut_expected_io_set_iov(expected_io, 0, iov[i].iov_base, 512);
		TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

		expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, i * 2 + 1, 1, 1);
		ut_expected_io_set_iov(expected_io, 0, iov[i].iov_base + 512, 512);
		TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
	}

	rc = spdk_bdev_readv_blocks(desc, io_ch, iov, BDEV_IO_NUM_CHILD_IOV, 0,
				    BDEV_IO_NUM_CHILD_IOV * 2, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == BDEV_IO_NUM_CHILD_IOV);
	stub_complete_io(BDEV_IO_NUM_CHILD_IOV);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == BDEV_IO_NUM_CHILD_IOV);
	stub_complete_io(BDEV_IO_NUM_CHILD_IOV);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);

	/* An error case: a child IO produced by the split is not a multiple of the
	 * block size, so the splitting process exits with an error.
	 */
	bdev->max_segment_size = 512;
	bdev->max_num_segments = 1;
	g_io_done = false;

	iov[0].iov_base = (void *)0x10000;
	iov[0].iov_len = 512 + 256;
	iov[1].iov_base = (void *)0x20000;
	iov[1].iov_len = 256;

	/* iov[0] is split into 512 and 256 bytes.  256 is less than one block, and
	 * the next round of splitting finds that its first child IO is smaller than
	 * the block size, so it exits with an error.
	 */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0, 1, 1);
	ut_expected_io_set_iov(expected_io, 0, iov[0].iov_base, 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	rc = spdk_bdev_readv_blocks(desc, io_ch, iov, 2, 0, 2, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	/* The first child IO is OK */
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);

	/* Error exit */
	stub_complete_io(1);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);

	/* Test a multi vector command that needs to be split by strip and then needs
	 * to be split further due to the capacity of the child iovs.
	 *
	 * In this case the last two iovs need to be split, but doing so would exceed
	 * the capacity of the child iovs, so they have to wait until the first batch
	 * has completed.
	 */
	bdev->max_segment_size = 512;
	bdev->max_num_segments = BDEV_IO_NUM_CHILD_IOV;
	g_io_done = false;

	for (i = 0; i < BDEV_IO_NUM_CHILD_IOV - 2; i++) {
		iov[i].iov_base = (void *)((i + 1) * 0x10000);
		iov[i].iov_len = 512;
	}
	for (i = BDEV_IO_NUM_CHILD_IOV - 2; i < BDEV_IO_NUM_CHILD_IOV; i++) {
		iov[i].iov_base = (void *)((i + 1) * 0x10000);
		iov[i].iov_len = 512 * 2;
	}

	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0,
					   BDEV_IO_NUM_CHILD_IOV, BDEV_IO_NUM_CHILD_IOV);
	/* iovs 0 through (BDEV_IO_NUM_CHILD_IOV - 3) will not be split */
	for (i = 0; i < BDEV_IO_NUM_CHILD_IOV - 2; i++) {
		ut_expected_io_set_iov(expected_io, i, iov[i].iov_base, iov[i].iov_len);
	}
	/* iov (BDEV_IO_NUM_CHILD_IOV - 2) is split */
	ut_expected_io_set_iov(expected_io, i, iov[i].iov_base, 512);
	ut_expected_io_set_iov(expected_io, i + 1, iov[i].iov_base + 512, 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	/* The last iov's child entries exceed the capacity of the parent IO, so it is
	 * split in the next round.
	 */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, BDEV_IO_NUM_CHILD_IOV, 2, 2);
	ut_expected_io_set_iov(expected_io, 0, iov[i + 1].iov_base, 512);
	ut_expected_io_set_iov(expected_io, 1, iov[i + 1].iov_base + 512, 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	rc = spdk_bdev_readv_blocks(desc, io_ch, iov, BDEV_IO_NUM_CHILD_IOV, 0,
				    BDEV_IO_NUM_CHILD_IOV + 2, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == false);

	/* Next round */
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);

	/* This case is similar to the previous one, but this time the IO composed of
	 * the last few child iov entries does not add up to a full blocklen, so those
	 * entries cannot be put into this IO and must wait for the next round.
	 */
	bdev->max_segment_size = 512;
	bdev->max_num_segments = BDEV_IO_NUM_CHILD_IOV;
	g_io_done = false;

	for (i = 0; i < BDEV_IO_NUM_CHILD_IOV - 2; i++) {
		iov[i].iov_base = (void *)((i + 1) * 0x10000);
		iov[i].iov_len = 512;
	}

	for (i = BDEV_IO_NUM_CHILD_IOV - 2; i < BDEV_IO_NUM_CHILD_IOV + 2; i++) {
		iov[i].iov_base = (void *)((i + 1) * 0x10000);
		iov[i].iov_len = 128;
	}

	/* The first child iovcnt isn't BDEV_IO_NUM_CHILD_IOV but
	 * BDEV_IO_NUM_CHILD_IOV - 2, because the remaining 2 iovs are not enough for
	 * a full blocklen.
	 */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0,
					   BDEV_IO_NUM_CHILD_IOV - 2, BDEV_IO_NUM_CHILD_IOV - 2);
	for (i = 0; i < BDEV_IO_NUM_CHILD_IOV - 2; i++) {
		ut_expected_io_set_iov(expected_io, i, iov[i].iov_base, iov[i].iov_len);
	}
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	/* The second child IO waits until the first child IO has finished, because
	 * the combined iovcnt of the two IOs exceeds the child iov capacity of the
	 * parent IO.  It spans parent iovs (BDEV_IO_NUM_CHILD_IOV - 2) through
	 * (BDEV_IO_NUM_CHILD_IOV + 1).
	 */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, BDEV_IO_NUM_CHILD_IOV - 2,
					   1, 4);
	ut_expected_io_set_iov(expected_io, 0, iov[i].iov_base, iov[i].iov_len);
	ut_expected_io_set_iov(expected_io, 1, iov[i + 1].iov_base, iov[i + 1].iov_len);
	ut_expected_io_set_iov(expected_io, 2, iov[i + 2].iov_base, iov[i + 2].iov_len);
	ut_expected_io_set_iov(expected_io, 3, iov[i + 3].iov_base, iov[i + 3].iov_len);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
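
	/* Checking the sums for this case (a sketch): the first child packs 30
	 * single-block 512-byte iovs, i.e. 30 blocks in 30 entries, and stops there
	 * because the four 128-byte tail iovs only add up to a block when taken
	 * together.  Those four entries (4 * 128 = 512 bytes) then form the second,
	 * one-block child, for a parent total of 30 + 1 = BDEV_IO_NUM_CHILD_IOV - 1
	 * = 31 blocks, matching the readv call below.
	 */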

	rc = spdk_bdev_readv_blocks(desc, io_ch, iov, BDEV_IO_NUM_CHILD_IOV + 2, 0,
				    BDEV_IO_NUM_CHILD_IOV - 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);

	/* A very complicated case.  Each sg entry exceeds max_segment_size and needs
	 * to be split, each child IO must stay a multiple of blocklen, and the child
	 * iovcnt exceeds the parent iovcnt.
	 */
	bdev->max_segment_size = 512 + 128;
	bdev->max_num_segments = 3;
	g_io_done = false;

	for (i = 0; i < BDEV_IO_NUM_CHILD_IOV - 2; i++) {
		iov[i].iov_base = (void *)((i + 1) * 0x10000);
		iov[i].iov_len = 512 + 256;
	}

	for (i = BDEV_IO_NUM_CHILD_IOV - 2; i < BDEV_IO_NUM_CHILD_IOV + 2; i++) {
		iov[i].iov_base = (void *)((i + 1) * 0x10000);
		iov[i].iov_len = 512 + 128;
	}

	/* Child IOs use 9 entries per for() round, 3 * 9 = 27 child iov entries in
	 * total.  Each round consumes 4 parent IO iov entries and 6 blocks, and
	 * generates 3 child IOs, so the loop generates 9 child IOs.
	 */
	for (i = 0; i < 3; i++) {
		uint32_t j = i * 4;

		expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, i * 6, 2, 3);
		ut_expected_io_set_iov(expected_io, 0, iov[j].iov_base, 640);
		ut_expected_io_set_iov(expected_io, 1, iov[j].iov_base + 640, 128);
		ut_expected_io_set_iov(expected_io, 2, iov[j + 1].iov_base, 256);
		TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

		/* The child IO must be a multiple of blocklen, so iov[j + 2] has to be
		 * split.  If the third entry were added as well, the multiple of
		 * blocklen could not be guaranteed.  The skipped entry still occupies
		 * one iov entry of the parent's child iov array.
		 */
		expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, i * 6 + 2, 2, 2);
		ut_expected_io_set_iov(expected_io, 0, iov[j + 1].iov_base + 256, 512);
		ut_expected_io_set_iov(expected_io, 1, iov[j + 2].iov_base, 512);
		TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

		expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, i * 6 + 4, 2, 3);
		ut_expected_io_set_iov(expected_io, 0, iov[j + 2].iov_base + 512, 256);
		ut_expected_io_set_iov(expected_io, 1, iov[j + 3].iov_base, 640);
		ut_expected_io_set_iov(expected_io, 2, iov[j + 3].iov_base + 640, 128);
		TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
	}
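
	/* Per-round arithmetic for the loop above (a sketch, blocklen 512): each of
	 * the first parent iovs is 512 + 256 = 768 bytes, so one round's 4 iovs carry
	 * 4 * 768 = 3072 bytes = 6 blocks, carved 640 + 128 by max_segment_size.
	 * The round's three children cover 2 blocks apiece (640 + 128 + 256,
	 * 512 + 512, and 256 + 640 + 128, each 1024 bytes) and take 3 + 3 + 3 = 9
	 * child iov slots, the middle child effectively reserving a third slot it
	 * cannot legally fill.
	 */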

	/* Child iov position at 27, the 10th child IO: its parent iov entry index is
	 * 3 * 4 = 12 and its block offset is 3 * 6 = 18.
	 */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 18, 2, 3);
	ut_expected_io_set_iov(expected_io, 0, iov[12].iov_base, 640);
	ut_expected_io_set_iov(expected_io, 1, iov[12].iov_base + 640, 128);
	ut_expected_io_set_iov(expected_io, 2, iov[13].iov_base, 256);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	/* Child iov position at 30, the 11th child IO */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 20, 2, 2);
	ut_expected_io_set_iov(expected_io, 0, iov[13].iov_base + 256, 512);
	ut_expected_io_set_iov(expected_io, 1, iov[14].iov_base, 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	/* The 2nd split round starts with iovpos 0; this is the 12th child IO */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 22, 2, 3);
	ut_expected_io_set_iov(expected_io, 0, iov[14].iov_base + 512, 256);
	ut_expected_io_set_iov(expected_io, 1, iov[15].iov_base, 640);
	ut_expected_io_set_iov(expected_io, 2, iov[15].iov_base + 640, 128);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	/* Generate 9 more child IOs, consuming 27 more child iov entries.  Each
	 * for() round again consumes 4 parent IO iov entries and 6 blocks.  The
	 * parent IO iov index starts from 16 and the block offset starts from 24.
	 */
	for (i = 0; i < 3; i++) {
		uint32_t j = i * 4 + 16;
		uint32_t offset = i * 6 + 24;

		expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, offset, 2, 3);
		ut_expected_io_set_iov(expected_io, 0, iov[j].iov_base, 640);
		ut_expected_io_set_iov(expected_io, 1, iov[j].iov_base + 640, 128);
		ut_expected_io_set_iov(expected_io, 2, iov[j + 1].iov_base, 256);
		TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

		/* As above, iov[j + 2] must be split to keep the child IO a multiple
		 * of blocklen, and the skipped third entry still occupies one iov
		 * entry of the parent's child iov array.
		 */
		expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, offset + 2, 2, 2);
		ut_expected_io_set_iov(expected_io, 0, iov[j + 1].iov_base + 256, 512);
		ut_expected_io_set_iov(expected_io, 1, iov[j + 2].iov_base, 512);
		TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

		expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, offset + 4, 2, 3);
		ut_expected_io_set_iov(expected_io, 0, iov[j + 2].iov_base + 512, 256);
		ut_expected_io_set_iov(expected_io, 1, iov[j + 3].iov_base, 640);
		ut_expected_io_set_iov(expected_io, 2, iov[j + 3].iov_base + 640, 128);
		TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
	}

	/* The 22nd child IO, child iov position at 30 */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 42, 1, 1);
	ut_expected_io_set_iov(expected_io, 0, iov[28].iov_base, 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	/* The third round: this is the 23rd child IO, with child iovpos 0 */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 43, 2, 3);
	ut_expected_io_set_iov(expected_io, 0, iov[28].iov_base + 512, 256);
	ut_expected_io_set_iov(expected_io, 1, iov[29].iov_base, 640);
	ut_expected_io_set_iov(expected_io, 2, iov[29].iov_base + 640, 128);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	/* The 24th child IO */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 45, 3, 3);
	ut_expected_io_set_iov(expected_io, 0, iov[30].iov_base, 640);
	ut_expected_io_set_iov(expected_io, 1, iov[31].iov_base, 640);
	ut_expected_io_set_iov(expected_io, 2, iov[32].iov_base, 256);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	/* The 25th child IO */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 48, 2, 2);
	ut_expected_io_set_iov(expected_io, 0, iov[32].iov_base + 256, 384);
	ut_expected_io_set_iov(expected_io, 1, iov[33].iov_base, 640);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	rc = spdk_bdev_readv_blocks(desc, io_ch, iov, BDEV_IO_NUM_CHILD_IOV + 2, 0,
				    50, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	/* The parent IO supports up to 32 child iovs, so at most 11 child IOs can be
	 * split at a time; the splitting continues after the first batch is over.
	 */
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 11);
	stub_complete_io(11);
	CU_ASSERT(g_io_done == false);

	/* The 2nd round */
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 11);
	stub_complete_io(11);
	CU_ASSERT(g_io_done == false);

	/* The last round */
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 3);
	stub_complete_io(3);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
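
	/* Batch accounting for the 50-block read (a sketch): a batch ends when the
	 * parent's 32 child iov slots cannot fit the next child.  Round 1 packs
	 * children #1-#11 (9 * 3 + 3 + 2 = 32 slots, 22 blocks), round 2 packs
	 * #12-#22 (3 + 9 * 3 + 1 = 31 slots, 21 blocks, with no room left for the
	 * 3-slot #23), and round 3 finishes with #23-#25 (7 blocks), giving
	 * 22 + 21 + 7 = 50 blocks in 11 + 11 + 3 = 25 children.
	 */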

	/* Test a WRITE_ZEROES.  This should also not be split. */
	bdev->max_segment_size = 512;
	bdev->max_num_segments = 1;
	g_io_done = false;

	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, 9, 36, 0);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	rc = spdk_bdev_write_zeroes_blocks(desc, io_ch, 9, 36, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == true);

	/* Test an UNMAP.  This should also not be split. */
	g_io_done = false;

	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_UNMAP, 15, 4, 0);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	rc = spdk_bdev_unmap_blocks(desc, io_ch, 15, 4, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == true);

	/* Test a FLUSH.  This should also not be split. */
	g_io_done = false;

	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_FLUSH, 15, 4, 0);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	rc = spdk_bdev_flush_blocks(desc, io_ch, 15, 4, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == true);

	spdk_put_io_channel(io_ch);
	spdk_bdev_close(desc);
	free_bdev(bdev);
	spdk_bdev_finish(bdev_fini_cb, NULL);
	poll_threads();
}

static void
bdev_io_mix_split_test(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc = NULL;
	struct spdk_io_channel *io_ch;
	struct spdk_bdev_opts bdev_opts = {};
	struct iovec iov[BDEV_IO_NUM_CHILD_IOV * 2];
	struct ut_expected_io *expected_io;
	uint64_t i;
	int rc;

	spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts));
	bdev_opts.bdev_io_pool_size = 512;
	bdev_opts.bdev_io_cache_size = 64;

	rc = spdk_bdev_set_opts(&bdev_opts);
	CU_ASSERT(rc == 0);
	spdk_bdev_initialize(bdev_init_cb, NULL);

	bdev = allocate_bdev("bdev0");

	rc = spdk_bdev_open_ext(bdev->name, true, bdev_ut_event_cb, NULL, &desc);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(desc != NULL);
	io_ch = spdk_bdev_get_io_channel(desc);
	CU_ASSERT(io_ch != NULL);

	/* First case: optimal_io_boundary == max_segment_size * max_num_segments */
	bdev->split_on_optimal_io_boundary = true;
	bdev->optimal_io_boundary = 16;

	bdev->max_segment_size = 512;
	bdev->max_num_segments = 16;
	g_io_done = false;

	/* An IO crossing the IO boundary requires a split.
	 * Total: 2 child IOs.
	 */
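
	/* A worked version of the first case (a sketch): the 4-block read at offset
	 * 14 crosses the boundary at block 16, giving children (offset 14, 2 blocks)
	 * and (offset 16, 2 blocks), and max_segment_size == 512 then carves each
	 * 1024-byte child into two 512-byte iov entries, which is exactly what the
	 * two expected IOs below encode.
	 */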
2342 */ 2343 2344 /* The 1st child IO split the segment_size to multiple segment entry */ 2345 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 14, 2, 2); 2346 ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 512); 2347 ut_expected_io_set_iov(expected_io, 1, (void *)(0xF000 + 512), 512); 2348 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2349 2350 /* The 2nd child IO split the segment_size to multiple segment entry */ 2351 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 16, 2, 2); 2352 ut_expected_io_set_iov(expected_io, 0, (void *)(0xF000 + 2 * 512), 512); 2353 ut_expected_io_set_iov(expected_io, 1, (void *)(0xF000 + 3 * 512), 512); 2354 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2355 2356 rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 4, io_done, NULL); 2357 CU_ASSERT(rc == 0); 2358 CU_ASSERT(g_io_done == false); 2359 2360 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 2361 stub_complete_io(2); 2362 CU_ASSERT(g_io_done == true); 2363 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 2364 2365 /* Second case optimal_io_boundary > max_segment_size * max_num_segments */ 2366 bdev->max_segment_size = 15 * 512; 2367 bdev->max_num_segments = 1; 2368 g_io_done = false; 2369 2370 /* IO crossing the IO boundary requires split. 2371 * The 1st child IO segment size exceeds the max_segment_size, 2372 * So 1st child IO will be splitted to multiple segment entry. 2373 * Then it split to 2 child IOs because of the max_num_segments. 2374 * Total 3 child IOs. 2375 */ 2376 2377 /* The first 2 IOs are in an IO boundary. 2378 * Because the optimal_io_boundary > max_segment_size * max_num_segments 2379 * So it split to the first 2 IOs. 2380 */ 2381 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0, 15, 1); 2382 ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 512 * 15); 2383 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2384 2385 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 15, 1, 1); 2386 ut_expected_io_set_iov(expected_io, 0, (void *)(0xF000 + 512 * 15), 512); 2387 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2388 2389 /* The 3rd Child IO is because of the io boundary */ 2390 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 16, 2, 1); 2391 ut_expected_io_set_iov(expected_io, 0, (void *)(0xF000 + 512 * 16), 512 * 2); 2392 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2393 2394 rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 0, 18, io_done, NULL); 2395 CU_ASSERT(rc == 0); 2396 CU_ASSERT(g_io_done == false); 2397 2398 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 3); 2399 stub_complete_io(3); 2400 CU_ASSERT(g_io_done == true); 2401 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 2402 2403 /* Third case optimal_io_boundary < max_segment_size * max_num_segments */ 2404 bdev->max_segment_size = 17 * 512; 2405 bdev->max_num_segments = 1; 2406 g_io_done = false; 2407 2408 /* IO crossing the IO boundary requires split. 2409 * Child IO does not split. 2410 * Total 2 child IOs. 
2411 */ 2412 2413 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0, 16, 1); 2414 ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 512 * 16); 2415 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2416 2417 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 16, 2, 1); 2418 ut_expected_io_set_iov(expected_io, 0, (void *)(0xF000 + 512 * 16), 512 * 2); 2419 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2420 2421 rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 0, 18, io_done, NULL); 2422 CU_ASSERT(rc == 0); 2423 CU_ASSERT(g_io_done == false); 2424 2425 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 2426 stub_complete_io(2); 2427 CU_ASSERT(g_io_done == true); 2428 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 2429 2430 /* Now set up a more complex, multi-vector command that needs to be split, 2431 * including splitting iovecs. 2432 * optimal_io_boundary < max_segment_size * max_num_segments 2433 */ 2434 bdev->max_segment_size = 3 * 512; 2435 bdev->max_num_segments = 6; 2436 g_io_done = false; 2437 2438 iov[0].iov_base = (void *)0x10000; 2439 iov[0].iov_len = 4 * 512; 2440 iov[1].iov_base = (void *)0x20000; 2441 iov[1].iov_len = 4 * 512; 2442 iov[2].iov_base = (void *)0x30000; 2443 iov[2].iov_len = 10 * 512; 2444 2445 /* IO crossing the IO boundary requires split. 2446 * The 1st child IO segment size exceeds the max_segment_size and after 2447 * splitting segment_size, the num_segments exceeds max_num_segments. 2448 * So 1st child IO will be splitted to 2 child IOs. 2449 * Total 3 child IOs. 2450 */ 2451 2452 /* The first 2 IOs are in an IO boundary. 2453 * After splitting segmemt size the segment num exceeds. 2454 * So it splits to 2 child IOs. 2455 */ 2456 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 0, 14, 6); 2457 ut_expected_io_set_iov(expected_io, 0, iov[0].iov_base, 512 * 3); 2458 ut_expected_io_set_iov(expected_io, 1, iov[0].iov_base + 512 * 3, 512); 2459 ut_expected_io_set_iov(expected_io, 2, iov[1].iov_base, 512 * 3); 2460 ut_expected_io_set_iov(expected_io, 3, iov[1].iov_base + 512 * 3, 512); 2461 ut_expected_io_set_iov(expected_io, 4, iov[2].iov_base, 512 * 3); 2462 ut_expected_io_set_iov(expected_io, 5, iov[2].iov_base + 512 * 3, 512 * 3); 2463 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2464 2465 /* The 2nd child IO has the left segment entry */ 2466 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 14, 2, 1); 2467 ut_expected_io_set_iov(expected_io, 0, iov[2].iov_base + 512 * 6, 512 * 2); 2468 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2469 2470 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 16, 2, 1); 2471 ut_expected_io_set_iov(expected_io, 0, iov[2].iov_base + 512 * 8, 512 * 2); 2472 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2473 2474 rc = spdk_bdev_writev_blocks(desc, io_ch, iov, 3, 0, 18, io_done, NULL); 2475 CU_ASSERT(rc == 0); 2476 CU_ASSERT(g_io_done == false); 2477 2478 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 3); 2479 stub_complete_io(3); 2480 CU_ASSERT(g_io_done == true); 2481 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 2482 2483 /* A very complicated case. Each sg entry exceeds max_segment_size 2484 * and split on io boundary. 
	bdev->max_segment_size = 3 * 512;
	bdev->max_num_segments = BDEV_IO_NUM_CHILD_IOV;
	g_io_done = false;

	for (i = 0; i < 20; i++) {
		iov[i].iov_base = (void *)((i + 1) * 0x10000);
		iov[i].iov_len = 512 * 4;
	}

	/* An IO crossing the IO boundary requires a split.
	 * The 80-block length splits into 5 child IOs based on the offset and the IO
	 * boundary, and each iov entry is carved into 2 entries because of
	 * max_segment_size.
	 * Total: 5 child IOs.
	 */

	/* 4 iov entries fit in one IO boundary and each iov entry splits in 2, so
	 * each child IO occupies 8 child iov entries.
	 */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 0, 16, 8);
	for (i = 0; i < 4; i++) {
		int iovcnt = i * 2;

		ut_expected_io_set_iov(expected_io, iovcnt, iov[i].iov_base, 512 * 3);
		ut_expected_io_set_iov(expected_io, iovcnt + 1, iov[i].iov_base + 512 * 3, 512);
	}
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	/* 2nd child IO; 16 child iov entries of the parent IO consumed in total */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 16, 16, 8);
	for (i = 4; i < 8; i++) {
		int iovcnt = (i - 4) * 2;

		ut_expected_io_set_iov(expected_io, iovcnt, iov[i].iov_base, 512 * 3);
		ut_expected_io_set_iov(expected_io, iovcnt + 1, iov[i].iov_base + 512 * 3, 512);
	}
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	/* 3rd child IO; 24 child iov entries of the parent IO consumed in total */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 32, 16, 8);
	for (i = 8; i < 12; i++) {
		int iovcnt = (i - 8) * 2;

		ut_expected_io_set_iov(expected_io, iovcnt, iov[i].iov_base, 512 * 3);
		ut_expected_io_set_iov(expected_io, iovcnt + 1, iov[i].iov_base + 512 * 3, 512);
	}
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	/* 4th child IO; all 32 child iov entries of the parent IO consumed */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 48, 16, 8);
	for (i = 12; i < 16; i++) {
		int iovcnt = (i - 12) * 2;

		ut_expected_io_set_iov(expected_io, iovcnt, iov[i].iov_base, 512 * 3);
		ut_expected_io_set_iov(expected_io, iovcnt + 1, iov[i].iov_base + 512 * 3, 512);
	}
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	/* The 5th child IO exhausts the child iov entries, so it is split off into
	 * the next round.
	 */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 64, 16, 8);
	for (i = 16; i < 20; i++) {
		int iovcnt = (i - 16) * 2;

		ut_expected_io_set_iov(expected_io, iovcnt, iov[i].iov_base, 512 * 3);
		ut_expected_io_set_iov(expected_io, iovcnt + 1, iov[i].iov_base + 512 * 3, 512);
	}
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
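
	/* Capacity check for this setup (a sketch): 20 iovs * 2048 bytes = 40960
	 * bytes = 80 blocks, and the 16-block boundary yields 5 children of 8 iov
	 * entries each.  Four children fill 4 * 8 = 32 child iov slots exactly, so
	 * the first split round submits 4 IOs and the fifth goes out in a second
	 * round, as the assertions below expect.
	 */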

	rc = spdk_bdev_writev_blocks(desc, io_ch, iov, 20, 0, 80, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	/* First split round */
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 4);
	stub_complete_io(4);
	CU_ASSERT(g_io_done == false);

	/* Second split round */
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);

	spdk_put_io_channel(io_ch);
	spdk_bdev_close(desc);
	free_bdev(bdev);
	spdk_bdev_finish(bdev_fini_cb, NULL);
	poll_threads();
}

static void
bdev_io_split_with_io_wait(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc = NULL;
	struct spdk_io_channel *io_ch;
	struct spdk_bdev_channel *channel;
	struct spdk_bdev_mgmt_channel *mgmt_ch;
	struct spdk_bdev_opts bdev_opts = {};
	struct iovec iov[3];
	struct ut_expected_io *expected_io;
	int rc;

	spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts));
	bdev_opts.bdev_io_pool_size = 2;
	bdev_opts.bdev_io_cache_size = 1;

	rc = spdk_bdev_set_opts(&bdev_opts);
	CU_ASSERT(rc == 0);
	spdk_bdev_initialize(bdev_init_cb, NULL);

	bdev = allocate_bdev("bdev0");

	rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc);
	CU_ASSERT(rc == 0);
	CU_ASSERT(desc != NULL);
	CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc));
	io_ch = spdk_bdev_get_io_channel(desc);
	CU_ASSERT(io_ch != NULL);
	channel = spdk_io_channel_get_ctx(io_ch);
	mgmt_ch = channel->shared_resource->mgmt_ch;

	bdev->optimal_io_boundary = 16;
	bdev->split_on_optimal_io_boundary = true;

	rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);

	/* Now test that a single-vector command is split correctly.
	 * Offset 14, length 8, payload 0xF000
	 *  Child - Offset 14, length 2, payload 0xF000
	 *  Child - Offset 16, length 6, payload 0xF000 + 2 * 512
	 *
	 * Set up the expected values before calling spdk_bdev_read_blocks
	 */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 14, 2, 1);
	ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 2 * 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 16, 6, 1);
	ut_expected_io_set_iov(expected_io, 0, (void *)(0xF000 + 2 * 512), 6 * 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	/* The following children will be submitted sequentially due to the capacity
	 * of spdk_bdev_io.
	 */
2628 */ 2629 2630 /* The first child I/O will be queued to wait until an spdk_bdev_io becomes available */ 2631 rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 8, io_done, NULL); 2632 CU_ASSERT(rc == 0); 2633 CU_ASSERT(!TAILQ_EMPTY(&mgmt_ch->io_wait_queue)); 2634 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 2635 2636 /* Completing the first read I/O will submit the first child */ 2637 stub_complete_io(1); 2638 CU_ASSERT(TAILQ_EMPTY(&mgmt_ch->io_wait_queue)); 2639 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 2640 2641 /* Completing the first child will submit the second child */ 2642 stub_complete_io(1); 2643 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 2644 2645 /* Complete the second child I/O. This should result in our callback getting 2646 * invoked since the parent I/O is now complete. 2647 */ 2648 stub_complete_io(1); 2649 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 2650 2651 /* Now set up a more complex, multi-vector command that needs to be split, 2652 * including splitting iovecs. 2653 */ 2654 iov[0].iov_base = (void *)0x10000; 2655 iov[0].iov_len = 512; 2656 iov[1].iov_base = (void *)0x20000; 2657 iov[1].iov_len = 20 * 512; 2658 iov[2].iov_base = (void *)0x30000; 2659 iov[2].iov_len = 11 * 512; 2660 2661 g_io_done = false; 2662 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 14, 2, 2); 2663 ut_expected_io_set_iov(expected_io, 0, (void *)0x10000, 512); 2664 ut_expected_io_set_iov(expected_io, 1, (void *)0x20000, 512); 2665 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2666 2667 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 16, 16, 1); 2668 ut_expected_io_set_iov(expected_io, 0, (void *)(0x20000 + 512), 16 * 512); 2669 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2670 2671 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 32, 14, 2); 2672 ut_expected_io_set_iov(expected_io, 0, (void *)(0x20000 + 17 * 512), 3 * 512); 2673 ut_expected_io_set_iov(expected_io, 1, (void *)0x30000, 11 * 512); 2674 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2675 2676 rc = spdk_bdev_writev_blocks(desc, io_ch, iov, 3, 14, 32, io_done, NULL); 2677 CU_ASSERT(rc == 0); 2678 CU_ASSERT(g_io_done == false); 2679 2680 /* The following children will be submitted sequentially due to the capacity of 2681 * spdk_bdev_io. 2682 */ 2683 2684 /* Completing the first child will submit the second child */ 2685 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 2686 stub_complete_io(1); 2687 CU_ASSERT(g_io_done == false); 2688 2689 /* Completing the second child will submit the third child */ 2690 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 2691 stub_complete_io(1); 2692 CU_ASSERT(g_io_done == false); 2693 2694 /* Completing the third child will result in our callback getting invoked 2695 * since the parent I/O is now complete. 
2696 */ 2697 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 2698 stub_complete_io(1); 2699 CU_ASSERT(g_io_done == true); 2700 2701 CU_ASSERT(TAILQ_EMPTY(&g_bdev_ut_channel->expected_io)); 2702 2703 spdk_put_io_channel(io_ch); 2704 spdk_bdev_close(desc); 2705 free_bdev(bdev); 2706 spdk_bdev_finish(bdev_fini_cb, NULL); 2707 poll_threads(); 2708 } 2709 2710 static void 2711 bdev_io_alignment(void) 2712 { 2713 struct spdk_bdev *bdev; 2714 struct spdk_bdev_desc *desc = NULL; 2715 struct spdk_io_channel *io_ch; 2716 struct spdk_bdev_opts bdev_opts = {}; 2717 int rc; 2718 void *buf = NULL; 2719 struct iovec iovs[2]; 2720 int iovcnt; 2721 uint64_t alignment; 2722 2723 spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts)); 2724 bdev_opts.bdev_io_pool_size = 20; 2725 bdev_opts.bdev_io_cache_size = 2; 2726 2727 rc = spdk_bdev_set_opts(&bdev_opts); 2728 CU_ASSERT(rc == 0); 2729 spdk_bdev_initialize(bdev_init_cb, NULL); 2730 2731 fn_table.submit_request = stub_submit_request_get_buf; 2732 bdev = allocate_bdev("bdev0"); 2733 2734 rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc); 2735 CU_ASSERT(rc == 0); 2736 CU_ASSERT(desc != NULL); 2737 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 2738 io_ch = spdk_bdev_get_io_channel(desc); 2739 CU_ASSERT(io_ch != NULL); 2740 2741 /* Create aligned buffer */ 2742 rc = posix_memalign(&buf, 4096, 8192); 2743 SPDK_CU_ASSERT_FATAL(rc == 0); 2744 2745 /* Pass aligned single buffer with no alignment required */ 2746 alignment = 1; 2747 bdev->required_alignment = spdk_u32log2(alignment); 2748 2749 rc = spdk_bdev_write_blocks(desc, io_ch, buf, 0, 1, io_done, NULL); 2750 CU_ASSERT(rc == 0); 2751 stub_complete_io(1); 2752 CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt, 2753 alignment)); 2754 2755 rc = spdk_bdev_read_blocks(desc, io_ch, buf, 0, 1, io_done, NULL); 2756 CU_ASSERT(rc == 0); 2757 stub_complete_io(1); 2758 CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt, 2759 alignment)); 2760 2761 /* Pass unaligned single buffer with no alignment required */ 2762 alignment = 1; 2763 bdev->required_alignment = spdk_u32log2(alignment); 2764 2765 rc = spdk_bdev_write_blocks(desc, io_ch, buf + 4, 0, 1, io_done, NULL); 2766 CU_ASSERT(rc == 0); 2767 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0); 2768 CU_ASSERT(g_bdev_io->u.bdev.iovs[0].iov_base == buf + 4); 2769 stub_complete_io(1); 2770 2771 rc = spdk_bdev_read_blocks(desc, io_ch, buf + 4, 0, 1, io_done, NULL); 2772 CU_ASSERT(rc == 0); 2773 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0); 2774 CU_ASSERT(g_bdev_io->u.bdev.iovs[0].iov_base == buf + 4); 2775 stub_complete_io(1); 2776 2777 /* Pass unaligned single buffer with 512 alignment required */ 2778 alignment = 512; 2779 bdev->required_alignment = spdk_u32log2(alignment); 2780 2781 rc = spdk_bdev_write_blocks(desc, io_ch, buf + 4, 0, 1, io_done, NULL); 2782 CU_ASSERT(rc == 0); 2783 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 1); 2784 CU_ASSERT(g_bdev_io->u.bdev.iovs == &g_bdev_io->internal.bounce_iov); 2785 CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt, 2786 alignment)); 2787 stub_complete_io(1); 2788 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0); 2789 2790 rc = spdk_bdev_read_blocks(desc, io_ch, buf + 4, 0, 1, io_done, NULL); 2791 CU_ASSERT(rc == 0); 2792 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 1); 2793 CU_ASSERT(g_bdev_io->u.bdev.iovs == &g_bdev_io->internal.bounce_iov); 2794 CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt, 

	/* Pass unaligned single buffer with 4096 alignment required */
	alignment = 4096;
	bdev->required_alignment = spdk_u32log2(alignment);

	rc = spdk_bdev_write_blocks(desc, io_ch, buf + 8, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 1);
	CU_ASSERT(g_bdev_io->u.bdev.iovs == &g_bdev_io->internal.bounce_iov);
	CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt,
				    alignment));
	stub_complete_io(1);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);

	rc = spdk_bdev_read_blocks(desc, io_ch, buf + 8, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 1);
	CU_ASSERT(g_bdev_io->u.bdev.iovs == &g_bdev_io->internal.bounce_iov);
	CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt,
				    alignment));
	stub_complete_io(1);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);

	/* Pass aligned iovs with no alignment required */
	alignment = 1;
	bdev->required_alignment = spdk_u32log2(alignment);

	iovcnt = 1;
	iovs[0].iov_base = buf;
	iovs[0].iov_len = 512;

	rc = spdk_bdev_writev(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);
	stub_complete_io(1);
	CU_ASSERT(g_bdev_io->u.bdev.iovs[0].iov_base == iovs[0].iov_base);

	rc = spdk_bdev_readv(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);
	stub_complete_io(1);
	CU_ASSERT(g_bdev_io->u.bdev.iovs[0].iov_base == iovs[0].iov_base);

	/* Pass unaligned iovs with no alignment required */
	alignment = 1;
	bdev->required_alignment = spdk_u32log2(alignment);

	iovcnt = 2;
	iovs[0].iov_base = buf + 16;
	iovs[0].iov_len = 256;
	iovs[1].iov_base = buf + 16 + 256 + 32;
	iovs[1].iov_len = 256;

	rc = spdk_bdev_writev(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);
	stub_complete_io(1);
	CU_ASSERT(g_bdev_io->u.bdev.iovs[0].iov_base == iovs[0].iov_base);

	rc = spdk_bdev_readv(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);
	stub_complete_io(1);
	CU_ASSERT(g_bdev_io->u.bdev.iovs[0].iov_base == iovs[0].iov_base);

	/* Pass unaligned iovs with 2048 alignment required */
	alignment = 2048;
	bdev->required_alignment = spdk_u32log2(alignment);

	iovcnt = 2;
	iovs[0].iov_base = buf + 16;
	iovs[0].iov_len = 256;
	iovs[1].iov_base = buf + 16 + 256 + 32;
	iovs[1].iov_len = 256;

	rc = spdk_bdev_writev(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == iovcnt);
	CU_ASSERT(g_bdev_io->u.bdev.iovs == &g_bdev_io->internal.bounce_iov);
	CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt,
				    alignment));
	stub_complete_io(1);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);

	rc = spdk_bdev_readv(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == iovcnt);
	CU_ASSERT(g_bdev_io->u.bdev.iovs == &g_bdev_io->internal.bounce_iov);
	CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt,
				    alignment));
	stub_complete_io(1);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);

	/* Pass an iov without an allocated buffer, with no alignment required */
	alignment = 1;
	bdev->required_alignment = spdk_u32log2(alignment);

	iovcnt = 1;
	iovs[0].iov_base = NULL;
	iovs[0].iov_len = 0;

	rc = spdk_bdev_readv(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);
	CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt,
				    alignment));
	stub_complete_io(1);

	/* Pass an iov without an allocated buffer, with 1024 alignment required */
	alignment = 1024;
	bdev->required_alignment = spdk_u32log2(alignment);

	iovcnt = 1;
	iovs[0].iov_base = NULL;
	iovs[0].iov_len = 0;

	rc = spdk_bdev_readv(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);
	CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt,
				    alignment));
	stub_complete_io(1);

	spdk_put_io_channel(io_ch);
	spdk_bdev_close(desc);
	free_bdev(bdev);
	fn_table.submit_request = stub_submit_request;
	spdk_bdev_finish(bdev_fini_cb, NULL);
	poll_threads();

	free(buf);
}

static void
bdev_io_alignment_with_boundary(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc = NULL;
	struct spdk_io_channel *io_ch;
	struct spdk_bdev_opts bdev_opts = {};
	int rc;
	void *buf = NULL;
	struct iovec iovs[2];
	int iovcnt;
	uint64_t alignment;

	spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts));
	bdev_opts.bdev_io_pool_size = 20;
	bdev_opts.bdev_io_cache_size = 2;

	bdev_opts.opts_size = sizeof(bdev_opts);
	rc = spdk_bdev_set_opts(&bdev_opts);
	CU_ASSERT(rc == 0);
	spdk_bdev_initialize(bdev_init_cb, NULL);

	fn_table.submit_request = stub_submit_request_get_buf;
	bdev = allocate_bdev("bdev0");

	rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc);
	CU_ASSERT(rc == 0);
	CU_ASSERT(desc != NULL);
	CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc));
	io_ch = spdk_bdev_get_io_channel(desc);
	CU_ASSERT(io_ch != NULL);

	/* Create aligned buffer */
	rc = posix_memalign(&buf, 4096, 131072);
	SPDK_CU_ASSERT_FATAL(rc == 0);
	g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS;

	/* 512 * 3 with 2 IO boundary, allocate a small data buffer from the bdev layer */
	alignment = 512;
	bdev->required_alignment = spdk_u32log2(alignment);
	bdev->optimal_io_boundary = 2;
	bdev->split_on_optimal_io_boundary = true;

	iovcnt = 1;
	iovs[0].iov_base = NULL;
	iovs[0].iov_len = 512 * 3;

	rc = spdk_bdev_readv_blocks(desc, io_ch, iovs, iovcnt, 1, 3, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2);
	stub_complete_io(2);

	/* 8KiB with 16 IO boundary, allocate a large data buffer from the bdev layer */
	alignment = 512;
	bdev->required_alignment = spdk_u32log2(alignment);
	bdev->optimal_io_boundary = 16;
	bdev->split_on_optimal_io_boundary = true;

	iovcnt = 1;
	iovs[0].iov_base = NULL;
	iovs[0].iov_len = 512 * 16;

	rc = spdk_bdev_readv_blocks(desc, io_ch, iovs, iovcnt, 1, 16, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2);
	stub_complete_io(2);
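
	/* How the child counts fall out of the boundary (a sketch): a read of N
	 * blocks starting at offset 1 first runs to the next boundary and then
	 * splits each boundary-sized span.  With a boundary of 2, the 3 blocks at
	 * offset 1 become children (1, 1) and (2, 2); with a boundary of 16, the 16
	 * blocks at offset 1 become (1, 15) and (16, 1): two children either way,
	 * matching the outstanding_io_count == 2 assertions above.
	 */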

	/* 512 * 160 with 128 IO boundary: 63.5KiB + 16.5KiB for the two child requests */
	alignment = 512;
	bdev->required_alignment = spdk_u32log2(alignment);
	bdev->optimal_io_boundary = 128;
	bdev->split_on_optimal_io_boundary = true;

	iovcnt = 1;
	iovs[0].iov_base = buf + 16;
	iovs[0].iov_len = 512 * 160;
	rc = spdk_bdev_readv_blocks(desc, io_ch, iovs, iovcnt, 1, 160, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2);
	stub_complete_io(2);

	/* 512 * 3 with 2 IO boundary */
	alignment = 512;
	bdev->required_alignment = spdk_u32log2(alignment);
	bdev->optimal_io_boundary = 2;
	bdev->split_on_optimal_io_boundary = true;

	iovcnt = 2;
	iovs[0].iov_base = buf + 16;
	iovs[0].iov_len = 512;
	iovs[1].iov_base = buf + 16 + 512 + 32;
	iovs[1].iov_len = 1024;

	rc = spdk_bdev_writev_blocks(desc, io_ch, iovs, iovcnt, 1, 3, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2);
	stub_complete_io(2);

	rc = spdk_bdev_readv_blocks(desc, io_ch, iovs, iovcnt, 1, 3, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2);
	stub_complete_io(2);

	/* 512 * 64 with 32 IO boundary */
	bdev->optimal_io_boundary = 32;
	iovcnt = 2;
	iovs[0].iov_base = buf + 16;
	iovs[0].iov_len = 16384;
	iovs[1].iov_base = buf + 16 + 16384 + 32;
	iovs[1].iov_len = 16384;

	rc = spdk_bdev_writev_blocks(desc, io_ch, iovs, iovcnt, 1, 64, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 3);
	stub_complete_io(3);

	rc = spdk_bdev_readv_blocks(desc, io_ch, iovs, iovcnt, 1, 64, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 3);
	stub_complete_io(3);

	/* 512 * 160 with 32 IO boundary */
	iovcnt = 1;
	iovs[0].iov_base = buf + 16;
	iovs[0].iov_len = 16384 + 65536;

	rc = spdk_bdev_writev_blocks(desc, io_ch, iovs, iovcnt, 1, 160, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 6);
	stub_complete_io(6);

	spdk_put_io_channel(io_ch);
	spdk_bdev_close(desc);
	free_bdev(bdev);
	fn_table.submit_request = stub_submit_request;
	spdk_bdev_finish(bdev_fini_cb, NULL);
	poll_threads();

	free(buf);
}

static void
histogram_status_cb(void *cb_arg, int status)
{
	g_status = status;
}

static void
histogram_data_cb(void *cb_arg, int status, struct spdk_histogram_data *histogram)
{
	g_status = status;
	g_histogram = histogram;
}

static void
histogram_io_count(void *ctx, uint64_t start, uint64_t end, uint64_t count,
		   uint64_t total, uint64_t so_far)
{
	g_count += count;
}

static void
bdev_histograms(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc = NULL;
	struct spdk_io_channel *ch;
	struct spdk_histogram_data *histogram;
	uint8_t buf[4096];
	int rc;

	spdk_bdev_initialize(bdev_init_cb, NULL);

	bdev = allocate_bdev("bdev");

	rc = spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc);
spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc); 3107 CU_ASSERT(rc == 0); 3108 CU_ASSERT(desc != NULL); 3109 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 3110 3111 ch = spdk_bdev_get_io_channel(desc); 3112 CU_ASSERT(ch != NULL); 3113 3114 /* Enable histogram */ 3115 g_status = -1; 3116 spdk_bdev_histogram_enable(bdev, histogram_status_cb, NULL, true); 3117 poll_threads(); 3118 CU_ASSERT(g_status == 0); 3119 CU_ASSERT(bdev->internal.histogram_enabled == true); 3120 3121 /* Allocate histogram */ 3122 histogram = spdk_histogram_data_alloc(); 3123 SPDK_CU_ASSERT_FATAL(histogram != NULL); 3124 3125 /* Check if histogram is zeroed */ 3126 spdk_bdev_histogram_get(bdev, histogram, histogram_data_cb, NULL); 3127 poll_threads(); 3128 CU_ASSERT(g_status == 0); 3129 SPDK_CU_ASSERT_FATAL(g_histogram != NULL); 3130 3131 g_count = 0; 3132 spdk_histogram_data_iterate(g_histogram, histogram_io_count, NULL); 3133 3134 CU_ASSERT(g_count == 0); 3135 3136 rc = spdk_bdev_write_blocks(desc, ch, buf, 0, 1, io_done, NULL); 3137 CU_ASSERT(rc == 0); 3138 3139 spdk_delay_us(10); 3140 stub_complete_io(1); 3141 poll_threads(); 3142 3143 rc = spdk_bdev_read_blocks(desc, ch, buf, 0, 1, io_done, NULL); 3144 CU_ASSERT(rc == 0); 3145 3146 spdk_delay_us(10); 3147 stub_complete_io(1); 3148 poll_threads(); 3149 3150 /* Check if histogram gathered data from all I/O channels */ 3151 g_histogram = NULL; 3152 spdk_bdev_histogram_get(bdev, histogram, histogram_data_cb, NULL); 3153 poll_threads(); 3154 CU_ASSERT(g_status == 0); 3155 CU_ASSERT(bdev->internal.histogram_enabled == true); 3156 SPDK_CU_ASSERT_FATAL(g_histogram != NULL); 3157 3158 g_count = 0; 3159 spdk_histogram_data_iterate(g_histogram, histogram_io_count, NULL); 3160 CU_ASSERT(g_count == 2); 3161 3162 /* Disable histogram */ 3163 spdk_bdev_histogram_enable(bdev, histogram_status_cb, NULL, false); 3164 poll_threads(); 3165 CU_ASSERT(g_status == 0); 3166 CU_ASSERT(bdev->internal.histogram_enabled == false); 3167 3168 /* Try to run histogram commands on disabled bdev */ 3169 spdk_bdev_histogram_get(bdev, histogram, histogram_data_cb, NULL); 3170 poll_threads(); 3171 CU_ASSERT(g_status == -EFAULT); 3172 3173 spdk_histogram_data_free(histogram); 3174 spdk_put_io_channel(ch); 3175 spdk_bdev_close(desc); 3176 free_bdev(bdev); 3177 spdk_bdev_finish(bdev_fini_cb, NULL); 3178 poll_threads(); 3179 } 3180 3181 static void 3182 _bdev_compare(bool emulated) 3183 { 3184 struct spdk_bdev *bdev; 3185 struct spdk_bdev_desc *desc = NULL; 3186 struct spdk_io_channel *ioch; 3187 struct ut_expected_io *expected_io; 3188 uint64_t offset, num_blocks; 3189 uint32_t num_completed; 3190 char aa_buf[512]; 3191 char bb_buf[512]; 3192 struct iovec compare_iov; 3193 uint8_t io_type; 3194 int rc; 3195 3196 if (emulated) { 3197 io_type = SPDK_BDEV_IO_TYPE_READ; 3198 } else { 3199 io_type = SPDK_BDEV_IO_TYPE_COMPARE; 3200 } 3201 3202 memset(aa_buf, 0xaa, sizeof(aa_buf)); 3203 memset(bb_buf, 0xbb, sizeof(bb_buf)); 3204 3205 g_io_types_supported[SPDK_BDEV_IO_TYPE_COMPARE] = !emulated; 3206 3207 spdk_bdev_initialize(bdev_init_cb, NULL); 3208 fn_table.submit_request = stub_submit_request_get_buf; 3209 bdev = allocate_bdev("bdev"); 3210 3211 rc = spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc); 3212 CU_ASSERT_EQUAL(rc, 0); 3213 SPDK_CU_ASSERT_FATAL(desc != NULL); 3214 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 3215 ioch = spdk_bdev_get_io_channel(desc); 3216 SPDK_CU_ASSERT_FATAL(ioch != NULL); 3217 3218 fn_table.submit_request = stub_submit_request_get_buf; 3219 
g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 3220 3221 offset = 50; 3222 num_blocks = 1; 3223 compare_iov.iov_base = aa_buf; 3224 compare_iov.iov_len = sizeof(aa_buf); 3225 3226 expected_io = ut_alloc_expected_io(io_type, offset, num_blocks, 0); 3227 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 3228 3229 g_io_done = false; 3230 g_compare_read_buf = aa_buf; 3231 g_compare_read_buf_len = sizeof(aa_buf); 3232 rc = spdk_bdev_comparev_blocks(desc, ioch, &compare_iov, 1, offset, num_blocks, io_done, NULL); 3233 CU_ASSERT_EQUAL(rc, 0); 3234 num_completed = stub_complete_io(1); 3235 CU_ASSERT_EQUAL(num_completed, 1); 3236 CU_ASSERT(g_io_done == true); 3237 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS); 3238 3239 expected_io = ut_alloc_expected_io(io_type, offset, num_blocks, 0); 3240 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 3241 3242 g_io_done = false; 3243 g_compare_read_buf = bb_buf; 3244 g_compare_read_buf_len = sizeof(bb_buf); 3245 rc = spdk_bdev_comparev_blocks(desc, ioch, &compare_iov, 1, offset, num_blocks, io_done, NULL); 3246 CU_ASSERT_EQUAL(rc, 0); 3247 num_completed = stub_complete_io(1); 3248 CU_ASSERT_EQUAL(num_completed, 1); 3249 CU_ASSERT(g_io_done == true); 3250 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_MISCOMPARE); 3251 3252 spdk_put_io_channel(ioch); 3253 spdk_bdev_close(desc); 3254 free_bdev(bdev); 3255 fn_table.submit_request = stub_submit_request; 3256 spdk_bdev_finish(bdev_fini_cb, NULL); 3257 poll_threads(); 3258 3259 g_io_types_supported[SPDK_BDEV_IO_TYPE_COMPARE] = true; 3260 3261 g_compare_read_buf = NULL; 3262 } 3263 3264 static void 3265 bdev_compare(void) 3266 { 3267 _bdev_compare(true); 3268 _bdev_compare(false); 3269 } 3270 3271 static void 3272 bdev_compare_and_write(void) 3273 { 3274 struct spdk_bdev *bdev; 3275 struct spdk_bdev_desc *desc = NULL; 3276 struct spdk_io_channel *ioch; 3277 struct ut_expected_io *expected_io; 3278 uint64_t offset, num_blocks; 3279 uint32_t num_completed; 3280 char aa_buf[512]; 3281 char bb_buf[512]; 3282 char cc_buf[512]; 3283 char write_buf[512]; 3284 struct iovec compare_iov; 3285 struct iovec write_iov; 3286 int rc; 3287 3288 memset(aa_buf, 0xaa, sizeof(aa_buf)); 3289 memset(bb_buf, 0xbb, sizeof(bb_buf)); 3290 memset(cc_buf, 0xcc, sizeof(cc_buf)); 3291 3292 g_io_types_supported[SPDK_BDEV_IO_TYPE_COMPARE] = false; 3293 3294 spdk_bdev_initialize(bdev_init_cb, NULL); 3295 fn_table.submit_request = stub_submit_request_get_buf; 3296 bdev = allocate_bdev("bdev"); 3297 3298 rc = spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc); 3299 CU_ASSERT_EQUAL(rc, 0); 3300 SPDK_CU_ASSERT_FATAL(desc != NULL); 3301 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 3302 ioch = spdk_bdev_get_io_channel(desc); 3303 SPDK_CU_ASSERT_FATAL(ioch != NULL); 3304 3305 fn_table.submit_request = stub_submit_request_get_buf; 3306 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 3307 3308 offset = 50; 3309 num_blocks = 1; 3310 compare_iov.iov_base = aa_buf; 3311 compare_iov.iov_len = sizeof(aa_buf); 3312 write_iov.iov_base = bb_buf; 3313 write_iov.iov_len = sizeof(bb_buf); 3314 3315 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, offset, num_blocks, 0); 3316 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 3317 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, offset, num_blocks, 0); 3318 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 3319 3320 g_io_done = false; 3321 g_compare_read_buf = aa_buf; 3322 
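	/* The stub will return aa_buf as the "on-disk" data for the compare phase,
	 * matching compare_iov, so the write phase is expected to run and carry
	 * bb_buf into write_buf (captured through g_compare_write_buf below).
	 */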
g_compare_read_buf_len = sizeof(aa_buf); 3323 memset(write_buf, 0, sizeof(write_buf)); 3324 g_compare_write_buf = write_buf; 3325 g_compare_write_buf_len = sizeof(write_buf); 3326 rc = spdk_bdev_comparev_and_writev_blocks(desc, ioch, &compare_iov, 1, &write_iov, 1, 3327 offset, num_blocks, io_done, NULL); 3328 /* Trigger range locking */ 3329 poll_threads(); 3330 CU_ASSERT_EQUAL(rc, 0); 3331 num_completed = stub_complete_io(1); 3332 CU_ASSERT_EQUAL(num_completed, 1); 3333 CU_ASSERT(g_io_done == false); 3334 num_completed = stub_complete_io(1); 3335 /* Trigger range unlocking */ 3336 poll_threads(); 3337 CU_ASSERT_EQUAL(num_completed, 1); 3338 CU_ASSERT(g_io_done == true); 3339 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS); 3340 CU_ASSERT(memcmp(write_buf, bb_buf, sizeof(write_buf)) == 0); 3341 3342 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, offset, num_blocks, 0); 3343 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 3344 3345 g_io_done = false; 3346 g_compare_read_buf = cc_buf; 3347 g_compare_read_buf_len = sizeof(cc_buf); 3348 memset(write_buf, 0, sizeof(write_buf)); 3349 g_compare_write_buf = write_buf; 3350 g_compare_write_buf_len = sizeof(write_buf); 3351 rc = spdk_bdev_comparev_and_writev_blocks(desc, ioch, &compare_iov, 1, &write_iov, 1, 3352 offset, num_blocks, io_done, NULL); 3353 /* Trigger range locking */ 3354 poll_threads(); 3355 CU_ASSERT_EQUAL(rc, 0); 3356 num_completed = stub_complete_io(1); 3357 /* Trigger range unlocking earlier because we expect error here */ 3358 poll_threads(); 3359 CU_ASSERT_EQUAL(num_completed, 1); 3360 CU_ASSERT(g_io_done == true); 3361 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_MISCOMPARE); 3362 num_completed = stub_complete_io(1); 3363 CU_ASSERT_EQUAL(num_completed, 0); 3364 3365 spdk_put_io_channel(ioch); 3366 spdk_bdev_close(desc); 3367 free_bdev(bdev); 3368 fn_table.submit_request = stub_submit_request; 3369 spdk_bdev_finish(bdev_fini_cb, NULL); 3370 poll_threads(); 3371 3372 g_io_types_supported[SPDK_BDEV_IO_TYPE_COMPARE] = true; 3373 3374 g_compare_read_buf = NULL; 3375 g_compare_write_buf = NULL; 3376 } 3377 3378 static void 3379 bdev_write_zeroes(void) 3380 { 3381 struct spdk_bdev *bdev; 3382 struct spdk_bdev_desc *desc = NULL; 3383 struct spdk_io_channel *ioch; 3384 struct ut_expected_io *expected_io; 3385 uint64_t offset, num_io_blocks, num_blocks; 3386 uint32_t num_completed, num_requests; 3387 int rc; 3388 3389 spdk_bdev_initialize(bdev_init_cb, NULL); 3390 bdev = allocate_bdev("bdev"); 3391 3392 rc = spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc); 3393 CU_ASSERT_EQUAL(rc, 0); 3394 SPDK_CU_ASSERT_FATAL(desc != NULL); 3395 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 3396 ioch = spdk_bdev_get_io_channel(desc); 3397 SPDK_CU_ASSERT_FATAL(ioch != NULL); 3398 3399 fn_table.submit_request = stub_submit_request; 3400 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 3401 3402 /* First test that if the bdev supports write_zeroes, the request won't be split */ 3403 bdev->md_len = 0; 3404 bdev->blocklen = 4096; 3405 num_blocks = (ZERO_BUFFER_SIZE / bdev->blocklen) * 2; 3406 3407 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, 0, num_blocks, 0); 3408 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 3409 rc = spdk_bdev_write_zeroes_blocks(desc, ioch, 0, num_blocks, io_done, NULL); 3410 CU_ASSERT_EQUAL(rc, 0); 3411 num_completed = stub_complete_io(1); 3412 CU_ASSERT_EQUAL(num_completed, 1); 3413 3414 /* Check that if write zeroes is not 
supported it'll be replaced by regular writes */
	ut_enable_io_type(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, false);
	num_io_blocks = ZERO_BUFFER_SIZE / bdev->blocklen;
	num_requests = 2;
	num_blocks = (ZERO_BUFFER_SIZE / bdev->blocklen) * num_requests;

	for (offset = 0; offset < num_requests; ++offset) {
		expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE,
						   offset * num_io_blocks, num_io_blocks, 0);
		TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
	}

	rc = spdk_bdev_write_zeroes_blocks(desc, ioch, 0, num_blocks, io_done, NULL);
	CU_ASSERT_EQUAL(rc, 0);
	num_completed = stub_complete_io(num_requests);
	CU_ASSERT_EQUAL(num_completed, num_requests);

	/* Check that the splitting is correct if bdev has interleaved metadata */
	bdev->md_interleave = true;
	bdev->md_len = 64;
	bdev->blocklen = 4096 + 64;
	num_blocks = (ZERO_BUFFER_SIZE / bdev->blocklen) * 2;

	num_requests = offset = 0;
	while (offset < num_blocks) {
		num_io_blocks = spdk_min(ZERO_BUFFER_SIZE / bdev->blocklen, num_blocks - offset);
		expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE,
						   offset, num_io_blocks, 0);
		TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
		offset += num_io_blocks;
		num_requests++;
	}

	rc = spdk_bdev_write_zeroes_blocks(desc, ioch, 0, num_blocks, io_done, NULL);
	CU_ASSERT_EQUAL(rc, 0);
	num_completed = stub_complete_io(num_requests);
	CU_ASSERT_EQUAL(num_completed, num_requests);
	num_completed = stub_complete_io(num_requests);
	CU_ASSERT(num_completed == 0);

	/* Check that the same applies to a separate metadata buffer */
	bdev->md_interleave = false;
	bdev->md_len = 64;
	bdev->blocklen = 4096;

	num_requests = offset = 0;
	while (offset < num_blocks) {
		num_io_blocks = spdk_min(ZERO_BUFFER_SIZE / (bdev->blocklen + bdev->md_len), num_blocks);
		expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE,
						   offset, num_io_blocks, 0);
		expected_io->md_buf = (char *)g_bdev_mgr.zero_buffer + num_io_blocks * bdev->blocklen;
		TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
		offset += num_io_blocks;
		num_requests++;
	}

	rc = spdk_bdev_write_zeroes_blocks(desc, ioch, 0, num_blocks, io_done, NULL);
	CU_ASSERT_EQUAL(rc, 0);
	num_completed = stub_complete_io(num_requests);
	CU_ASSERT_EQUAL(num_completed, num_requests);

	ut_enable_io_type(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, true);
	spdk_put_io_channel(ioch);
	spdk_bdev_close(desc);
	free_bdev(bdev);
	spdk_bdev_finish(bdev_fini_cb, NULL);
	poll_threads();
}

static void
bdev_zcopy_write(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc = NULL;
	struct spdk_io_channel *ioch;
	struct ut_expected_io *expected_io;
	uint64_t offset, num_blocks;
	uint32_t num_completed;
	char aa_buf[512];
	struct iovec iov;
	int rc;
	const bool populate = false;
	const bool commit = true;

	memset(aa_buf, 0xaa, sizeof(aa_buf));

	spdk_bdev_initialize(bdev_init_cb, NULL);
	bdev = allocate_bdev("bdev");

	rc = spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc);
	CU_ASSERT_EQUAL(rc, 0);
	SPDK_CU_ASSERT_FATAL(desc != NULL);
	CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc));
	ioch = spdk_bdev_get_io_channel(desc);
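	/* Zero-copy I/O is a two-phase protocol: spdk_bdev_zcopy_start() asks the
	 * bdev module for a data buffer (populate=false for a write) and
	 * spdk_bdev_zcopy_end(commit=true) commits the data once the caller has
	 * filled that buffer in. The stub hands out g_zcopy_write_buf as the
	 * module-owned buffer, which is what the iov checks below rely on.
	 */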
	SPDK_CU_ASSERT_FATAL(ioch != NULL);

	g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS;

	offset = 50;
	num_blocks = 1;
	iov.iov_base = NULL;
	iov.iov_len = 0;

	g_zcopy_read_buf = (void *) 0x1122334455667788UL;
	g_zcopy_read_buf_len = (uint32_t) -1;
	/* Do a zcopy start for a write (populate=false) */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_ZCOPY, offset, num_blocks, 0);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
	g_io_done = false;
	g_zcopy_write_buf = aa_buf;
	g_zcopy_write_buf_len = sizeof(aa_buf);
	g_zcopy_bdev_io = NULL;
	rc = spdk_bdev_zcopy_start(desc, ioch, &iov, 1, offset, num_blocks, populate, io_done, NULL);
	CU_ASSERT_EQUAL(rc, 0);
	num_completed = stub_complete_io(1);
	CU_ASSERT_EQUAL(num_completed, 1);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS);
	/* Check that the iov has been set up */
	CU_ASSERT(iov.iov_base == g_zcopy_write_buf);
	CU_ASSERT(iov.iov_len == g_zcopy_write_buf_len);
	/* Check that the bdev_io has been saved */
	CU_ASSERT(g_zcopy_bdev_io != NULL);
	/* Now do the zcopy end for a write (commit=true) */
	g_io_done = false;
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_ZCOPY, offset, num_blocks, 0);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
	rc = spdk_bdev_zcopy_end(g_zcopy_bdev_io, commit, io_done, NULL);
	CU_ASSERT_EQUAL(rc, 0);
	num_completed = stub_complete_io(1);
	CU_ASSERT_EQUAL(num_completed, 1);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS);
	/* Check that the g_zcopy globals are reset by io_done */
	CU_ASSERT(g_zcopy_write_buf == NULL);
	CU_ASSERT(g_zcopy_write_buf_len == 0);
	/* Check that io_done has freed the g_zcopy_bdev_io */
	CU_ASSERT(g_zcopy_bdev_io == NULL);

	/* Check that the zcopy read buffer has not been touched, which
	 * ensures that the correct buffers were used.
	 */
	CU_ASSERT(g_zcopy_read_buf == (void *) 0x1122334455667788UL);
	CU_ASSERT(g_zcopy_read_buf_len == (uint32_t) -1);

	spdk_put_io_channel(ioch);
	spdk_bdev_close(desc);
	free_bdev(bdev);
	spdk_bdev_finish(bdev_fini_cb, NULL);
	poll_threads();
}

static void
bdev_zcopy_read(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc = NULL;
	struct spdk_io_channel *ioch;
	struct ut_expected_io *expected_io;
	uint64_t offset, num_blocks;
	uint32_t num_completed;
	char aa_buf[512];
	struct iovec iov;
	int rc;
	const bool populate = true;
	const bool commit = false;

	memset(aa_buf, 0xaa, sizeof(aa_buf));

	spdk_bdev_initialize(bdev_init_cb, NULL);
	bdev = allocate_bdev("bdev");

	rc = spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc);
	CU_ASSERT_EQUAL(rc, 0);
	SPDK_CU_ASSERT_FATAL(desc != NULL);
	CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc));
	ioch = spdk_bdev_get_io_channel(desc);
	SPDK_CU_ASSERT_FATAL(ioch != NULL);

	g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS;

	offset = 50;
	num_blocks = 1;
	iov.iov_base = NULL;
	iov.iov_len = 0;

	g_zcopy_write_buf = (void *) 0x1122334455667788UL;
	g_zcopy_write_buf_len = (uint32_t) -1;

	/* Do a zcopy start for a read (populate=true) */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_ZCOPY, offset, num_blocks, 0);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
	g_io_done = false;
	g_zcopy_read_buf = aa_buf;
	g_zcopy_read_buf_len = sizeof(aa_buf);
	g_zcopy_bdev_io = NULL;
	rc = spdk_bdev_zcopy_start(desc, ioch, &iov, 1, offset, num_blocks, populate, io_done, NULL);
	CU_ASSERT_EQUAL(rc, 0);
	num_completed = stub_complete_io(1);
	CU_ASSERT_EQUAL(num_completed, 1);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS);
	/* Check that the iov has been set up */
	CU_ASSERT(iov.iov_base == g_zcopy_read_buf);
	CU_ASSERT(iov.iov_len == g_zcopy_read_buf_len);
	/* Check that the bdev_io has been saved */
	CU_ASSERT(g_zcopy_bdev_io != NULL);

	/* Now do the zcopy end for a read (commit=false) */
	g_io_done = false;
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_ZCOPY, offset, num_blocks, 0);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
	rc = spdk_bdev_zcopy_end(g_zcopy_bdev_io, commit, io_done, NULL);
	CU_ASSERT_EQUAL(rc, 0);
	num_completed = stub_complete_io(1);
	CU_ASSERT_EQUAL(num_completed, 1);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS);
	/* Check that the g_zcopy globals are reset by io_done */
	CU_ASSERT(g_zcopy_read_buf == NULL);
	CU_ASSERT(g_zcopy_read_buf_len == 0);
	/* Check that io_done has freed the g_zcopy_bdev_io */
	CU_ASSERT(g_zcopy_bdev_io == NULL);

	/* Check that the zcopy write buffer has not been touched, which
	 * ensures that the correct buffers were used.
3640 */ 3641 CU_ASSERT(g_zcopy_write_buf == (void *) 0x1122334455667788UL); 3642 CU_ASSERT(g_zcopy_write_buf_len == (uint32_t) -1); 3643 3644 spdk_put_io_channel(ioch); 3645 spdk_bdev_close(desc); 3646 free_bdev(bdev); 3647 spdk_bdev_finish(bdev_fini_cb, NULL); 3648 poll_threads(); 3649 } 3650 3651 static void 3652 bdev_open_while_hotremove(void) 3653 { 3654 struct spdk_bdev *bdev; 3655 struct spdk_bdev_desc *desc[2] = {}; 3656 int rc; 3657 3658 bdev = allocate_bdev("bdev"); 3659 3660 rc = spdk_bdev_open_ext("bdev", false, bdev_ut_event_cb, NULL, &desc[0]); 3661 CU_ASSERT(rc == 0); 3662 SPDK_CU_ASSERT_FATAL(desc[0] != NULL); 3663 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc[0])); 3664 3665 spdk_bdev_unregister(bdev, NULL, NULL); 3666 3667 rc = spdk_bdev_open_ext("bdev", false, bdev_ut_event_cb, NULL, &desc[1]); 3668 CU_ASSERT(rc == -ENODEV); 3669 SPDK_CU_ASSERT_FATAL(desc[1] == NULL); 3670 3671 spdk_bdev_close(desc[0]); 3672 free_bdev(bdev); 3673 } 3674 3675 static void 3676 bdev_close_while_hotremove(void) 3677 { 3678 struct spdk_bdev *bdev; 3679 struct spdk_bdev_desc *desc = NULL; 3680 int rc = 0; 3681 3682 bdev = allocate_bdev("bdev"); 3683 3684 rc = spdk_bdev_open_ext("bdev", true, bdev_open_cb1, &desc, &desc); 3685 CU_ASSERT_EQUAL(rc, 0); 3686 SPDK_CU_ASSERT_FATAL(desc != NULL); 3687 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 3688 3689 /* Simulate hot-unplug by unregistering bdev */ 3690 g_event_type1 = 0xFF; 3691 g_unregister_arg = NULL; 3692 g_unregister_rc = -1; 3693 spdk_bdev_unregister(bdev, bdev_unregister_cb, (void *)0x12345678); 3694 /* Close device while remove event is in flight */ 3695 spdk_bdev_close(desc); 3696 3697 /* Ensure that unregister callback is delayed */ 3698 CU_ASSERT_EQUAL(g_unregister_arg, NULL); 3699 CU_ASSERT_EQUAL(g_unregister_rc, -1); 3700 3701 poll_threads(); 3702 3703 /* Event callback shall not be issued because device was closed */ 3704 CU_ASSERT_EQUAL(g_event_type1, 0xFF); 3705 /* Unregister callback is issued */ 3706 CU_ASSERT_EQUAL(g_unregister_arg, (void *)0x12345678); 3707 CU_ASSERT_EQUAL(g_unregister_rc, 0); 3708 3709 free_bdev(bdev); 3710 } 3711 3712 static void 3713 bdev_open_ext(void) 3714 { 3715 struct spdk_bdev *bdev; 3716 struct spdk_bdev_desc *desc1 = NULL; 3717 struct spdk_bdev_desc *desc2 = NULL; 3718 int rc = 0; 3719 3720 bdev = allocate_bdev("bdev"); 3721 3722 rc = spdk_bdev_open_ext("bdev", true, NULL, NULL, &desc1); 3723 CU_ASSERT_EQUAL(rc, -EINVAL); 3724 3725 rc = spdk_bdev_open_ext("bdev", true, bdev_open_cb1, &desc1, &desc1); 3726 CU_ASSERT_EQUAL(rc, 0); 3727 3728 rc = spdk_bdev_open_ext("bdev", true, bdev_open_cb2, &desc2, &desc2); 3729 CU_ASSERT_EQUAL(rc, 0); 3730 3731 g_event_type1 = 0xFF; 3732 g_event_type2 = 0xFF; 3733 3734 /* Simulate hot-unplug by unregistering bdev */ 3735 spdk_bdev_unregister(bdev, NULL, NULL); 3736 poll_threads(); 3737 3738 /* Check if correct events have been triggered in event callback fn */ 3739 CU_ASSERT_EQUAL(g_event_type1, SPDK_BDEV_EVENT_REMOVE); 3740 CU_ASSERT_EQUAL(g_event_type2, SPDK_BDEV_EVENT_REMOVE); 3741 3742 free_bdev(bdev); 3743 poll_threads(); 3744 } 3745 3746 struct timeout_io_cb_arg { 3747 struct iovec iov; 3748 uint8_t type; 3749 }; 3750 3751 static int 3752 bdev_channel_count_submitted_io(struct spdk_bdev_channel *ch) 3753 { 3754 struct spdk_bdev_io *bdev_io; 3755 int n = 0; 3756 3757 if (!ch) { 3758 return -1; 3759 } 3760 3761 TAILQ_FOREACH(bdev_io, &ch->io_submitted, internal.ch_link) { 3762 n++; 3763 } 3764 3765 return n; 3766 } 3767 3768 static void 3769 
bdev_channel_io_timeout_cb(void *cb_arg, struct spdk_bdev_io *bdev_io)
{
	struct timeout_io_cb_arg *ctx = cb_arg;

	ctx->type = bdev_io->type;
	ctx->iov.iov_base = bdev_io->iov.iov_base;
	ctx->iov.iov_len = bdev_io->iov.iov_len;
}

static void
bdev_set_io_timeout(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc = NULL;
	struct spdk_io_channel *io_ch = NULL;
	struct spdk_bdev_channel *bdev_ch = NULL;
	struct timeout_io_cb_arg cb_arg;

	spdk_bdev_initialize(bdev_init_cb, NULL);

	bdev = allocate_bdev("bdev");

	CU_ASSERT(spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc) == 0);
	SPDK_CU_ASSERT_FATAL(desc != NULL);
	CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc));

	io_ch = spdk_bdev_get_io_channel(desc);
	CU_ASSERT(io_ch != NULL);

	bdev_ch = spdk_io_channel_get_ctx(io_ch);
	CU_ASSERT(TAILQ_EMPTY(&bdev_ch->io_submitted));

	/* This is part 1.
	 * We will check the bdev_ch->io_submitted list
	 * to make sure that it links only the user-submitted I/Os.
	 */
	CU_ASSERT(spdk_bdev_read(desc, io_ch, (void *)0x1000, 0, 4096, io_done, NULL) == 0);
	CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 1);
	CU_ASSERT(spdk_bdev_write(desc, io_ch, (void *)0x2000, 0, 4096, io_done, NULL) == 0);
	CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 2);
	stub_complete_io(1);
	CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 1);
	stub_complete_io(1);
	CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 0);

	/* Split IO */
	bdev->optimal_io_boundary = 16;
	bdev->split_on_optimal_io_boundary = true;

	/* Now test that a single-vector command is split correctly.
	 * Offset 14, length 8, payload 0xF000
	 *  Child - Offset 14, length 2, payload 0xF000
	 *  Child - Offset 16, length 6, payload 0xF000 + 2 * 512
	 */
	CU_ASSERT(spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 8, io_done, NULL) == 0);
	/* We count all submitted I/Os, including those generated by splitting. */
	CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 3);
	stub_complete_io(1);
	CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 2);
	stub_complete_io(1);
	CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 0);

	/* Also include the reset IO */
	CU_ASSERT(spdk_bdev_reset(desc, io_ch, io_done, NULL) == 0);
	CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 1);
	poll_threads();
	stub_complete_io(1);
	poll_threads();
	CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 0);

	/* This is part 2.
	 * Test the desc timeout poller registration.
	 */

	/* Successfully set the timeout */
	CU_ASSERT(spdk_bdev_set_timeout(desc, 30, bdev_channel_io_timeout_cb, &cb_arg) == 0);
	CU_ASSERT(desc->io_timeout_poller != NULL);
	CU_ASSERT(desc->timeout_in_sec == 30);
	CU_ASSERT(desc->cb_fn == bdev_channel_io_timeout_cb);
	CU_ASSERT(desc->cb_arg == &cb_arg);

	/* Change the timeout limit */
	CU_ASSERT(spdk_bdev_set_timeout(desc, 20, bdev_channel_io_timeout_cb, &cb_arg) == 0);
	CU_ASSERT(desc->io_timeout_poller != NULL);
	CU_ASSERT(desc->timeout_in_sec == 20);
	CU_ASSERT(desc->cb_fn == bdev_channel_io_timeout_cb);
	CU_ASSERT(desc->cb_arg == &cb_arg);

	/* Disable the timeout */
	CU_ASSERT(spdk_bdev_set_timeout(desc, 0, NULL, NULL) == 0);
	CU_ASSERT(desc->io_timeout_poller == NULL);

	/* This is part 3.
	 * We will catch a timed-out IO and check whether it is
	 * the submitted one.
	 */
	memset(&cb_arg, 0, sizeof(cb_arg));
	CU_ASSERT(spdk_bdev_set_timeout(desc, 30, bdev_channel_io_timeout_cb, &cb_arg) == 0);
	CU_ASSERT(spdk_bdev_write_blocks(desc, io_ch, (void *)0x1000, 0, 1, io_done, NULL) == 0);

	/* Don't reach the limit */
	spdk_delay_us(15 * spdk_get_ticks_hz());
	poll_threads();
	CU_ASSERT(cb_arg.type == 0);
	CU_ASSERT(cb_arg.iov.iov_base == (void *)0x0);
	CU_ASSERT(cb_arg.iov.iov_len == 0);

	/* 15 + 15 = 30, so we reach the limit */
	spdk_delay_us(15 * spdk_get_ticks_hz());
	poll_threads();
	CU_ASSERT(cb_arg.type == SPDK_BDEV_IO_TYPE_WRITE);
	CU_ASSERT(cb_arg.iov.iov_base == (void *)0x1000);
	CU_ASSERT(cb_arg.iov.iov_len == 1 * bdev->blocklen);
	stub_complete_io(1);

	/* Use the same split IO as above and check the IO */
	memset(&cb_arg, 0, sizeof(cb_arg));
	CU_ASSERT(spdk_bdev_write_blocks(desc, io_ch, (void *)0xF000, 14, 8, io_done, NULL) == 0);

	/* The first child completes in time */
	spdk_delay_us(15 * spdk_get_ticks_hz());
	poll_threads();
	stub_complete_io(1);
	CU_ASSERT(cb_arg.type == 0);
	CU_ASSERT(cb_arg.iov.iov_base == (void *)0x0);
	CU_ASSERT(cb_arg.iov.iov_len == 0);

	/* The second child reaches the limit */
	spdk_delay_us(15 * spdk_get_ticks_hz());
	poll_threads();
	CU_ASSERT(cb_arg.type == SPDK_BDEV_IO_TYPE_WRITE);
	CU_ASSERT(cb_arg.iov.iov_base == (void *)0xF000);
	CU_ASSERT(cb_arg.iov.iov_len == 8 * bdev->blocklen);
	stub_complete_io(1);

	/* Also include the reset IO */
	memset(&cb_arg, 0, sizeof(cb_arg));
	CU_ASSERT(spdk_bdev_reset(desc, io_ch, io_done, NULL) == 0);
	spdk_delay_us(30 * spdk_get_ticks_hz());
	poll_threads();
	CU_ASSERT(cb_arg.type == SPDK_BDEV_IO_TYPE_RESET);
	stub_complete_io(1);
	poll_threads();

	spdk_put_io_channel(io_ch);
	spdk_bdev_close(desc);
	free_bdev(bdev);
	spdk_bdev_finish(bdev_fini_cb, NULL);
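	/* spdk_bdev_finish() only schedules the teardown; the poll_threads() below
	 * drains the pending messages so the library is fully shut down before the
	 * next test calls spdk_bdev_initialize() again.
	 */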
3919 poll_threads(); 3920 } 3921 3922 static void 3923 lba_range_overlap(void) 3924 { 3925 struct lba_range r1, r2; 3926 3927 r1.offset = 100; 3928 r1.length = 50; 3929 3930 r2.offset = 0; 3931 r2.length = 1; 3932 CU_ASSERT(!bdev_lba_range_overlapped(&r1, &r2)); 3933 3934 r2.offset = 0; 3935 r2.length = 100; 3936 CU_ASSERT(!bdev_lba_range_overlapped(&r1, &r2)); 3937 3938 r2.offset = 0; 3939 r2.length = 110; 3940 CU_ASSERT(bdev_lba_range_overlapped(&r1, &r2)); 3941 3942 r2.offset = 100; 3943 r2.length = 10; 3944 CU_ASSERT(bdev_lba_range_overlapped(&r1, &r2)); 3945 3946 r2.offset = 110; 3947 r2.length = 20; 3948 CU_ASSERT(bdev_lba_range_overlapped(&r1, &r2)); 3949 3950 r2.offset = 140; 3951 r2.length = 150; 3952 CU_ASSERT(bdev_lba_range_overlapped(&r1, &r2)); 3953 3954 r2.offset = 130; 3955 r2.length = 200; 3956 CU_ASSERT(bdev_lba_range_overlapped(&r1, &r2)); 3957 3958 r2.offset = 150; 3959 r2.length = 100; 3960 CU_ASSERT(!bdev_lba_range_overlapped(&r1, &r2)); 3961 3962 r2.offset = 110; 3963 r2.length = 0; 3964 CU_ASSERT(!bdev_lba_range_overlapped(&r1, &r2)); 3965 } 3966 3967 static bool g_lock_lba_range_done; 3968 static bool g_unlock_lba_range_done; 3969 3970 static void 3971 lock_lba_range_done(void *ctx, int status) 3972 { 3973 g_lock_lba_range_done = true; 3974 } 3975 3976 static void 3977 unlock_lba_range_done(void *ctx, int status) 3978 { 3979 g_unlock_lba_range_done = true; 3980 } 3981 3982 static void 3983 lock_lba_range_check_ranges(void) 3984 { 3985 struct spdk_bdev *bdev; 3986 struct spdk_bdev_desc *desc = NULL; 3987 struct spdk_io_channel *io_ch; 3988 struct spdk_bdev_channel *channel; 3989 struct lba_range *range; 3990 int ctx1; 3991 int rc; 3992 3993 spdk_bdev_initialize(bdev_init_cb, NULL); 3994 3995 bdev = allocate_bdev("bdev0"); 3996 3997 rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc); 3998 CU_ASSERT(rc == 0); 3999 CU_ASSERT(desc != NULL); 4000 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 4001 io_ch = spdk_bdev_get_io_channel(desc); 4002 CU_ASSERT(io_ch != NULL); 4003 channel = spdk_io_channel_get_ctx(io_ch); 4004 4005 g_lock_lba_range_done = false; 4006 rc = bdev_lock_lba_range(desc, io_ch, 20, 10, lock_lba_range_done, &ctx1); 4007 CU_ASSERT(rc == 0); 4008 poll_threads(); 4009 4010 CU_ASSERT(g_lock_lba_range_done == true); 4011 range = TAILQ_FIRST(&channel->locked_ranges); 4012 SPDK_CU_ASSERT_FATAL(range != NULL); 4013 CU_ASSERT(range->offset == 20); 4014 CU_ASSERT(range->length == 10); 4015 CU_ASSERT(range->owner_ch == channel); 4016 4017 /* Unlocks must exactly match a lock. 
*/ 4018 g_unlock_lba_range_done = false; 4019 rc = bdev_unlock_lba_range(desc, io_ch, 20, 1, unlock_lba_range_done, &ctx1); 4020 CU_ASSERT(rc == -EINVAL); 4021 CU_ASSERT(g_unlock_lba_range_done == false); 4022 4023 rc = bdev_unlock_lba_range(desc, io_ch, 20, 10, unlock_lba_range_done, &ctx1); 4024 CU_ASSERT(rc == 0); 4025 spdk_delay_us(100); 4026 poll_threads(); 4027 4028 CU_ASSERT(g_unlock_lba_range_done == true); 4029 CU_ASSERT(TAILQ_EMPTY(&channel->locked_ranges)); 4030 4031 spdk_put_io_channel(io_ch); 4032 spdk_bdev_close(desc); 4033 free_bdev(bdev); 4034 spdk_bdev_finish(bdev_fini_cb, NULL); 4035 poll_threads(); 4036 } 4037 4038 static void 4039 lock_lba_range_with_io_outstanding(void) 4040 { 4041 struct spdk_bdev *bdev; 4042 struct spdk_bdev_desc *desc = NULL; 4043 struct spdk_io_channel *io_ch; 4044 struct spdk_bdev_channel *channel; 4045 struct lba_range *range; 4046 char buf[4096]; 4047 int ctx1; 4048 int rc; 4049 4050 spdk_bdev_initialize(bdev_init_cb, NULL); 4051 4052 bdev = allocate_bdev("bdev0"); 4053 4054 rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc); 4055 CU_ASSERT(rc == 0); 4056 CU_ASSERT(desc != NULL); 4057 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 4058 io_ch = spdk_bdev_get_io_channel(desc); 4059 CU_ASSERT(io_ch != NULL); 4060 channel = spdk_io_channel_get_ctx(io_ch); 4061 4062 g_io_done = false; 4063 rc = spdk_bdev_read_blocks(desc, io_ch, buf, 20, 1, io_done, &ctx1); 4064 CU_ASSERT(rc == 0); 4065 4066 g_lock_lba_range_done = false; 4067 rc = bdev_lock_lba_range(desc, io_ch, 20, 10, lock_lba_range_done, &ctx1); 4068 CU_ASSERT(rc == 0); 4069 poll_threads(); 4070 4071 /* The lock should immediately become valid, since there are no outstanding 4072 * write I/O. 4073 */ 4074 CU_ASSERT(g_io_done == false); 4075 CU_ASSERT(g_lock_lba_range_done == true); 4076 range = TAILQ_FIRST(&channel->locked_ranges); 4077 SPDK_CU_ASSERT_FATAL(range != NULL); 4078 CU_ASSERT(range->offset == 20); 4079 CU_ASSERT(range->length == 10); 4080 CU_ASSERT(range->owner_ch == channel); 4081 CU_ASSERT(range->locked_ctx == &ctx1); 4082 4083 rc = bdev_unlock_lba_range(desc, io_ch, 20, 10, lock_lba_range_done, &ctx1); 4084 CU_ASSERT(rc == 0); 4085 stub_complete_io(1); 4086 spdk_delay_us(100); 4087 poll_threads(); 4088 4089 CU_ASSERT(TAILQ_EMPTY(&channel->locked_ranges)); 4090 4091 /* Now try again, but with a write I/O. */ 4092 g_io_done = false; 4093 rc = spdk_bdev_write_blocks(desc, io_ch, buf, 20, 1, io_done, &ctx1); 4094 CU_ASSERT(rc == 0); 4095 4096 g_lock_lba_range_done = false; 4097 rc = bdev_lock_lba_range(desc, io_ch, 20, 10, lock_lba_range_done, &ctx1); 4098 CU_ASSERT(rc == 0); 4099 poll_threads(); 4100 4101 /* The lock should not be fully valid yet, since a write I/O is outstanding. 4102 * But note that the range should be on the channel's locked_list, to make sure no 4103 * new write I/O are started. 4104 */ 4105 CU_ASSERT(g_io_done == false); 4106 CU_ASSERT(g_lock_lba_range_done == false); 4107 range = TAILQ_FIRST(&channel->locked_ranges); 4108 SPDK_CU_ASSERT_FATAL(range != NULL); 4109 CU_ASSERT(range->offset == 20); 4110 CU_ASSERT(range->length == 10); 4111 4112 /* Complete the write I/O. This should make the lock valid (checked by confirming 4113 * our callback was invoked). 
4114 */ 4115 stub_complete_io(1); 4116 spdk_delay_us(100); 4117 poll_threads(); 4118 CU_ASSERT(g_io_done == true); 4119 CU_ASSERT(g_lock_lba_range_done == true); 4120 4121 rc = bdev_unlock_lba_range(desc, io_ch, 20, 10, unlock_lba_range_done, &ctx1); 4122 CU_ASSERT(rc == 0); 4123 poll_threads(); 4124 4125 CU_ASSERT(TAILQ_EMPTY(&channel->locked_ranges)); 4126 4127 spdk_put_io_channel(io_ch); 4128 spdk_bdev_close(desc); 4129 free_bdev(bdev); 4130 spdk_bdev_finish(bdev_fini_cb, NULL); 4131 poll_threads(); 4132 } 4133 4134 static void 4135 lock_lba_range_overlapped(void) 4136 { 4137 struct spdk_bdev *bdev; 4138 struct spdk_bdev_desc *desc = NULL; 4139 struct spdk_io_channel *io_ch; 4140 struct spdk_bdev_channel *channel; 4141 struct lba_range *range; 4142 int ctx1; 4143 int rc; 4144 4145 spdk_bdev_initialize(bdev_init_cb, NULL); 4146 4147 bdev = allocate_bdev("bdev0"); 4148 4149 rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc); 4150 CU_ASSERT(rc == 0); 4151 CU_ASSERT(desc != NULL); 4152 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 4153 io_ch = spdk_bdev_get_io_channel(desc); 4154 CU_ASSERT(io_ch != NULL); 4155 channel = spdk_io_channel_get_ctx(io_ch); 4156 4157 /* Lock range 20-29. */ 4158 g_lock_lba_range_done = false; 4159 rc = bdev_lock_lba_range(desc, io_ch, 20, 10, lock_lba_range_done, &ctx1); 4160 CU_ASSERT(rc == 0); 4161 poll_threads(); 4162 4163 CU_ASSERT(g_lock_lba_range_done == true); 4164 range = TAILQ_FIRST(&channel->locked_ranges); 4165 SPDK_CU_ASSERT_FATAL(range != NULL); 4166 CU_ASSERT(range->offset == 20); 4167 CU_ASSERT(range->length == 10); 4168 4169 /* Try to lock range 25-39. It should not lock immediately, since it overlaps with 4170 * 20-29. 4171 */ 4172 g_lock_lba_range_done = false; 4173 rc = bdev_lock_lba_range(desc, io_ch, 25, 15, lock_lba_range_done, &ctx1); 4174 CU_ASSERT(rc == 0); 4175 poll_threads(); 4176 4177 CU_ASSERT(g_lock_lba_range_done == false); 4178 range = TAILQ_FIRST(&bdev->internal.pending_locked_ranges); 4179 SPDK_CU_ASSERT_FATAL(range != NULL); 4180 CU_ASSERT(range->offset == 25); 4181 CU_ASSERT(range->length == 15); 4182 4183 /* Unlock 20-29. This should result in range 25-39 now getting locked since it 4184 * no longer overlaps with an active lock. 4185 */ 4186 g_unlock_lba_range_done = false; 4187 rc = bdev_unlock_lba_range(desc, io_ch, 20, 10, unlock_lba_range_done, &ctx1); 4188 CU_ASSERT(rc == 0); 4189 poll_threads(); 4190 4191 CU_ASSERT(g_unlock_lba_range_done == true); 4192 CU_ASSERT(TAILQ_EMPTY(&bdev->internal.pending_locked_ranges)); 4193 range = TAILQ_FIRST(&channel->locked_ranges); 4194 SPDK_CU_ASSERT_FATAL(range != NULL); 4195 CU_ASSERT(range->offset == 25); 4196 CU_ASSERT(range->length == 15); 4197 4198 /* Lock 40-59. This should immediately lock since it does not overlap with the 4199 * currently active 25-39 lock. 4200 */ 4201 g_lock_lba_range_done = false; 4202 rc = bdev_lock_lba_range(desc, io_ch, 40, 20, lock_lba_range_done, &ctx1); 4203 CU_ASSERT(rc == 0); 4204 poll_threads(); 4205 4206 CU_ASSERT(g_lock_lba_range_done == true); 4207 range = TAILQ_FIRST(&bdev->internal.locked_ranges); 4208 SPDK_CU_ASSERT_FATAL(range != NULL); 4209 range = TAILQ_NEXT(range, tailq); 4210 SPDK_CU_ASSERT_FATAL(range != NULL); 4211 CU_ASSERT(range->offset == 40); 4212 CU_ASSERT(range->length == 20); 4213 4214 /* Try to lock 35-44. Note that this overlaps with both 25-39 and 40-59. 
*/ 4215 g_lock_lba_range_done = false; 4216 rc = bdev_lock_lba_range(desc, io_ch, 35, 10, lock_lba_range_done, &ctx1); 4217 CU_ASSERT(rc == 0); 4218 poll_threads(); 4219 4220 CU_ASSERT(g_lock_lba_range_done == false); 4221 range = TAILQ_FIRST(&bdev->internal.pending_locked_ranges); 4222 SPDK_CU_ASSERT_FATAL(range != NULL); 4223 CU_ASSERT(range->offset == 35); 4224 CU_ASSERT(range->length == 10); 4225 4226 /* Unlock 25-39. Make sure that 35-44 is still in the pending list, since 4227 * the 40-59 lock is still active. 4228 */ 4229 g_unlock_lba_range_done = false; 4230 rc = bdev_unlock_lba_range(desc, io_ch, 25, 15, unlock_lba_range_done, &ctx1); 4231 CU_ASSERT(rc == 0); 4232 poll_threads(); 4233 4234 CU_ASSERT(g_unlock_lba_range_done == true); 4235 CU_ASSERT(g_lock_lba_range_done == false); 4236 range = TAILQ_FIRST(&bdev->internal.pending_locked_ranges); 4237 SPDK_CU_ASSERT_FATAL(range != NULL); 4238 CU_ASSERT(range->offset == 35); 4239 CU_ASSERT(range->length == 10); 4240 4241 /* Unlock 40-59. This should result in 35-44 now getting locked, since there are 4242 * no longer any active overlapping locks. 4243 */ 4244 g_unlock_lba_range_done = false; 4245 rc = bdev_unlock_lba_range(desc, io_ch, 40, 20, unlock_lba_range_done, &ctx1); 4246 CU_ASSERT(rc == 0); 4247 poll_threads(); 4248 4249 CU_ASSERT(g_unlock_lba_range_done == true); 4250 CU_ASSERT(g_lock_lba_range_done == true); 4251 CU_ASSERT(TAILQ_EMPTY(&bdev->internal.pending_locked_ranges)); 4252 range = TAILQ_FIRST(&bdev->internal.locked_ranges); 4253 SPDK_CU_ASSERT_FATAL(range != NULL); 4254 CU_ASSERT(range->offset == 35); 4255 CU_ASSERT(range->length == 10); 4256 4257 /* Finally, unlock 35-44. */ 4258 g_unlock_lba_range_done = false; 4259 rc = bdev_unlock_lba_range(desc, io_ch, 35, 10, unlock_lba_range_done, &ctx1); 4260 CU_ASSERT(rc == 0); 4261 poll_threads(); 4262 4263 CU_ASSERT(g_unlock_lba_range_done == true); 4264 CU_ASSERT(TAILQ_EMPTY(&bdev->internal.locked_ranges)); 4265 4266 spdk_put_io_channel(io_ch); 4267 spdk_bdev_close(desc); 4268 free_bdev(bdev); 4269 spdk_bdev_finish(bdev_fini_cb, NULL); 4270 poll_threads(); 4271 } 4272 4273 static void 4274 abort_done(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg) 4275 { 4276 g_abort_done = true; 4277 g_abort_status = bdev_io->internal.status; 4278 spdk_bdev_free_io(bdev_io); 4279 } 4280 4281 static void 4282 bdev_io_abort(void) 4283 { 4284 struct spdk_bdev *bdev; 4285 struct spdk_bdev_desc *desc = NULL; 4286 struct spdk_io_channel *io_ch; 4287 struct spdk_bdev_channel *channel; 4288 struct spdk_bdev_mgmt_channel *mgmt_ch; 4289 struct spdk_bdev_opts bdev_opts = {}; 4290 struct iovec iov[BDEV_IO_NUM_CHILD_IOV * 2]; 4291 uint64_t io_ctx1 = 0, io_ctx2 = 0, i; 4292 int rc; 4293 4294 spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts)); 4295 bdev_opts.bdev_io_pool_size = 7; 4296 bdev_opts.bdev_io_cache_size = 2; 4297 4298 rc = spdk_bdev_set_opts(&bdev_opts); 4299 CU_ASSERT(rc == 0); 4300 spdk_bdev_initialize(bdev_init_cb, NULL); 4301 4302 bdev = allocate_bdev("bdev0"); 4303 4304 rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc); 4305 CU_ASSERT(rc == 0); 4306 CU_ASSERT(desc != NULL); 4307 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 4308 io_ch = spdk_bdev_get_io_channel(desc); 4309 CU_ASSERT(io_ch != NULL); 4310 channel = spdk_io_channel_get_ctx(io_ch); 4311 mgmt_ch = channel->shared_resource->mgmt_ch; 4312 4313 g_abort_done = false; 4314 4315 ut_enable_io_type(SPDK_BDEV_IO_TYPE_ABORT, false); 4316 4317 rc = spdk_bdev_abort(desc, io_ch, &io_ctx1, abort_done, 
NULL); 4318 CU_ASSERT(rc == -ENOTSUP); 4319 4320 ut_enable_io_type(SPDK_BDEV_IO_TYPE_ABORT, true); 4321 4322 rc = spdk_bdev_abort(desc, io_ch, &io_ctx2, abort_done, NULL); 4323 CU_ASSERT(rc == 0); 4324 CU_ASSERT(g_abort_done == true); 4325 CU_ASSERT(g_abort_status == SPDK_BDEV_IO_STATUS_FAILED); 4326 4327 /* Test the case that the target I/O was successfully aborted. */ 4328 g_io_done = false; 4329 4330 rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, &io_ctx1); 4331 CU_ASSERT(rc == 0); 4332 CU_ASSERT(g_io_done == false); 4333 4334 g_abort_done = false; 4335 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 4336 4337 rc = spdk_bdev_abort(desc, io_ch, &io_ctx1, abort_done, NULL); 4338 CU_ASSERT(rc == 0); 4339 CU_ASSERT(g_io_done == true); 4340 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED); 4341 stub_complete_io(1); 4342 CU_ASSERT(g_abort_done == true); 4343 CU_ASSERT(g_abort_status == SPDK_BDEV_IO_STATUS_SUCCESS); 4344 4345 /* Test the case that the target I/O was not aborted because it completed 4346 * in the middle of execution of the abort. 4347 */ 4348 g_io_done = false; 4349 4350 rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, &io_ctx1); 4351 CU_ASSERT(rc == 0); 4352 CU_ASSERT(g_io_done == false); 4353 4354 g_abort_done = false; 4355 g_io_exp_status = SPDK_BDEV_IO_STATUS_FAILED; 4356 4357 rc = spdk_bdev_abort(desc, io_ch, &io_ctx1, abort_done, NULL); 4358 CU_ASSERT(rc == 0); 4359 CU_ASSERT(g_io_done == false); 4360 4361 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 4362 stub_complete_io(1); 4363 CU_ASSERT(g_io_done == true); 4364 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS); 4365 4366 g_io_exp_status = SPDK_BDEV_IO_STATUS_FAILED; 4367 stub_complete_io(1); 4368 CU_ASSERT(g_abort_done == true); 4369 CU_ASSERT(g_abort_status == SPDK_BDEV_IO_STATUS_SUCCESS); 4370 4371 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 4372 4373 bdev->optimal_io_boundary = 16; 4374 bdev->split_on_optimal_io_boundary = true; 4375 4376 /* Test that a single-vector command which is split is aborted correctly. 4377 * Offset 14, length 8, payload 0xF000 4378 * Child - Offset 14, length 2, payload 0xF000 4379 * Child - Offset 16, length 6, payload 0xF000 + 2 * 512 4380 */ 4381 g_io_done = false; 4382 4383 rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 8, io_done, &io_ctx1); 4384 CU_ASSERT(rc == 0); 4385 CU_ASSERT(g_io_done == false); 4386 4387 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 4388 4389 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 4390 4391 rc = spdk_bdev_abort(desc, io_ch, &io_ctx1, abort_done, NULL); 4392 CU_ASSERT(rc == 0); 4393 CU_ASSERT(g_io_done == true); 4394 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED); 4395 stub_complete_io(2); 4396 CU_ASSERT(g_abort_done == true); 4397 CU_ASSERT(g_abort_status == SPDK_BDEV_IO_STATUS_SUCCESS); 4398 4399 /* Test that a multi-vector command that needs to be split by strip and then 4400 * needs to be split is aborted correctly. Abort is requested before the second 4401 * child I/O was submitted. The parent I/O should complete with failure without 4402 * submitting the second child I/O. 
 */
	for (i = 0; i < BDEV_IO_NUM_CHILD_IOV * 2; i++) {
		iov[i].iov_base = (void *)((i + 1) * 0x10000);
		iov[i].iov_len = 512;
	}

	bdev->optimal_io_boundary = BDEV_IO_NUM_CHILD_IOV;
	g_io_done = false;
	rc = spdk_bdev_readv_blocks(desc, io_ch, iov, BDEV_IO_NUM_CHILD_IOV * 2, 0,
				    BDEV_IO_NUM_CHILD_IOV * 2, io_done, &io_ctx1);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);

	g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS;

	rc = spdk_bdev_abort(desc, io_ch, &io_ctx1, abort_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED);
	stub_complete_io(1);
	CU_ASSERT(g_abort_done == true);
	CU_ASSERT(g_abort_status == SPDK_BDEV_IO_STATUS_SUCCESS);

	g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS;

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);

	bdev->optimal_io_boundary = 16;
	g_io_done = false;

	/* Test that a single-vector command which is split is aborted correctly.
	 * Unlike the cases above, the child abort requests will be submitted
	 * sequentially due to the limited capacity of the spdk_bdev_io pool.
	 */
	rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 50, io_done, &io_ctx1);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 4);

	g_abort_done = false;
	g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS;

	rc = spdk_bdev_abort(desc, io_ch, &io_ctx1, abort_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(!TAILQ_EMPTY(&mgmt_ch->io_wait_queue));
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 4);

	stub_complete_io(1);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED);
	stub_complete_io(3);
	CU_ASSERT(g_abort_done == true);
	CU_ASSERT(g_abort_status == SPDK_BDEV_IO_STATUS_SUCCESS);

	g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS;

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);

	spdk_put_io_channel(io_ch);
	spdk_bdev_close(desc);
	free_bdev(bdev);
	spdk_bdev_finish(bdev_fini_cb, NULL);
	poll_threads();
}

static void
bdev_unmap(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc = NULL;
	struct spdk_io_channel *ioch;
	struct spdk_bdev_channel *bdev_ch;
	struct ut_expected_io *expected_io;
	struct spdk_bdev_opts bdev_opts = {};
	uint32_t i, num_outstanding;
	uint64_t offset, num_blocks, max_unmap_blocks, num_children;
	int rc;

	spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts));
	bdev_opts.bdev_io_pool_size = 512;
	bdev_opts.bdev_io_cache_size = 64;
	rc = spdk_bdev_set_opts(&bdev_opts);
	CU_ASSERT(rc == 0);

	spdk_bdev_initialize(bdev_init_cb, NULL);
	bdev = allocate_bdev("bdev");

	rc = spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc);
	CU_ASSERT_EQUAL(rc, 0);
	SPDK_CU_ASSERT_FATAL(desc != NULL);
	CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc));
	ioch = spdk_bdev_get_io_channel(desc);
	SPDK_CU_ASSERT_FATAL(ioch != NULL);
	bdev_ch = spdk_io_channel_get_ctx(ioch);
	CU_ASSERT(TAILQ_EMPTY(&bdev_ch->io_submitted));

	fn_table.submit_request = stub_submit_request;
	g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS;

	/* Case 1: First test
the request won't be split */ 4506 num_blocks = 32; 4507 4508 g_io_done = false; 4509 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_UNMAP, 0, num_blocks, 0); 4510 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 4511 rc = spdk_bdev_unmap_blocks(desc, ioch, 0, num_blocks, io_done, NULL); 4512 CU_ASSERT_EQUAL(rc, 0); 4513 CU_ASSERT(g_io_done == false); 4514 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 4515 stub_complete_io(1); 4516 CU_ASSERT(g_io_done == true); 4517 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 4518 4519 /* Case 2: Test the split with 2 children requests */ 4520 bdev->max_unmap = 8; 4521 bdev->max_unmap_segments = 2; 4522 max_unmap_blocks = bdev->max_unmap * bdev->max_unmap_segments; 4523 num_blocks = max_unmap_blocks * 2; 4524 offset = 0; 4525 4526 g_io_done = false; 4527 for (i = 0; i < 2; i++) { 4528 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_UNMAP, offset, max_unmap_blocks, 0); 4529 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 4530 offset += max_unmap_blocks; 4531 } 4532 4533 rc = spdk_bdev_unmap_blocks(desc, ioch, 0, num_blocks, io_done, NULL); 4534 CU_ASSERT_EQUAL(rc, 0); 4535 CU_ASSERT(g_io_done == false); 4536 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 4537 stub_complete_io(2); 4538 CU_ASSERT(g_io_done == true); 4539 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 4540 4541 /* Case 3: Test the split with 15 children requests, will finish 8 requests first */ 4542 num_children = 15; 4543 num_blocks = max_unmap_blocks * num_children; 4544 g_io_done = false; 4545 offset = 0; 4546 for (i = 0; i < num_children; i++) { 4547 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_UNMAP, offset, max_unmap_blocks, 0); 4548 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 4549 offset += max_unmap_blocks; 4550 } 4551 4552 rc = spdk_bdev_unmap_blocks(desc, ioch, 0, num_blocks, io_done, NULL); 4553 CU_ASSERT_EQUAL(rc, 0); 4554 CU_ASSERT(g_io_done == false); 4555 4556 while (num_children > 0) { 4557 num_outstanding = spdk_min(num_children, SPDK_BDEV_MAX_CHILDREN_UNMAP_WRITE_ZEROES_REQS); 4558 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == num_outstanding); 4559 stub_complete_io(num_outstanding); 4560 num_children -= num_outstanding; 4561 } 4562 CU_ASSERT(g_io_done == true); 4563 4564 spdk_put_io_channel(ioch); 4565 spdk_bdev_close(desc); 4566 free_bdev(bdev); 4567 spdk_bdev_finish(bdev_fini_cb, NULL); 4568 poll_threads(); 4569 } 4570 4571 static void 4572 bdev_write_zeroes_split_test(void) 4573 { 4574 struct spdk_bdev *bdev; 4575 struct spdk_bdev_desc *desc = NULL; 4576 struct spdk_io_channel *ioch; 4577 struct spdk_bdev_channel *bdev_ch; 4578 struct ut_expected_io *expected_io; 4579 struct spdk_bdev_opts bdev_opts = {}; 4580 uint32_t i, num_outstanding; 4581 uint64_t offset, num_blocks, max_write_zeroes_blocks, num_children; 4582 int rc; 4583 4584 spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts)); 4585 bdev_opts.bdev_io_pool_size = 512; 4586 bdev_opts.bdev_io_cache_size = 64; 4587 rc = spdk_bdev_set_opts(&bdev_opts); 4588 CU_ASSERT(rc == 0); 4589 4590 spdk_bdev_initialize(bdev_init_cb, NULL); 4591 bdev = allocate_bdev("bdev"); 4592 4593 rc = spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc); 4594 CU_ASSERT_EQUAL(rc, 0); 4595 SPDK_CU_ASSERT_FATAL(desc != NULL); 4596 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 4597 ioch = spdk_bdev_get_io_channel(desc); 4598 SPDK_CU_ASSERT_FATAL(ioch != NULL); 4599 bdev_ch = 
spdk_io_channel_get_ctx(ioch); 4600 CU_ASSERT(TAILQ_EMPTY(&bdev_ch->io_submitted)); 4601 4602 fn_table.submit_request = stub_submit_request; 4603 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 4604 4605 /* Case 1: First test the request won't be split */ 4606 num_blocks = 32; 4607 4608 g_io_done = false; 4609 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, 0, num_blocks, 0); 4610 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 4611 rc = spdk_bdev_write_zeroes_blocks(desc, ioch, 0, num_blocks, io_done, NULL); 4612 CU_ASSERT_EQUAL(rc, 0); 4613 CU_ASSERT(g_io_done == false); 4614 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 4615 stub_complete_io(1); 4616 CU_ASSERT(g_io_done == true); 4617 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 4618 4619 /* Case 2: Test the split with 2 children requests */ 4620 max_write_zeroes_blocks = 8; 4621 bdev->max_write_zeroes = max_write_zeroes_blocks; 4622 num_blocks = max_write_zeroes_blocks * 2; 4623 offset = 0; 4624 4625 g_io_done = false; 4626 for (i = 0; i < 2; i++) { 4627 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, offset, max_write_zeroes_blocks, 4628 0); 4629 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 4630 offset += max_write_zeroes_blocks; 4631 } 4632 4633 rc = spdk_bdev_write_zeroes_blocks(desc, ioch, 0, num_blocks, io_done, NULL); 4634 CU_ASSERT_EQUAL(rc, 0); 4635 CU_ASSERT(g_io_done == false); 4636 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 4637 stub_complete_io(2); 4638 CU_ASSERT(g_io_done == true); 4639 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 4640 4641 /* Case 3: Test the split with 15 children requests, will finish 8 requests first */ 4642 num_children = 15; 4643 num_blocks = max_write_zeroes_blocks * num_children; 4644 g_io_done = false; 4645 offset = 0; 4646 for (i = 0; i < num_children; i++) { 4647 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, offset, max_write_zeroes_blocks, 4648 0); 4649 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 4650 offset += max_write_zeroes_blocks; 4651 } 4652 4653 rc = spdk_bdev_write_zeroes_blocks(desc, ioch, 0, num_blocks, io_done, NULL); 4654 CU_ASSERT_EQUAL(rc, 0); 4655 CU_ASSERT(g_io_done == false); 4656 4657 while (num_children > 0) { 4658 num_outstanding = spdk_min(num_children, SPDK_BDEV_MAX_CHILDREN_UNMAP_WRITE_ZEROES_REQS); 4659 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == num_outstanding); 4660 stub_complete_io(num_outstanding); 4661 num_children -= num_outstanding; 4662 } 4663 CU_ASSERT(g_io_done == true); 4664 4665 spdk_put_io_channel(ioch); 4666 spdk_bdev_close(desc); 4667 free_bdev(bdev); 4668 spdk_bdev_finish(bdev_fini_cb, NULL); 4669 poll_threads(); 4670 } 4671 4672 static void 4673 bdev_set_options_test(void) 4674 { 4675 struct spdk_bdev_opts bdev_opts = {}; 4676 int rc; 4677 4678 /* Case1: Do not set opts_size */ 4679 rc = spdk_bdev_set_opts(&bdev_opts); 4680 CU_ASSERT(rc == -1); 4681 4682 spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts)); 4683 bdev_opts.bdev_io_pool_size = 4; 4684 bdev_opts.bdev_io_cache_size = 2; 4685 bdev_opts.small_buf_pool_size = 4; 4686 4687 /* Case 2: Do not set valid small_buf_pool_size and large_buf_pool_size */ 4688 rc = spdk_bdev_set_opts(&bdev_opts); 4689 CU_ASSERT(rc == -1); 4690 4691 /* Case 3: Do not set valid large_buf_pool_size */ 4692 bdev_opts.small_buf_pool_size = BUF_SMALL_POOL_SIZE; 4693 bdev_opts.large_buf_pool_size = BUF_LARGE_POOL_SIZE - 1; 4694 rc = 
spdk_bdev_set_opts(&bdev_opts); 4695 CU_ASSERT(rc == -1); 4696 4697 /* Case4: set valid large buf_pool_size */ 4698 bdev_opts.large_buf_pool_size = BUF_LARGE_POOL_SIZE; 4699 rc = spdk_bdev_set_opts(&bdev_opts); 4700 CU_ASSERT(rc == 0); 4701 4702 /* Case5: Set different valid value for small and large buf pool */ 4703 bdev_opts.large_buf_pool_size = BUF_SMALL_POOL_SIZE + 3; 4704 bdev_opts.large_buf_pool_size = BUF_LARGE_POOL_SIZE + 3; 4705 rc = spdk_bdev_set_opts(&bdev_opts); 4706 CU_ASSERT(rc == 0); 4707 } 4708 4709 static uint64_t 4710 get_ns_time(void) 4711 { 4712 int rc; 4713 struct timespec ts; 4714 4715 rc = clock_gettime(CLOCK_MONOTONIC, &ts); 4716 CU_ASSERT(rc == 0); 4717 return ts.tv_sec * 1000 * 1000 * 1000 + ts.tv_nsec; 4718 } 4719 4720 static int 4721 rb_tree_get_height(struct spdk_bdev_name *bdev_name) 4722 { 4723 int h1, h2; 4724 4725 if (bdev_name == NULL) { 4726 return -1; 4727 } else { 4728 h1 = rb_tree_get_height(RB_LEFT(bdev_name, node)); 4729 h2 = rb_tree_get_height(RB_RIGHT(bdev_name, node)); 4730 4731 return spdk_max(h1, h2) + 1; 4732 } 4733 } 4734 4735 static void 4736 bdev_multi_allocation(void) 4737 { 4738 const int max_bdev_num = 1024 * 16; 4739 char name[max_bdev_num][10]; 4740 char noexist_name[] = "invalid_bdev"; 4741 struct spdk_bdev *bdev[max_bdev_num]; 4742 int i, j; 4743 uint64_t last_time; 4744 int bdev_num; 4745 int height; 4746 4747 for (j = 0; j < max_bdev_num; j++) { 4748 snprintf(name[j], sizeof(name[j]), "bdev%d", j); 4749 } 4750 4751 for (i = 0; i < 16; i++) { 4752 last_time = get_ns_time(); 4753 bdev_num = 1024 * (i + 1); 4754 for (j = 0; j < bdev_num; j++) { 4755 bdev[j] = allocate_bdev(name[j]); 4756 height = rb_tree_get_height(&bdev[j]->internal.bdev_name); 4757 CU_ASSERT(height <= (int)(spdk_u32log2(j + 1))); 4758 } 4759 SPDK_NOTICELOG("alloc bdev num %d takes %" PRIu64 " ms\n", bdev_num, 4760 (get_ns_time() - last_time) / 1000 / 1000); 4761 for (j = 0; j < bdev_num; j++) { 4762 CU_ASSERT(spdk_bdev_get_by_name(name[j]) != NULL); 4763 } 4764 CU_ASSERT(spdk_bdev_get_by_name(noexist_name) == NULL); 4765 4766 for (j = 0; j < bdev_num; j++) { 4767 free_bdev(bdev[j]); 4768 } 4769 for (j = 0; j < bdev_num; j++) { 4770 CU_ASSERT(spdk_bdev_get_by_name(name[j]) == NULL); 4771 } 4772 } 4773 } 4774 4775 static struct spdk_memory_domain *g_bdev_memory_domain = (struct spdk_memory_domain *) 0xf00df00d; 4776 4777 static int 4778 test_bdev_get_supported_dma_device_types_op(void *ctx, struct spdk_memory_domain **domains, 4779 int array_size) 4780 { 4781 if (array_size > 0 && domains) { 4782 domains[0] = g_bdev_memory_domain; 4783 } 4784 4785 return 1; 4786 } 4787 4788 static void 4789 bdev_get_memory_domains(void) 4790 { 4791 struct spdk_bdev_fn_table fn_table = { 4792 .get_memory_domains = test_bdev_get_supported_dma_device_types_op 4793 }; 4794 struct spdk_bdev bdev = { .fn_table = &fn_table }; 4795 struct spdk_memory_domain *domains[2] = {}; 4796 int rc; 4797 4798 /* bdev is NULL */ 4799 rc = spdk_bdev_get_memory_domains(NULL, domains, 2); 4800 CU_ASSERT(rc == -EINVAL); 4801 4802 /* domains is NULL */ 4803 rc = spdk_bdev_get_memory_domains(&bdev, NULL, 2); 4804 CU_ASSERT(rc == 1); 4805 4806 /* array size is 0 */ 4807 rc = spdk_bdev_get_memory_domains(&bdev, domains, 0); 4808 CU_ASSERT(rc == 1); 4809 4810 /* get_supported_dma_device_types op is set */ 4811 rc = spdk_bdev_get_memory_domains(&bdev, domains, 2); 4812 CU_ASSERT(rc == 1); 4813 CU_ASSERT(domains[0] == g_bdev_memory_domain); 4814 4815 /* get_supported_dma_device_types op is not set */ 4816 
fn_table.get_memory_domains = NULL; 4817 rc = spdk_bdev_get_memory_domains(&bdev, domains, 2); 4818 CU_ASSERT(rc == 0); 4819 } 4820 4821 int 4822 main(int argc, char **argv) 4823 { 4824 CU_pSuite suite = NULL; 4825 unsigned int num_failures; 4826 4827 CU_set_error_action(CUEA_ABORT); 4828 CU_initialize_registry(); 4829 4830 suite = CU_add_suite("bdev", null_init, null_clean); 4831 4832 CU_ADD_TEST(suite, bytes_to_blocks_test); 4833 CU_ADD_TEST(suite, num_blocks_test); 4834 CU_ADD_TEST(suite, io_valid_test); 4835 CU_ADD_TEST(suite, open_write_test); 4836 CU_ADD_TEST(suite, alias_add_del_test); 4837 CU_ADD_TEST(suite, get_device_stat_test); 4838 CU_ADD_TEST(suite, bdev_io_types_test); 4839 CU_ADD_TEST(suite, bdev_io_wait_test); 4840 CU_ADD_TEST(suite, bdev_io_spans_split_test); 4841 CU_ADD_TEST(suite, bdev_io_boundary_split_test); 4842 CU_ADD_TEST(suite, bdev_io_max_size_and_segment_split_test); 4843 CU_ADD_TEST(suite, bdev_io_mix_split_test); 4844 CU_ADD_TEST(suite, bdev_io_split_with_io_wait); 4845 CU_ADD_TEST(suite, bdev_io_alignment_with_boundary); 4846 CU_ADD_TEST(suite, bdev_io_alignment); 4847 CU_ADD_TEST(suite, bdev_histograms); 4848 CU_ADD_TEST(suite, bdev_write_zeroes); 4849 CU_ADD_TEST(suite, bdev_compare_and_write); 4850 CU_ADD_TEST(suite, bdev_compare); 4851 CU_ADD_TEST(suite, bdev_zcopy_write); 4852 CU_ADD_TEST(suite, bdev_zcopy_read); 4853 CU_ADD_TEST(suite, bdev_open_while_hotremove); 4854 CU_ADD_TEST(suite, bdev_close_while_hotremove); 4855 CU_ADD_TEST(suite, bdev_open_ext); 4856 CU_ADD_TEST(suite, bdev_set_io_timeout); 4857 CU_ADD_TEST(suite, lba_range_overlap); 4858 CU_ADD_TEST(suite, lock_lba_range_check_ranges); 4859 CU_ADD_TEST(suite, lock_lba_range_with_io_outstanding); 4860 CU_ADD_TEST(suite, lock_lba_range_overlapped); 4861 CU_ADD_TEST(suite, bdev_io_abort); 4862 CU_ADD_TEST(suite, bdev_unmap); 4863 CU_ADD_TEST(suite, bdev_write_zeroes_split_test); 4864 CU_ADD_TEST(suite, bdev_set_options_test); 4865 CU_ADD_TEST(suite, bdev_multi_allocation); 4866 CU_ADD_TEST(suite, bdev_get_memory_domains); 4867 4868 allocate_cores(1); 4869 allocate_threads(1); 4870 set_thread(0); 4871 4872 CU_basic_set_mode(CU_BRM_VERBOSE); 4873 CU_basic_run_tests(); 4874 num_failures = CU_get_number_of_failures(); 4875 CU_cleanup_registry(); 4876 4877 free_threads(); 4878 free_cores(); 4879 4880 return num_failures; 4881 } 4882
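
/* Note: the suite runs on a single simulated thread (allocate_threads(1) +
 * set_thread(0)), so every asynchronous completion is driven explicitly by
 * poll_threads() and stub_complete_io(). main() returns the CUnit failure
 * count, which makes the test binary's exit status non-zero if any
 * assertion failed.
 */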