/*-
 *   BSD LICENSE
 *
 *   Copyright (c) Intel Corporation. All rights reserved.
 *   Copyright (c) 2019 Mellanox Technologies LTD. All rights reserved.
 *   Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "spdk_cunit.h"

#include "common/lib/ut_multithread.c"
#include "unit/lib/json_mock.c"

#include "spdk/config.h"
/* HACK: disable VTune integration so the unit test doesn't need VTune headers and libs to build */
#undef SPDK_CONFIG_VTUNE

#include "bdev/bdev.c"

DEFINE_STUB(spdk_notify_send, uint64_t, (const char *type, const char *ctx), 0);
DEFINE_STUB(spdk_notify_type_register, struct spdk_notify_type *, (const char *type), NULL);

int g_status;
int g_count;
enum spdk_bdev_event_type g_event_type1;
enum spdk_bdev_event_type g_event_type2;
struct spdk_histogram_data *g_histogram;
void *g_unregister_arg;
int g_unregister_rc;

void
spdk_scsi_nvme_translate(const struct spdk_bdev_io *bdev_io,
			 int *sc, int *sk, int *asc, int *ascq)
{
}

static int
null_init(void)
{
	return 0;
}

static int
null_clean(void)
{
	return 0;
}

static int
stub_destruct(void *ctx)
{
	return 0;
}

struct ut_expected_io {
	uint8_t				type;
	uint64_t			offset;
	uint64_t			length;
	int				iovcnt;
	struct iovec			iov[BDEV_IO_NUM_CHILD_IOV];
	void				*md_buf;
	struct spdk_bdev_ext_io_opts	*ext_io_opts;
	TAILQ_ENTRY(ut_expected_io)	link;
};

struct bdev_ut_channel {
	TAILQ_HEAD(, spdk_bdev_io)	outstanding_io;
	uint32_t			outstanding_io_count;
	TAILQ_HEAD(, ut_expected_io)	expected_io;
};

static bool g_io_done;
static struct spdk_bdev_io *g_bdev_io;
static enum spdk_bdev_io_status g_io_status;
static enum spdk_bdev_io_status g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS;
static uint32_t g_bdev_ut_io_device;
static struct bdev_ut_channel *g_bdev_ut_channel;
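/*
 * Buffers and state used by the stubbed submit path below: the g_compare_*
 * globals let tests inject the payloads expected by READ/WRITE/COMPARE
 * requests, and the g_zcopy_* globals track the buffer handed out by a
 * zero-copy start request until the matching commit/release completes it.
 */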
static void *g_compare_read_buf;
static uint32_t g_compare_read_buf_len;
static void *g_compare_write_buf;
static uint32_t g_compare_write_buf_len;
static bool g_abort_done;
static enum spdk_bdev_io_status g_abort_status;
static void *g_zcopy_read_buf;
static uint32_t g_zcopy_read_buf_len;
static void *g_zcopy_write_buf;
static uint32_t g_zcopy_write_buf_len;
static struct spdk_bdev_io *g_zcopy_bdev_io;

static struct ut_expected_io *
ut_alloc_expected_io(uint8_t type, uint64_t offset, uint64_t length, int iovcnt)
{
	struct ut_expected_io *expected_io;

	expected_io = calloc(1, sizeof(*expected_io));
	SPDK_CU_ASSERT_FATAL(expected_io != NULL);

	expected_io->type = type;
	expected_io->offset = offset;
	expected_io->length = length;
	expected_io->iovcnt = iovcnt;

	return expected_io;
}

static void
ut_expected_io_set_iov(struct ut_expected_io *expected_io, int pos, void *base, size_t len)
{
	expected_io->iov[pos].iov_base = base;
	expected_io->iov[pos].iov_len = len;
}

static void
stub_submit_request(struct spdk_io_channel *_ch, struct spdk_bdev_io *bdev_io)
{
	struct bdev_ut_channel *ch = spdk_io_channel_get_ctx(_ch);
	struct ut_expected_io *expected_io;
	struct iovec *iov, *expected_iov;
	struct spdk_bdev_io *bio_to_abort;
	int i;

	g_bdev_io = bdev_io;

	if (g_compare_read_buf && bdev_io->type == SPDK_BDEV_IO_TYPE_READ) {
		uint32_t len = bdev_io->u.bdev.iovs[0].iov_len;

		CU_ASSERT(bdev_io->u.bdev.iovcnt == 1);
		CU_ASSERT(g_compare_read_buf_len == len);
		memcpy(bdev_io->u.bdev.iovs[0].iov_base, g_compare_read_buf, len);
	}

	if (g_compare_write_buf && bdev_io->type == SPDK_BDEV_IO_TYPE_WRITE) {
		uint32_t len = bdev_io->u.bdev.iovs[0].iov_len;

		CU_ASSERT(bdev_io->u.bdev.iovcnt == 1);
		CU_ASSERT(g_compare_write_buf_len == len);
		memcpy(g_compare_write_buf, bdev_io->u.bdev.iovs[0].iov_base, len);
	}

	if (g_compare_read_buf && bdev_io->type == SPDK_BDEV_IO_TYPE_COMPARE) {
		uint32_t len = bdev_io->u.bdev.iovs[0].iov_len;

		CU_ASSERT(bdev_io->u.bdev.iovcnt == 1);
		CU_ASSERT(g_compare_read_buf_len == len);
		if (memcmp(bdev_io->u.bdev.iovs[0].iov_base, g_compare_read_buf, len)) {
			g_io_exp_status = SPDK_BDEV_IO_STATUS_MISCOMPARE;
		}
	}

	if (bdev_io->type == SPDK_BDEV_IO_TYPE_ABORT) {
		if (g_io_exp_status == SPDK_BDEV_IO_STATUS_SUCCESS) {
			TAILQ_FOREACH(bio_to_abort, &ch->outstanding_io, module_link) {
				if (bio_to_abort == bdev_io->u.abort.bio_to_abort) {
					TAILQ_REMOVE(&ch->outstanding_io, bio_to_abort, module_link);
					ch->outstanding_io_count--;
					spdk_bdev_io_complete(bio_to_abort, SPDK_BDEV_IO_STATUS_FAILED);
					break;
				}
			}
		}
	}

	if (bdev_io->type == SPDK_BDEV_IO_TYPE_ZCOPY) {
		if (bdev_io->u.bdev.zcopy.start) {
			g_zcopy_bdev_io = bdev_io;
			if (bdev_io->u.bdev.zcopy.populate) {
				/* Start of a read */
				CU_ASSERT(g_zcopy_read_buf != NULL);
				CU_ASSERT(g_zcopy_read_buf_len > 0);
				bdev_io->u.bdev.iovs[0].iov_base = g_zcopy_read_buf;
				bdev_io->u.bdev.iovs[0].iov_len = g_zcopy_read_buf_len;
				bdev_io->u.bdev.iovcnt = 1;
			} else {
				/* Start of a write */
				CU_ASSERT(g_zcopy_write_buf != NULL);
				CU_ASSERT(g_zcopy_write_buf_len > 0);
				bdev_io->u.bdev.iovs[0].iov_base = g_zcopy_write_buf;
				bdev_io->u.bdev.iovs[0].iov_len = g_zcopy_write_buf_len;
				bdev_io->u.bdev.iovcnt = 1;
			}
		} else {
			if (bdev_io->u.bdev.zcopy.commit) {
				/* End of write */
				CU_ASSERT(bdev_io->u.bdev.iovs[0].iov_base == g_zcopy_write_buf);
				CU_ASSERT(bdev_io->u.bdev.iovs[0].iov_len == g_zcopy_write_buf_len);
				CU_ASSERT(bdev_io->u.bdev.iovcnt == 1);
				g_zcopy_write_buf = NULL;
				g_zcopy_write_buf_len = 0;
			} else {
				/* End of read */
				CU_ASSERT(bdev_io->u.bdev.iovs[0].iov_base == g_zcopy_read_buf);
				CU_ASSERT(bdev_io->u.bdev.iovs[0].iov_len == g_zcopy_read_buf_len);
				CU_ASSERT(bdev_io->u.bdev.iovcnt == 1);
				g_zcopy_read_buf = NULL;
				g_zcopy_read_buf_len = 0;
			}
		}
	}

	TAILQ_INSERT_TAIL(&ch->outstanding_io, bdev_io, module_link);
	ch->outstanding_io_count++;

	expected_io = TAILQ_FIRST(&ch->expected_io);
	if (expected_io == NULL) {
		return;
	}
	TAILQ_REMOVE(&ch->expected_io, expected_io, link);

	if (expected_io->type != SPDK_BDEV_IO_TYPE_INVALID) {
		CU_ASSERT(bdev_io->type == expected_io->type);
	}

	if (expected_io->md_buf != NULL) {
		CU_ASSERT(expected_io->md_buf == bdev_io->u.bdev.md_buf);
	}

	if (expected_io->length == 0) {
		free(expected_io);
		return;
	}

	CU_ASSERT(expected_io->offset == bdev_io->u.bdev.offset_blocks);
	CU_ASSERT(expected_io->length == bdev_io->u.bdev.num_blocks);

	if (expected_io->iovcnt == 0) {
		free(expected_io);
		/* UNMAP, WRITE_ZEROES and FLUSH don't have iovs, so we can just return now. */
		return;
	}

	CU_ASSERT(expected_io->iovcnt == bdev_io->u.bdev.iovcnt);
	for (i = 0; i < expected_io->iovcnt; i++) {
		iov = &bdev_io->u.bdev.iovs[i];
		expected_iov = &expected_io->iov[i];
		CU_ASSERT(iov->iov_len == expected_iov->iov_len);
		CU_ASSERT(iov->iov_base == expected_iov->iov_base);
	}

	if (expected_io->ext_io_opts) {
		CU_ASSERT(expected_io->ext_io_opts == bdev_io->internal.ext_opts);
	}

	free(expected_io);
}

static void
stub_submit_request_get_buf_cb(struct spdk_io_channel *_ch,
			       struct spdk_bdev_io *bdev_io, bool success)
{
	CU_ASSERT(success == true);

	stub_submit_request(_ch, bdev_io);
}

static void
stub_submit_request_get_buf(struct spdk_io_channel *_ch, struct spdk_bdev_io *bdev_io)
{
	spdk_bdev_io_get_buf(bdev_io, stub_submit_request_get_buf_cb,
			     bdev_io->u.bdev.num_blocks * bdev_io->bdev->blocklen);
}
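/*
 * Complete up to num_to_complete I/Os that the stub backend is holding, in
 * submission order, using g_io_exp_status as the completion status.  Returns
 * how many were actually completed, which lets tests assert on partial
 * completion of split requests.
 */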
static uint32_t
stub_complete_io(uint32_t num_to_complete)
{
	struct bdev_ut_channel *ch = g_bdev_ut_channel;
	struct spdk_bdev_io *bdev_io;
	static enum spdk_bdev_io_status io_status;
	uint32_t num_completed = 0;

	while (num_completed < num_to_complete) {
		if (TAILQ_EMPTY(&ch->outstanding_io)) {
			break;
		}
		bdev_io = TAILQ_FIRST(&ch->outstanding_io);
		TAILQ_REMOVE(&ch->outstanding_io, bdev_io, module_link);
		ch->outstanding_io_count--;
		io_status = g_io_exp_status == SPDK_BDEV_IO_STATUS_SUCCESS ? SPDK_BDEV_IO_STATUS_SUCCESS :
			    g_io_exp_status;
		spdk_bdev_io_complete(bdev_io, io_status);
		num_completed++;
	}

	return num_completed;
}

static struct spdk_io_channel *
bdev_ut_get_io_channel(void *ctx)
{
	return spdk_get_io_channel(&g_bdev_ut_io_device);
}

static bool g_io_types_supported[SPDK_BDEV_NUM_IO_TYPES] = {
	[SPDK_BDEV_IO_TYPE_READ]		= true,
	[SPDK_BDEV_IO_TYPE_WRITE]		= true,
	[SPDK_BDEV_IO_TYPE_COMPARE]		= true,
	[SPDK_BDEV_IO_TYPE_UNMAP]		= true,
	[SPDK_BDEV_IO_TYPE_FLUSH]		= true,
	[SPDK_BDEV_IO_TYPE_RESET]		= true,
	[SPDK_BDEV_IO_TYPE_NVME_ADMIN]		= true,
	[SPDK_BDEV_IO_TYPE_NVME_IO]		= true,
	[SPDK_BDEV_IO_TYPE_NVME_IO_MD]		= true,
	[SPDK_BDEV_IO_TYPE_WRITE_ZEROES]	= true,
	[SPDK_BDEV_IO_TYPE_ZCOPY]		= true,
	[SPDK_BDEV_IO_TYPE_ABORT]		= true,
};

static void
ut_enable_io_type(enum spdk_bdev_io_type io_type, bool enable)
{
	g_io_types_supported[io_type] = enable;
}

static bool
stub_io_type_supported(void *_bdev, enum spdk_bdev_io_type io_type)
{
	return g_io_types_supported[io_type];
}

static struct spdk_bdev_fn_table fn_table = {
	.destruct = stub_destruct,
	.submit_request = stub_submit_request,
	.get_io_channel = bdev_ut_get_io_channel,
	.io_type_supported = stub_io_type_supported,
};

static int
bdev_ut_create_ch(void *io_device, void *ctx_buf)
{
	struct bdev_ut_channel *ch = ctx_buf;

	CU_ASSERT(g_bdev_ut_channel == NULL);
	g_bdev_ut_channel = ch;

	TAILQ_INIT(&ch->outstanding_io);
	ch->outstanding_io_count = 0;
	TAILQ_INIT(&ch->expected_io);
	return 0;
}

static void
bdev_ut_destroy_ch(void *io_device, void *ctx_buf)
{
	CU_ASSERT(g_bdev_ut_channel != NULL);
	g_bdev_ut_channel = NULL;
}

struct spdk_bdev_module bdev_ut_if;

static int
bdev_ut_module_init(void)
{
	spdk_io_device_register(&g_bdev_ut_io_device, bdev_ut_create_ch, bdev_ut_destroy_ch,
				sizeof(struct bdev_ut_channel), NULL);
	spdk_bdev_module_init_done(&bdev_ut_if);
	return 0;
}

static void
bdev_ut_module_fini(void)
{
	spdk_io_device_unregister(&g_bdev_ut_io_device, NULL);
}

struct spdk_bdev_module bdev_ut_if = {
	.name = "bdev_ut",
	.module_init = bdev_ut_module_init,
	.module_fini = bdev_ut_module_fini,
	.async_init = true,
};

static void vbdev_ut_examine(struct spdk_bdev *bdev);

static int
vbdev_ut_module_init(void)
{
	return 0;
}

static void
vbdev_ut_module_fini(void)
{
}

struct spdk_bdev_module vbdev_ut_if = {
	.name = "vbdev_ut",
	.module_init = vbdev_ut_module_init,
	.module_fini = vbdev_ut_module_fini,
	.examine_config = vbdev_ut_examine,
};

SPDK_BDEV_MODULE_REGISTER(bdev_ut, &bdev_ut_if)
SPDK_BDEV_MODULE_REGISTER(vbdev_ut, &vbdev_ut_if)

static void
vbdev_ut_examine(struct spdk_bdev *bdev)
{
	spdk_bdev_module_examine_done(&vbdev_ut_if);
}

static struct spdk_bdev *
allocate_bdev(char *name)
{
	struct spdk_bdev *bdev;
	int rc;

	bdev = calloc(1, sizeof(*bdev));
	SPDK_CU_ASSERT_FATAL(bdev != NULL);

	bdev->name = name;
	bdev->fn_table = &fn_table;
	bdev->module = &bdev_ut_if;
	bdev->blockcnt = 1024;
	bdev->blocklen = 512;

	rc = spdk_bdev_register(bdev);
	CU_ASSERT(rc == 0);

	return bdev;
}
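/*
 * Like allocate_bdev(), but the bdev is attributed to the vbdev_ut module and
 * is registered with no capacity, which is fine for the claim/open permission
 * tests below that never submit I/O to these virtual bdevs.
 */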
static struct spdk_bdev *
allocate_vbdev(char *name)
{
	struct spdk_bdev *bdev;
	int rc;

	bdev = calloc(1, sizeof(*bdev));
	SPDK_CU_ASSERT_FATAL(bdev != NULL);

	bdev->name = name;
	bdev->fn_table = &fn_table;
	bdev->module = &vbdev_ut_if;

	rc = spdk_bdev_register(bdev);
	CU_ASSERT(rc == 0);

	return bdev;
}

static void
free_bdev(struct spdk_bdev *bdev)
{
	spdk_bdev_unregister(bdev, NULL, NULL);
	poll_threads();
	memset(bdev, 0xFF, sizeof(*bdev));
	free(bdev);
}

static void
free_vbdev(struct spdk_bdev *bdev)
{
	spdk_bdev_unregister(bdev, NULL, NULL);
	poll_threads();
	memset(bdev, 0xFF, sizeof(*bdev));
	free(bdev);
}

static void
get_device_stat_cb(struct spdk_bdev *bdev, struct spdk_bdev_io_stat *stat, void *cb_arg, int rc)
{
	const char *bdev_name;

	CU_ASSERT(bdev != NULL);
	CU_ASSERT(rc == 0);
	bdev_name = spdk_bdev_get_name(bdev);
	CU_ASSERT_STRING_EQUAL(bdev_name, "bdev0");

	free(stat);

	*(bool *)cb_arg = true;
}

static void
bdev_unregister_cb(void *cb_arg, int rc)
{
	g_unregister_arg = cb_arg;
	g_unregister_rc = rc;
}

static void
bdev_ut_event_cb(enum spdk_bdev_event_type type, struct spdk_bdev *bdev, void *event_ctx)
{
}

static void
bdev_open_cb1(enum spdk_bdev_event_type type, struct spdk_bdev *bdev, void *event_ctx)
{
	struct spdk_bdev_desc *desc = *(struct spdk_bdev_desc **)event_ctx;

	g_event_type1 = type;
	if (SPDK_BDEV_EVENT_REMOVE == type) {
		spdk_bdev_close(desc);
	}
}

static void
bdev_open_cb2(enum spdk_bdev_event_type type, struct spdk_bdev *bdev, void *event_ctx)
{
	struct spdk_bdev_desc *desc = *(struct spdk_bdev_desc **)event_ctx;

	g_event_type2 = type;
	if (SPDK_BDEV_EVENT_REMOVE == type) {
		spdk_bdev_close(desc);
	}
}

static void
get_device_stat_test(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_io_stat *stat;
	bool done;

	bdev = allocate_bdev("bdev0");
	stat = calloc(1, sizeof(struct spdk_bdev_io_stat));
	if (stat == NULL) {
		free_bdev(bdev);
		return;
	}

	done = false;
	spdk_bdev_get_device_stat(bdev, stat, get_device_stat_cb, &done);
	while (!done) { poll_threads(); }

	free_bdev(bdev);
}

static void
open_write_test(void)
{
	struct spdk_bdev *bdev[9];
	struct spdk_bdev_desc *desc[9] = {};
	int rc;

	/*
	 * Create a tree of bdevs to test various open w/ write cases.
	 *
	 * bdev0 through bdev3 are physical block devices, such as NVMe
	 * namespaces or Ceph block devices.
	 *
	 * bdev4 is a virtual bdev with multiple base bdevs.  This models
	 * caching or RAID use cases.
	 *
	 * bdev5 through bdev7 are all virtual bdevs built on the same base
	 * bdev, bdev2 (bdev7 also has bdev3 as a base).  This models
	 * partitioning or logical volume use cases.
	 *
	 * bdev7 is a virtual bdev with multiple base bdevs.  One of its base
	 * bdevs (bdev2) is shared with the other virtual bdevs bdev5 and
	 * bdev6.  This models caching, RAID, partitioning or logical volume
	 * use cases.
	 *
	 * bdev8 is a virtual bdev with multiple base bdevs, but these
	 * base bdevs are themselves virtual bdevs.
	 *
	 *                bdev8
	 *                  |
	 *            +----------+
	 *            |          |
	 *          bdev4      bdev5   bdev6   bdev7
	 *            |          |       |       |
	 *        +---+---+    +---+     +   +---+---+
	 *        |       |      \       |  /        \
	 *      bdev0   bdev1      bdev2            bdev3
	 */

	bdev[0] = allocate_bdev("bdev0");
	rc = spdk_bdev_module_claim_bdev(bdev[0], NULL, &bdev_ut_if);
	CU_ASSERT(rc == 0);

	bdev[1] = allocate_bdev("bdev1");
	rc = spdk_bdev_module_claim_bdev(bdev[1], NULL, &bdev_ut_if);
	CU_ASSERT(rc == 0);

	bdev[2] = allocate_bdev("bdev2");
	rc = spdk_bdev_module_claim_bdev(bdev[2], NULL, &bdev_ut_if);
	CU_ASSERT(rc == 0);

	bdev[3] = allocate_bdev("bdev3");
	rc = spdk_bdev_module_claim_bdev(bdev[3], NULL, &bdev_ut_if);
	CU_ASSERT(rc == 0);

	bdev[4] = allocate_vbdev("bdev4");
	rc = spdk_bdev_module_claim_bdev(bdev[4], NULL, &bdev_ut_if);
	CU_ASSERT(rc == 0);

	bdev[5] = allocate_vbdev("bdev5");
	rc = spdk_bdev_module_claim_bdev(bdev[5], NULL, &bdev_ut_if);
	CU_ASSERT(rc == 0);

	bdev[6] = allocate_vbdev("bdev6");

	bdev[7] = allocate_vbdev("bdev7");

	bdev[8] = allocate_vbdev("bdev8");

	/* Open bdev0 read-only.  This should succeed. */
	rc = spdk_bdev_open_ext("bdev0", false, bdev_ut_event_cb, NULL, &desc[0]);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(desc[0] != NULL);
	CU_ASSERT(bdev[0] == spdk_bdev_desc_get_bdev(desc[0]));
	spdk_bdev_close(desc[0]);

	/*
	 * Open bdev1 read/write.  This should fail since bdev1 has been claimed
	 * by a vbdev module.
	 */
	rc = spdk_bdev_open_ext("bdev1", true, bdev_ut_event_cb, NULL, &desc[1]);
	CU_ASSERT(rc == -EPERM);

	/*
	 * Open bdev4 read/write.  This should fail since bdev4 has been claimed
	 * by a vbdev module.
	 */
	rc = spdk_bdev_open_ext("bdev4", true, bdev_ut_event_cb, NULL, &desc[4]);
	CU_ASSERT(rc == -EPERM);

	/* Open bdev4 read-only.  This should succeed. */
	rc = spdk_bdev_open_ext("bdev4", false, bdev_ut_event_cb, NULL, &desc[4]);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(desc[4] != NULL);
	CU_ASSERT(bdev[4] == spdk_bdev_desc_get_bdev(desc[4]));
	spdk_bdev_close(desc[4]);

	/*
	 * Open bdev8 read/write.  This should succeed since it is a leaf
	 * bdev.
	 */
	rc = spdk_bdev_open_ext("bdev8", true, bdev_ut_event_cb, NULL, &desc[8]);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(desc[8] != NULL);
	CU_ASSERT(bdev[8] == spdk_bdev_desc_get_bdev(desc[8]));
	spdk_bdev_close(desc[8]);

	/*
	 * Open bdev5 read/write.  This should fail since bdev5 has been claimed
	 * by a vbdev module.
	 */
	rc = spdk_bdev_open_ext("bdev5", true, bdev_ut_event_cb, NULL, &desc[5]);
	CU_ASSERT(rc == -EPERM);

	/* Open bdev5 read-only.  This should succeed. */
	rc = spdk_bdev_open_ext("bdev5", false, bdev_ut_event_cb, NULL, &desc[5]);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(desc[5] != NULL);
	CU_ASSERT(bdev[5] == spdk_bdev_desc_get_bdev(desc[5]));
	spdk_bdev_close(desc[5]);

	free_vbdev(bdev[8]);

	free_vbdev(bdev[5]);
	free_vbdev(bdev[6]);
	free_vbdev(bdev[7]);

	free_vbdev(bdev[4]);

	free_bdev(bdev[0]);
	free_bdev(bdev[1]);
	free_bdev(bdev[2]);
	free_bdev(bdev[3]);
}

static void
bytes_to_blocks_test(void)
{
	struct spdk_bdev bdev;
	uint64_t offset_blocks, num_blocks;

	memset(&bdev, 0, sizeof(bdev));

	bdev.blocklen = 512;

	/* All parameters valid */
	offset_blocks = 0;
	num_blocks = 0;
	CU_ASSERT(bdev_bytes_to_blocks(&bdev, 512, &offset_blocks, 1024, &num_blocks) == 0);
	CU_ASSERT(offset_blocks == 1);
	CU_ASSERT(num_blocks == 2);

	/* Offset not a block multiple */
	CU_ASSERT(bdev_bytes_to_blocks(&bdev, 3, &offset_blocks, 512, &num_blocks) != 0);

	/* Length not a block multiple */
	CU_ASSERT(bdev_bytes_to_blocks(&bdev, 512, &offset_blocks, 3, &num_blocks) != 0);

	/* In case blocklen is not a power of two */
	bdev.blocklen = 100;
	CU_ASSERT(bdev_bytes_to_blocks(&bdev, 100, &offset_blocks, 200, &num_blocks) == 0);
	CU_ASSERT(offset_blocks == 1);
	CU_ASSERT(num_blocks == 2);

	/* Offset not a block multiple */
	CU_ASSERT(bdev_bytes_to_blocks(&bdev, 3, &offset_blocks, 100, &num_blocks) != 0);

	/* Length not a block multiple */
	CU_ASSERT(bdev_bytes_to_blocks(&bdev, 100, &offset_blocks, 3, &num_blocks) != 0);
}

static void
num_blocks_test(void)
{
	struct spdk_bdev bdev;
	struct spdk_bdev_desc *desc = NULL;
	int rc;

	memset(&bdev, 0, sizeof(bdev));
	bdev.name = "num_blocks";
	bdev.fn_table = &fn_table;
	bdev.module = &bdev_ut_if;
	spdk_bdev_register(&bdev);
	spdk_bdev_notify_blockcnt_change(&bdev, 50);

	/* Growing block number */
	CU_ASSERT(spdk_bdev_notify_blockcnt_change(&bdev, 70) == 0);
	/* Shrinking block number */
	CU_ASSERT(spdk_bdev_notify_blockcnt_change(&bdev, 30) == 0);

	rc = spdk_bdev_open_ext("num_blocks", false, bdev_open_cb1, &desc, &desc);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(desc != NULL);
	CU_ASSERT(&bdev == spdk_bdev_desc_get_bdev(desc));

	/* Growing block number */
	CU_ASSERT(spdk_bdev_notify_blockcnt_change(&bdev, 80) == 0);
	/* Shrinking block number is not allowed while the bdev is open */
	CU_ASSERT(spdk_bdev_notify_blockcnt_change(&bdev, 20) != 0);

	g_event_type1 = 0xFF;
	/* Growing block number */
	CU_ASSERT(spdk_bdev_notify_blockcnt_change(&bdev, 90) == 0);

	poll_threads();
	CU_ASSERT_EQUAL(g_event_type1, SPDK_BDEV_EVENT_RESIZE);

	g_event_type1 = 0xFF;
	/* Growing block number and closing */
	CU_ASSERT(spdk_bdev_notify_blockcnt_change(&bdev, 100) == 0);

	spdk_bdev_close(desc);
	spdk_bdev_unregister(&bdev, NULL, NULL);

	poll_threads();

	/* Callback is not called for a closed device */
	CU_ASSERT_EQUAL(g_event_type1, 0xFF);
}
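/*
 * io_valid_test() drives the internal bdev_io_valid_blocks() helper directly:
 * an I/O is valid only if offset_blocks + num_blocks stays within
 * bdev->blockcnt, including the overflow case near UINT64_MAX exercised below.
 */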
static void
io_valid_test(void)
{
	struct spdk_bdev bdev;

	memset(&bdev, 0, sizeof(bdev));

	bdev.blocklen = 512;
	CU_ASSERT(pthread_mutex_init(&bdev.internal.mutex, NULL) == 0);

	spdk_bdev_notify_blockcnt_change(&bdev, 100);

	/* All parameters valid */
	CU_ASSERT(bdev_io_valid_blocks(&bdev, 1, 2) == true);

	/* Last valid block */
	CU_ASSERT(bdev_io_valid_blocks(&bdev, 99, 1) == true);

	/* Offset past end of bdev */
	CU_ASSERT(bdev_io_valid_blocks(&bdev, 100, 1) == false);

	/* Offset + length past end of bdev */
	CU_ASSERT(bdev_io_valid_blocks(&bdev, 99, 2) == false);

	/* Offset near end of uint64_t range (2^64 - 1) */
	CU_ASSERT(bdev_io_valid_blocks(&bdev, 18446744073709551615ULL, 1) == false);

	CU_ASSERT(pthread_mutex_destroy(&bdev.internal.mutex) == 0);
}

static void
alias_add_del_test(void)
{
	struct spdk_bdev *bdev[3];
	int rc;

	/* Creating and registering bdevs */
	bdev[0] = allocate_bdev("bdev0");
	SPDK_CU_ASSERT_FATAL(bdev[0] != 0);

	bdev[1] = allocate_bdev("bdev1");
	SPDK_CU_ASSERT_FATAL(bdev[1] != 0);

	bdev[2] = allocate_bdev("bdev2");
	SPDK_CU_ASSERT_FATAL(bdev[2] != 0);

	poll_threads();

	/*
	 * Try adding an alias identical to the name.
	 * The alias is identical to the name, so it can not be added to the aliases list.
	 */
	rc = spdk_bdev_alias_add(bdev[0], bdev[0]->name);
	CU_ASSERT(rc == -EEXIST);

	/*
	 * Try to add an empty alias.
	 * This one should fail.
	 */
	rc = spdk_bdev_alias_add(bdev[0], NULL);
	CU_ASSERT(rc == -EINVAL);

	/* Try adding the same alias to two different registered bdevs */

	/* Alias is used for the first time, so this one should pass */
	rc = spdk_bdev_alias_add(bdev[0], "proper alias 0");
	CU_ASSERT(rc == 0);

	/* Alias was already added to another bdev, so this one should fail */
	rc = spdk_bdev_alias_add(bdev[1], "proper alias 0");
	CU_ASSERT(rc == -EEXIST);

	/* Alias is used for the first time, so this one should pass */
	rc = spdk_bdev_alias_add(bdev[1], "proper alias 1");
	CU_ASSERT(rc == 0);

	/* Try removing aliases from registered bdevs */

	/* Alias is not on the bdev's aliases list, so this one should fail */
	rc = spdk_bdev_alias_del(bdev[0], "not existing");
	CU_ASSERT(rc == -ENOENT);

	/* Alias is present on the bdev's aliases list, so this one should pass */
	rc = spdk_bdev_alias_del(bdev[0], "proper alias 0");
	CU_ASSERT(rc == 0);

	/* Alias is present on the bdev's aliases list, so this one should pass */
	rc = spdk_bdev_alias_del(bdev[1], "proper alias 1");
	CU_ASSERT(rc == 0);

	/* Try to remove the name instead of an alias: this should fail, since the name cannot be changed or removed */
	rc = spdk_bdev_alias_del(bdev[0], bdev[0]->name);
	CU_ASSERT(rc != 0);

	/* Try to delete all aliases from an empty alias list */
	spdk_bdev_alias_del_all(bdev[2]);
	SPDK_CU_ASSERT_FATAL(TAILQ_EMPTY(&bdev[2]->aliases));

	/* Try to delete all aliases from a non-empty alias list */
	rc = spdk_bdev_alias_add(bdev[2], "alias0");
	CU_ASSERT(rc == 0);
	rc = spdk_bdev_alias_add(bdev[2], "alias1");
	CU_ASSERT(rc == 0);
	spdk_bdev_alias_del_all(bdev[2]);
	CU_ASSERT(TAILQ_EMPTY(&bdev[2]->aliases));

	/* Unregister and free bdevs */
	spdk_bdev_unregister(bdev[0], NULL, NULL);
	spdk_bdev_unregister(bdev[1], NULL, NULL);
	spdk_bdev_unregister(bdev[2], NULL, NULL);

	poll_threads();

	free(bdev[0]);
	free(bdev[1]);
	free(bdev[2]);
}
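/*
 * Shared completion callback for the I/O tests below: it records the final
 * status in g_io_done/g_io_status and frees the bdev_io, except for a zcopy
 * start, whose bdev_io must stay alive until the matching zcopy end is
 * submitted.
 */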
static void
io_done(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
{
	g_io_done = true;
	g_io_status = bdev_io->internal.status;
	if ((bdev_io->type == SPDK_BDEV_IO_TYPE_ZCOPY) &&
	    (bdev_io->u.bdev.zcopy.start)) {
		g_zcopy_bdev_io = bdev_io;
	} else {
		spdk_bdev_free_io(bdev_io);
		g_zcopy_bdev_io = NULL;
	}
}

static void
bdev_init_cb(void *arg, int rc)
{
	CU_ASSERT(rc == 0);
}

static void
bdev_fini_cb(void *arg)
{
}

struct bdev_ut_io_wait_entry {
	struct spdk_bdev_io_wait_entry	entry;
	struct spdk_io_channel		*io_ch;
	struct spdk_bdev_desc		*desc;
	bool				submitted;
};

static void
io_wait_cb(void *arg)
{
	struct bdev_ut_io_wait_entry *entry = arg;
	int rc;

	rc = spdk_bdev_read_blocks(entry->desc, entry->io_ch, NULL, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	entry->submitted = true;
}

static void
bdev_io_types_test(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc = NULL;
	struct spdk_io_channel *io_ch;
	struct spdk_bdev_opts bdev_opts = {};
	int rc;

	spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts));
	bdev_opts.bdev_io_pool_size = 4;
	bdev_opts.bdev_io_cache_size = 2;

	rc = spdk_bdev_set_opts(&bdev_opts);
	CU_ASSERT(rc == 0);
	spdk_bdev_initialize(bdev_init_cb, NULL);
	poll_threads();

	bdev = allocate_bdev("bdev0");

	rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc);
	CU_ASSERT(rc == 0);
	poll_threads();
	SPDK_CU_ASSERT_FATAL(desc != NULL);
	CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc));
	io_ch = spdk_bdev_get_io_channel(desc);
	CU_ASSERT(io_ch != NULL);

	/* WRITE and WRITE ZEROES are not supported */
	ut_enable_io_type(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, false);
	ut_enable_io_type(SPDK_BDEV_IO_TYPE_WRITE, false);
	rc = spdk_bdev_write_zeroes_blocks(desc, io_ch, 0, 128, io_done, NULL);
	CU_ASSERT(rc == -ENOTSUP);
	ut_enable_io_type(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, true);
	ut_enable_io_type(SPDK_BDEV_IO_TYPE_WRITE, true);

	spdk_put_io_channel(io_ch);
	spdk_bdev_close(desc);
	free_bdev(bdev);
	spdk_bdev_finish(bdev_fini_cb, NULL);
	poll_threads();
}

static void
bdev_io_wait_test(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc = NULL;
	struct spdk_io_channel *io_ch;
	struct spdk_bdev_opts bdev_opts = {};
	struct bdev_ut_io_wait_entry io_wait_entry;
	struct bdev_ut_io_wait_entry io_wait_entry2;
	int rc;

	spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts));
	bdev_opts.bdev_io_pool_size = 4;
	bdev_opts.bdev_io_cache_size = 2;

	rc = spdk_bdev_set_opts(&bdev_opts);
	CU_ASSERT(rc == 0);
	spdk_bdev_initialize(bdev_init_cb, NULL);
	poll_threads();

	bdev = allocate_bdev("bdev0");

	rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc);
	CU_ASSERT(rc == 0);
	poll_threads();
	SPDK_CU_ASSERT_FATAL(desc != NULL);
	CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc));
	io_ch = spdk_bdev_get_io_channel(desc);
	CU_ASSERT(io_ch != NULL);

	rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 4);

	rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, NULL);
	CU_ASSERT(rc == -ENOMEM);
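	/*
	 * The bdev_io pool (size 4) is now exhausted, so queue wait entries.
	 * Each entry's cb_fn is invoked once a bdev_io is freed back to the
	 * pool, at which point the callback retries its read submission.
	 */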
	io_wait_entry.entry.bdev = bdev;
	io_wait_entry.entry.cb_fn = io_wait_cb;
	io_wait_entry.entry.cb_arg = &io_wait_entry;
	io_wait_entry.io_ch = io_ch;
	io_wait_entry.desc = desc;
	io_wait_entry.submitted = false;
	/* Cannot use the same io_wait_entry for two different calls. */
	memcpy(&io_wait_entry2, &io_wait_entry, sizeof(io_wait_entry));
	io_wait_entry2.entry.cb_arg = &io_wait_entry2;

	/* Queue two I/O waits. */
	rc = spdk_bdev_queue_io_wait(bdev, io_ch, &io_wait_entry.entry);
	CU_ASSERT(rc == 0);
	CU_ASSERT(io_wait_entry.submitted == false);
	rc = spdk_bdev_queue_io_wait(bdev, io_ch, &io_wait_entry2.entry);
	CU_ASSERT(rc == 0);
	CU_ASSERT(io_wait_entry2.submitted == false);

	stub_complete_io(1);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 4);
	CU_ASSERT(io_wait_entry.submitted == true);
	CU_ASSERT(io_wait_entry2.submitted == false);

	stub_complete_io(1);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 4);
	CU_ASSERT(io_wait_entry2.submitted == true);

	stub_complete_io(4);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);

	spdk_put_io_channel(io_ch);
	spdk_bdev_close(desc);
	free_bdev(bdev);
	spdk_bdev_finish(bdev_fini_cb, NULL);
	poll_threads();
}

static void
bdev_io_spans_split_test(void)
{
	struct spdk_bdev bdev;
	struct spdk_bdev_io bdev_io;
	struct iovec iov[BDEV_IO_NUM_CHILD_IOV];

	memset(&bdev, 0, sizeof(bdev));
	bdev_io.u.bdev.iovs = iov;

	bdev_io.type = SPDK_BDEV_IO_TYPE_READ;
	bdev.optimal_io_boundary = 0;
	bdev.max_segment_size = 0;
	bdev.max_num_segments = 0;
	bdev_io.bdev = &bdev;

	/* bdev has no optimal_io_boundary and no max segment size or count set - so this should return false. */
	CU_ASSERT(bdev_io_should_split(&bdev_io) == false);

	bdev.split_on_optimal_io_boundary = true;
	bdev.optimal_io_boundary = 32;
	bdev_io.type = SPDK_BDEV_IO_TYPE_RESET;

	/* RESETs are not based on LBAs - so this should return false. */
	CU_ASSERT(bdev_io_should_split(&bdev_io) == false);

	bdev_io.type = SPDK_BDEV_IO_TYPE_READ;
	bdev_io.u.bdev.offset_blocks = 0;
	bdev_io.u.bdev.num_blocks = 32;

	/* This I/O runs right up to, but does not cross, the boundary - so this should return false. */
	CU_ASSERT(bdev_io_should_split(&bdev_io) == false);

	bdev_io.u.bdev.num_blocks = 33;

	/* This I/O spans a boundary. */
	CU_ASSERT(bdev_io_should_split(&bdev_io) == true);

	bdev_io.u.bdev.num_blocks = 32;
	bdev.max_segment_size = 512 * 32;
	bdev.max_num_segments = 1;
	bdev_io.u.bdev.iovcnt = 1;
	iov[0].iov_len = 512;

	/* Does not cross a boundary and does not exceed max_segment_size or max_num_segments */
	CU_ASSERT(bdev_io_should_split(&bdev_io) == false);

	bdev.split_on_optimal_io_boundary = false;
	bdev.max_segment_size = 512;
	bdev.max_num_segments = 1;
	bdev_io.u.bdev.iovcnt = 2;

	/* Exceeds max_num_segments */
	CU_ASSERT(bdev_io_should_split(&bdev_io) == true);

	bdev.max_num_segments = 2;
	iov[0].iov_len = 513;
	iov[1].iov_len = 512;

	/* Exceeds max_segment_size */
	CU_ASSERT(bdev_io_should_split(&bdev_io) == true);
}

static void
bdev_io_boundary_split_test(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc = NULL;
	struct spdk_io_channel *io_ch;
	struct spdk_bdev_opts bdev_opts = {};
	struct iovec iov[BDEV_IO_NUM_CHILD_IOV * 2];
	struct ut_expected_io *expected_io;
	void *md_buf = (void *)0xFF000000;
	uint64_t i;
	int rc;

	spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts));
	bdev_opts.bdev_io_pool_size = 512;
	bdev_opts.bdev_io_cache_size = 64;

	rc = spdk_bdev_set_opts(&bdev_opts);
	CU_ASSERT(rc == 0);
	spdk_bdev_initialize(bdev_init_cb, NULL);

	bdev = allocate_bdev("bdev0");

	rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(desc != NULL);
	CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc));
	io_ch = spdk_bdev_get_io_channel(desc);
	CU_ASSERT(io_ch != NULL);

	bdev->optimal_io_boundary = 16;
	bdev->split_on_optimal_io_boundary = false;

	g_io_done = false;

	/* First test that the I/O does not get split if split_on_optimal_io_boundary == false. */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 14, 8, 1);
	ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 8 * 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 8, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);

	bdev->split_on_optimal_io_boundary = true;
	bdev->md_interleave = false;
	bdev->md_len = 8;
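	/*
	 * With separate metadata (md_interleave == false, md_len == 8), each
	 * child I/O gets the parent md_buf advanced by (blocks already
	 * consumed) * md_len; e.g. a child starting 2 blocks into the parent
	 * I/O sees md_buf + 2 * 8.
	 */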
	/* Now test that a single-vector command is split correctly.
	 * Offset 14, length 8, payload 0xF000
	 *  Child - Offset 14, length 2, payload 0xF000
	 *  Child - Offset 16, length 6, payload 0xF000 + 2 * 512
	 *
	 * Set up the expected values before calling spdk_bdev_read_blocks
	 */
	g_io_done = false;
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 14, 2, 1);
	expected_io->md_buf = md_buf;
	ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 2 * 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 16, 6, 1);
	expected_io->md_buf = md_buf + 2 * 8;
	ut_expected_io_set_iov(expected_io, 0, (void *)(0xF000 + 2 * 512), 6 * 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	/* spdk_bdev_read_blocks will submit the first child immediately. */
	rc = spdk_bdev_read_blocks_with_md(desc, io_ch, (void *)0xF000, md_buf,
					   14, 8, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2);
	stub_complete_io(2);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);

	/* Now set up a more complex, multi-vector command that needs to be split,
	 * including splitting iovecs.
	 */
	iov[0].iov_base = (void *)0x10000;
	iov[0].iov_len = 512;
	iov[1].iov_base = (void *)0x20000;
	iov[1].iov_len = 20 * 512;
	iov[2].iov_base = (void *)0x30000;
	iov[2].iov_len = 11 * 512;

	g_io_done = false;
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 14, 2, 2);
	expected_io->md_buf = md_buf;
	ut_expected_io_set_iov(expected_io, 0, (void *)0x10000, 512);
	ut_expected_io_set_iov(expected_io, 1, (void *)0x20000, 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 16, 16, 1);
	expected_io->md_buf = md_buf + 2 * 8;
	ut_expected_io_set_iov(expected_io, 0, (void *)(0x20000 + 512), 16 * 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 32, 14, 2);
	expected_io->md_buf = md_buf + 18 * 8;
	ut_expected_io_set_iov(expected_io, 0, (void *)(0x20000 + 17 * 512), 3 * 512);
	ut_expected_io_set_iov(expected_io, 1, (void *)0x30000, 11 * 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	rc = spdk_bdev_writev_blocks_with_md(desc, io_ch, iov, 3, md_buf,
					     14, 32, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 3);
	stub_complete_io(3);
	CU_ASSERT(g_io_done == true);

	/* Test a multi-vector command that needs to be split on the optimal I/O
	 * boundary and then split further due to the capacity of the child iovs.
	 */
	for (i = 0; i < BDEV_IO_NUM_CHILD_IOV * 2; i++) {
		iov[i].iov_base = (void *)((i + 1) * 0x10000);
		iov[i].iov_len = 512;
	}

	bdev->optimal_io_boundary = BDEV_IO_NUM_CHILD_IOV;
	g_io_done = false;
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0, BDEV_IO_NUM_CHILD_IOV,
					   BDEV_IO_NUM_CHILD_IOV);
	expected_io->md_buf = md_buf;
	for (i = 0; i < BDEV_IO_NUM_CHILD_IOV; i++) {
		ut_expected_io_set_iov(expected_io, i, (void *)((i + 1) * 0x10000), 512);
	}
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, BDEV_IO_NUM_CHILD_IOV,
					   BDEV_IO_NUM_CHILD_IOV, BDEV_IO_NUM_CHILD_IOV);
	expected_io->md_buf = md_buf + BDEV_IO_NUM_CHILD_IOV * 8;
	for (i = 0; i < BDEV_IO_NUM_CHILD_IOV; i++) {
		ut_expected_io_set_iov(expected_io, i,
				       (void *)((i + 1 + BDEV_IO_NUM_CHILD_IOV) * 0x10000), 512);
	}
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	rc = spdk_bdev_readv_blocks_with_md(desc, io_ch, iov, BDEV_IO_NUM_CHILD_IOV * 2, md_buf,
					    0, BDEV_IO_NUM_CHILD_IOV * 2, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);

	/* Test a multi-vector command that needs to be split on the I/O boundary and
	 * then split further due to the capacity of the child iovs.  In this case,
	 * the length of the rest of the iovec array within an I/O boundary is a
	 * multiple of the block size.
	 */

	/* Fill the iovec array for exactly one boundary.  The iovec count for this
	 * boundary is BDEV_IO_NUM_CHILD_IOV + 1, which exceeds the capacity of the
	 * child iovs.
	 */
	for (i = 0; i < BDEV_IO_NUM_CHILD_IOV - 2; i++) {
		iov[i].iov_base = (void *)((i + 1) * 0x10000);
		iov[i].iov_len = 512;
	}
	for (i = BDEV_IO_NUM_CHILD_IOV - 2; i < BDEV_IO_NUM_CHILD_IOV; i++) {
		iov[i].iov_base = (void *)((i + 1) * 0x10000);
		iov[i].iov_len = 256;
	}
	iov[BDEV_IO_NUM_CHILD_IOV].iov_base = (void *)((BDEV_IO_NUM_CHILD_IOV + 1) * 0x10000);
	iov[BDEV_IO_NUM_CHILD_IOV].iov_len = 512;

	/* Add an extra iovec to trigger split */
	iov[BDEV_IO_NUM_CHILD_IOV + 1].iov_base = (void *)((BDEV_IO_NUM_CHILD_IOV + 2) * 0x10000);
	iov[BDEV_IO_NUM_CHILD_IOV + 1].iov_len = 512;

	bdev->optimal_io_boundary = BDEV_IO_NUM_CHILD_IOV;
	g_io_done = false;
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0,
					   BDEV_IO_NUM_CHILD_IOV - 1, BDEV_IO_NUM_CHILD_IOV);
	expected_io->md_buf = md_buf;
	for (i = 0; i < BDEV_IO_NUM_CHILD_IOV - 2; i++) {
		ut_expected_io_set_iov(expected_io, i,
				       (void *)((i + 1) * 0x10000), 512);
	}
	for (i = BDEV_IO_NUM_CHILD_IOV - 2; i < BDEV_IO_NUM_CHILD_IOV; i++) {
		ut_expected_io_set_iov(expected_io, i,
				       (void *)((i + 1) * 0x10000), 256);
	}
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, BDEV_IO_NUM_CHILD_IOV - 1,
					   1, 1);
	expected_io->md_buf = md_buf + (BDEV_IO_NUM_CHILD_IOV - 1) * 8;
	ut_expected_io_set_iov(expected_io, 0,
			       (void *)((BDEV_IO_NUM_CHILD_IOV + 1) * 0x10000), 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, BDEV_IO_NUM_CHILD_IOV,
					   1, 1);
	expected_io->md_buf = md_buf + BDEV_IO_NUM_CHILD_IOV * 8;
	ut_expected_io_set_iov(expected_io, 0,
			       (void *)((BDEV_IO_NUM_CHILD_IOV + 2) * 0x10000), 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	rc = spdk_bdev_readv_blocks_with_md(desc, io_ch, iov, BDEV_IO_NUM_CHILD_IOV + 2, md_buf,
					    0, BDEV_IO_NUM_CHILD_IOV + 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2);
	stub_complete_io(2);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);

	/* Test a multi-vector command that needs to be split on the I/O boundary and
	 * then split further due to the capacity of the child iovs.  In this case,
	 * the child request offset should be rewound to the last aligned offset so
	 * that the request completes successfully.
	 */
	for (i = 0; i < BDEV_IO_NUM_CHILD_IOV - 1; i++) {
		iov[i].iov_base = (void *)((i + 1) * 0x10000);
		iov[i].iov_len = 512;
	}
	iov[BDEV_IO_NUM_CHILD_IOV - 1].iov_base = (void *)(BDEV_IO_NUM_CHILD_IOV * 0x10000);
	iov[BDEV_IO_NUM_CHILD_IOV - 1].iov_len = 256;

	iov[BDEV_IO_NUM_CHILD_IOV].iov_base = (void *)((BDEV_IO_NUM_CHILD_IOV + 1) * 0x10000);
	iov[BDEV_IO_NUM_CHILD_IOV].iov_len = 256;

	iov[BDEV_IO_NUM_CHILD_IOV + 1].iov_base = (void *)((BDEV_IO_NUM_CHILD_IOV + 2) * 0x10000);
	iov[BDEV_IO_NUM_CHILD_IOV + 1].iov_len = 512;

	bdev->optimal_io_boundary = BDEV_IO_NUM_CHILD_IOV;
	g_io_done = false;
	g_io_status = 0;
	/* The first expected io should run from offset 0 to BDEV_IO_NUM_CHILD_IOV - 1 */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0,
					   BDEV_IO_NUM_CHILD_IOV - 1, BDEV_IO_NUM_CHILD_IOV - 1);
	expected_io->md_buf = md_buf;
	for (i = 0; i < BDEV_IO_NUM_CHILD_IOV - 1; i++) {
		ut_expected_io_set_iov(expected_io, i,
				       (void *)((i + 1) * 0x10000), 512);
	}
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
	/* The second expected io should run from offset BDEV_IO_NUM_CHILD_IOV - 1 to BDEV_IO_NUM_CHILD_IOV */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, BDEV_IO_NUM_CHILD_IOV - 1,
					   1, 2);
	expected_io->md_buf = md_buf + (BDEV_IO_NUM_CHILD_IOV - 1) * 8;
	ut_expected_io_set_iov(expected_io, 0,
			       (void *)(BDEV_IO_NUM_CHILD_IOV * 0x10000), 256);
	ut_expected_io_set_iov(expected_io, 1,
			       (void *)((BDEV_IO_NUM_CHILD_IOV + 1) * 0x10000), 256);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
	/* The third expected io should run from offset BDEV_IO_NUM_CHILD_IOV to BDEV_IO_NUM_CHILD_IOV + 1 */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, BDEV_IO_NUM_CHILD_IOV,
					   1, 1);
	expected_io->md_buf = md_buf + BDEV_IO_NUM_CHILD_IOV * 8;
	ut_expected_io_set_iov(expected_io, 0,
			       (void *)((BDEV_IO_NUM_CHILD_IOV + 2) * 0x10000), 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	rc = spdk_bdev_readv_blocks_with_md(desc, io_ch, iov, BDEV_IO_NUM_CHILD_IOV * 2, md_buf,
					    0, BDEV_IO_NUM_CHILD_IOV + 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2);
	stub_complete_io(2);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);

	/* Test a multi-vector command that needs to be split due to the I/O boundary
	 * and the capacity of the child iovs.  In particular, test the case where the
	 * command is split due to the capacity of the child iovs and the tail address
	 * is not aligned with the block size, so it is rewound to the aligned address.
	 *
	 * The iovecs used in the read request are complex but are based on the data
	 * collected from a real issue.  We change the base addresses but keep the
	 * lengths so as not to lose the credibility of the test.
	 */
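	/*
	 * With 512-byte blocks and optimal_io_boundary = 128, each boundary
	 * window is 64 KiB.  The first child below consumes iov[0]..iov[31]:
	 * 31 * 1024 + 32768 = 64512 bytes = 126 blocks, i.e. it is capped by
	 * the 32-entry child iov table rather than by the boundary itself.
	 */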
	bdev->optimal_io_boundary = 128;
	g_io_done = false;
	g_io_status = 0;

	for (i = 0; i < 31; i++) {
		iov[i].iov_base = (void *)(0xFEED0000000 + (i << 20));
		iov[i].iov_len = 1024;
	}
	iov[31].iov_base = (void *)0xFEED1F00000;
	iov[31].iov_len = 32768;
	iov[32].iov_base = (void *)0xFEED2000000;
	iov[32].iov_len = 160;
	iov[33].iov_base = (void *)0xFEED2100000;
	iov[33].iov_len = 4096;
	iov[34].iov_base = (void *)0xFEED2200000;
	iov[34].iov_len = 4096;
	iov[35].iov_base = (void *)0xFEED2300000;
	iov[35].iov_len = 4096;
	iov[36].iov_base = (void *)0xFEED2400000;
	iov[36].iov_len = 4096;
	iov[37].iov_base = (void *)0xFEED2500000;
	iov[37].iov_len = 4096;
	iov[38].iov_base = (void *)0xFEED2600000;
	iov[38].iov_len = 4096;
	iov[39].iov_base = (void *)0xFEED2700000;
	iov[39].iov_len = 4096;
	iov[40].iov_base = (void *)0xFEED2800000;
	iov[40].iov_len = 4096;
	iov[41].iov_base = (void *)0xFEED2900000;
	iov[41].iov_len = 4096;
	iov[42].iov_base = (void *)0xFEED2A00000;
	iov[42].iov_len = 4096;
	iov[43].iov_base = (void *)0xFEED2B00000;
	iov[43].iov_len = 12288;
	iov[44].iov_base = (void *)0xFEED2C00000;
	iov[44].iov_len = 8192;
	iov[45].iov_base = (void *)0xFEED2F00000;
	iov[45].iov_len = 4096;
	iov[46].iov_base = (void *)0xFEED3000000;
	iov[46].iov_len = 4096;
	iov[47].iov_base = (void *)0xFEED3100000;
	iov[47].iov_len = 4096;
	iov[48].iov_base = (void *)0xFEED3200000;
	iov[48].iov_len = 24576;
	iov[49].iov_base = (void *)0xFEED3300000;
	iov[49].iov_len = 16384;
	iov[50].iov_base = (void *)0xFEED3400000;
	iov[50].iov_len = 12288;
	iov[51].iov_base = (void *)0xFEED3500000;
	iov[51].iov_len = 4096;
	iov[52].iov_base = (void *)0xFEED3600000;
	iov[52].iov_len = 4096;
	iov[53].iov_base = (void *)0xFEED3700000;
	iov[53].iov_len = 4096;
	iov[54].iov_base = (void *)0xFEED3800000;
	iov[54].iov_len = 28672;
	iov[55].iov_base = (void *)0xFEED3900000;
	iov[55].iov_len = 20480;
	iov[56].iov_base = (void *)0xFEED3A00000;
	iov[56].iov_len = 4096;
	iov[57].iov_base = (void *)0xFEED3B00000;
	iov[57].iov_len = 12288;
	iov[58].iov_base = (void *)0xFEED3C00000;
	iov[58].iov_len = 4096;
	iov[59].iov_base = (void *)0xFEED3D00000;
	iov[59].iov_len = 4096;
	iov[60].iov_base = (void *)0xFEED3E00000;
	iov[60].iov_len = 352;

	/* The 1st child IO must run from iov[0] to iov[31], split by the capacity
	 * of the child iovs.
	 */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0, 126, 32);
	expected_io->md_buf = md_buf;
	for (i = 0; i < 32; i++) {
		ut_expected_io_set_iov(expected_io, i, iov[i].iov_base, iov[i].iov_len);
	}
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	/* The 2nd child IO must run from iov[32] to the first 864 bytes of iov[33],
	 * split by the I/O boundary requirement.
	 */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 126, 2, 2);
	expected_io->md_buf = md_buf + 126 * 8;
	ut_expected_io_set_iov(expected_io, 0, iov[32].iov_base, iov[32].iov_len);
	ut_expected_io_set_iov(expected_io, 1, iov[33].iov_base, 864);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	/* The 3rd child IO must run from the remaining 3232 bytes of iov[33] to
	 * the first 864 bytes of iov[46], split by the I/O boundary requirement.
	 */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 128, 128, 14);
	expected_io->md_buf = md_buf + 128 * 8;
	ut_expected_io_set_iov(expected_io, 0, (void *)((uintptr_t)iov[33].iov_base + 864),
			       iov[33].iov_len - 864);
	ut_expected_io_set_iov(expected_io, 1, iov[34].iov_base, iov[34].iov_len);
	ut_expected_io_set_iov(expected_io, 2, iov[35].iov_base, iov[35].iov_len);
	ut_expected_io_set_iov(expected_io, 3, iov[36].iov_base, iov[36].iov_len);
	ut_expected_io_set_iov(expected_io, 4, iov[37].iov_base, iov[37].iov_len);
	ut_expected_io_set_iov(expected_io, 5, iov[38].iov_base, iov[38].iov_len);
	ut_expected_io_set_iov(expected_io, 6, iov[39].iov_base, iov[39].iov_len);
	ut_expected_io_set_iov(expected_io, 7, iov[40].iov_base, iov[40].iov_len);
	ut_expected_io_set_iov(expected_io, 8, iov[41].iov_base, iov[41].iov_len);
	ut_expected_io_set_iov(expected_io, 9, iov[42].iov_base, iov[42].iov_len);
	ut_expected_io_set_iov(expected_io, 10, iov[43].iov_base, iov[43].iov_len);
	ut_expected_io_set_iov(expected_io, 11, iov[44].iov_base, iov[44].iov_len);
	ut_expected_io_set_iov(expected_io, 12, iov[45].iov_base, iov[45].iov_len);
	ut_expected_io_set_iov(expected_io, 13, iov[46].iov_base, 864);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	/* The 4th child IO must run from the remaining 3232 bytes of iov[46] to the
	 * first 864 bytes of iov[52], split by the I/O boundary requirement.
	 */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 256, 128, 7);
	expected_io->md_buf = md_buf + 256 * 8;
	ut_expected_io_set_iov(expected_io, 0, (void *)((uintptr_t)iov[46].iov_base + 864),
			       iov[46].iov_len - 864);
	ut_expected_io_set_iov(expected_io, 1, iov[47].iov_base, iov[47].iov_len);
	ut_expected_io_set_iov(expected_io, 2, iov[48].iov_base, iov[48].iov_len);
	ut_expected_io_set_iov(expected_io, 3, iov[49].iov_base, iov[49].iov_len);
	ut_expected_io_set_iov(expected_io, 4, iov[50].iov_base, iov[50].iov_len);
	ut_expected_io_set_iov(expected_io, 5, iov[51].iov_base, iov[51].iov_len);
	ut_expected_io_set_iov(expected_io, 6, iov[52].iov_base, 864);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	/* The 5th child IO must run from the remaining 3232 bytes of iov[52] to
	 * the first 4960 bytes of iov[57], split by the I/O boundary requirement.
	 */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 384, 128, 6);
	expected_io->md_buf = md_buf + 384 * 8;
	ut_expected_io_set_iov(expected_io, 0, (void *)((uintptr_t)iov[52].iov_base + 864),
			       iov[52].iov_len - 864);
	ut_expected_io_set_iov(expected_io, 1, iov[53].iov_base, iov[53].iov_len);
	ut_expected_io_set_iov(expected_io, 2, iov[54].iov_base, iov[54].iov_len);
	ut_expected_io_set_iov(expected_io, 3, iov[55].iov_base, iov[55].iov_len);
	ut_expected_io_set_iov(expected_io, 4, iov[56].iov_base, iov[56].iov_len);
	ut_expected_io_set_iov(expected_io, 5, iov[57].iov_base, 4960);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	/* The 6th child IO must run from the remaining 7328 bytes of iov[57]
	 * to the first 3936 bytes of iov[59], split by the capacity of the child iovs.
	 */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 512, 30, 3);
	expected_io->md_buf = md_buf + 512 * 8;
	ut_expected_io_set_iov(expected_io, 0, (void *)((uintptr_t)iov[57].iov_base + 4960),
			       iov[57].iov_len - 4960);
	ut_expected_io_set_iov(expected_io, 1, iov[58].iov_base, iov[58].iov_len);
	ut_expected_io_set_iov(expected_io, 2, iov[59].iov_base, 3936);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	/* The 7th child IO runs from the remaining 160 bytes of iov[59] and iov[60]. */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 542, 1, 2);
	expected_io->md_buf = md_buf + 542 * 8;
	ut_expected_io_set_iov(expected_io, 0, (void *)((uintptr_t)iov[59].iov_base + 3936),
			       iov[59].iov_len - 3936);
	ut_expected_io_set_iov(expected_io, 1, iov[60].iov_base, iov[60].iov_len);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	rc = spdk_bdev_readv_blocks_with_md(desc, io_ch, iov, 61, md_buf,
					    0, 543, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 5);
	stub_complete_io(5);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
	CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS);

	/* Test a WRITE_ZEROES that would span an I/O boundary.  WRITE_ZEROES should not be
	 * split, so test that.
	 */
	bdev->optimal_io_boundary = 15;
	g_io_done = false;
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, 9, 36, 0);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	rc = spdk_bdev_write_zeroes_blocks(desc, io_ch, 9, 36, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == true);

	/* Test an UNMAP.  This should also not be split. */
	bdev->optimal_io_boundary = 16;
	g_io_done = false;
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_UNMAP, 15, 2, 0);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	rc = spdk_bdev_unmap_blocks(desc, io_ch, 15, 2, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == true);

	/* Test a FLUSH.  This should also not be split. */
	bdev->optimal_io_boundary = 16;
	g_io_done = false;
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_FLUSH, 15, 2, 0);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	rc = spdk_bdev_flush_blocks(desc, io_ch, 15, 2, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == true);

	CU_ASSERT(TAILQ_EMPTY(&g_bdev_ut_channel->expected_io));

	/* Child requests return an error status */
	bdev->optimal_io_boundary = 16;
	iov[0].iov_base = (void *)0x10000;
	iov[0].iov_len = 512 * 64;
	g_io_exp_status = SPDK_BDEV_IO_STATUS_FAILED;
	g_io_done = false;
	g_io_status = SPDK_BDEV_IO_STATUS_SUCCESS;

	rc = spdk_bdev_readv_blocks(desc, io_ch, iov, 1, 1, 64, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 5);
	stub_complete_io(4);
	CU_ASSERT(g_io_done == false);
	CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED);

	/* Test that a multi-vector command is terminated with failure, before the
	 * splitting process continues, when one of its child I/Os fails.
	 * The multi-vector command is the same as the one above that needs to be
	 * split on the I/O boundary and then split further due to the capacity of
	 * the child iovs.
	 */
	for (i = 0; i < BDEV_IO_NUM_CHILD_IOV - 1; i++) {
		iov[i].iov_base = (void *)((i + 1) * 0x10000);
		iov[i].iov_len = 512;
	}
	iov[BDEV_IO_NUM_CHILD_IOV - 1].iov_base = (void *)(BDEV_IO_NUM_CHILD_IOV * 0x10000);
	iov[BDEV_IO_NUM_CHILD_IOV - 1].iov_len = 256;

	iov[BDEV_IO_NUM_CHILD_IOV].iov_base = (void *)((BDEV_IO_NUM_CHILD_IOV + 1) * 0x10000);
	iov[BDEV_IO_NUM_CHILD_IOV].iov_len = 256;

	iov[BDEV_IO_NUM_CHILD_IOV + 1].iov_base = (void *)((BDEV_IO_NUM_CHILD_IOV + 2) * 0x10000);
	iov[BDEV_IO_NUM_CHILD_IOV + 1].iov_len = 512;

	bdev->optimal_io_boundary = BDEV_IO_NUM_CHILD_IOV;

	g_io_exp_status = SPDK_BDEV_IO_STATUS_FAILED;
	g_io_done = false;
	g_io_status = SPDK_BDEV_IO_STATUS_SUCCESS;

	rc = spdk_bdev_readv_blocks(desc, io_ch, iov, BDEV_IO_NUM_CHILD_IOV * 2, 0,
				    BDEV_IO_NUM_CHILD_IOV + 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED);

	g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS;

	/* For this test we will create the following conditions to hit the code path
	 * where we are trying to send an IO following a split that has no iovs because
	 * we had to trim them for alignment reasons.
	 *
	 * - 16K boundary, our IO will start at offset 0 with a length of 0x4200
	 * - Our IOVs are 0x212 in size so that we run into the 16K boundary at child IOV
	 *   position 30 and overshoot by 0x2e.
	 * - That means we'll send the IO and loop back to pick up the remaining bytes at
	 *   child IOV index 31.  When we do, we find that index 31 has to be trimmed away
	 *   entirely for alignment, which eliminates that vector, so we just send the first
	 *   split IO with 31 vectors and let the completion pick up the last 2 vectors.
	 */
	bdev->optimal_io_boundary = 32;
	bdev->split_on_optimal_io_boundary = true;
	g_io_done = false;

	/* Init all parent IOVs to 0x212 */
	for (i = 0; i < BDEV_IO_NUM_CHILD_IOV + 2; i++) {
		iov[i].iov_base = (void *)((i + 1) * 0x10000);
		iov[i].iov_len = 0x212;
	}

	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0, BDEV_IO_NUM_CHILD_IOV,
					   BDEV_IO_NUM_CHILD_IOV - 1);
	/* expect 0-29 to be 1:1 with the parent iov */
	for (i = 0; i < BDEV_IO_NUM_CHILD_IOV - 2; i++) {
		ut_expected_io_set_iov(expected_io, i, iov[i].iov_base, iov[i].iov_len);
	}

	/* expect index 30 to be shortened to 0x1e4 (0x212 - 0x2e) because of the alignment,
	 * where 0x2e is the amount we overshot the 16K boundary
	 */
	ut_expected_io_set_iov(expected_io, BDEV_IO_NUM_CHILD_IOV - 2,
			       (void *)(iov[BDEV_IO_NUM_CHILD_IOV - 2].iov_base), 0x1e4);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	/* 2nd child IO will have 2 remaining vectors, one to pick up from the one that was
	 * shortened that takes it to the next boundary and then a final one to get us to
	 * 0x4200 bytes for the IO.
	 */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, BDEV_IO_NUM_CHILD_IOV,
					   1, 2);
	/* position 30 picked up the remaining bytes to the next boundary */
	ut_expected_io_set_iov(expected_io, 0,
			       (void *)(iov[BDEV_IO_NUM_CHILD_IOV - 2].iov_base + 0x1e4), 0x2e);

	/* position 31 picked up the rest of the transfer to get us to 0x4200 */
	ut_expected_io_set_iov(expected_io, 1,
			       (void *)(iov[BDEV_IO_NUM_CHILD_IOV - 1].iov_base), 0x1d2);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	rc = spdk_bdev_readv_blocks(desc, io_ch, iov, BDEV_IO_NUM_CHILD_IOV + 1, 0,
				    BDEV_IO_NUM_CHILD_IOV + 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);

	spdk_put_io_channel(io_ch);
	spdk_bdev_close(desc);
	free_bdev(bdev);
	spdk_bdev_finish(bdev_fini_cb, NULL);
	poll_threads();
}

static void
bdev_io_max_size_and_segment_split_test(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc = NULL;
	struct spdk_io_channel *io_ch;
	struct spdk_bdev_opts bdev_opts = {};
	struct iovec iov[BDEV_IO_NUM_CHILD_IOV * 2];
	struct ut_expected_io *expected_io;
	uint64_t i;
	int rc;

	spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts));
	bdev_opts.bdev_io_pool_size = 512;
	bdev_opts.bdev_io_cache_size = 64;

	bdev_opts.opts_size = sizeof(bdev_opts);
	rc = spdk_bdev_set_opts(&bdev_opts);
	CU_ASSERT(rc == 0);
	spdk_bdev_initialize(bdev_init_cb, NULL);

	bdev = allocate_bdev("bdev0");

	rc = spdk_bdev_open_ext(bdev->name, true, bdev_ut_event_cb, NULL, &desc);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(desc != NULL);
	io_ch = spdk_bdev_get_io_channel(desc);
	CU_ASSERT(io_ch != NULL);

	bdev->split_on_optimal_io_boundary = false;
	bdev->optimal_io_boundary = 0;

	/* Case 0: max_num_segments == 0 (no segment count limit),
	 * but the 2 * 512 buffer exceeds max_segment_size == 512.
	 */
	bdev->max_segment_size = 512;
	bdev->max_num_segments = 0;
	g_io_done = false;

	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 14, 2, 2);
	ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 512);
	ut_expected_io_set_iov(expected_io, 1, (void *)(0xF000 + 512), 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 2, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);

	/* Case 1: max_segment_size == 0 (no segment size limit),
	 * but the iov count 2 exceeds max_num_segments == 1.
	 */
	bdev->max_segment_size = 0;
	bdev->max_num_segments = 1;
	g_io_done = false;

	iov[0].iov_base = (void *)0x10000;
	iov[0].iov_len = 512;
	iov[1].iov_base = (void *)0x20000;
	iov[1].iov_len = 8 * 512;

	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 14, 1, 1);
	ut_expected_io_set_iov(expected_io, 0, iov[0].iov_base, iov[0].iov_len);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 15, 8, 1);
	ut_expected_io_set_iov(expected_io, 0, iov[1].iov_base, iov[1].iov_len);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	rc = spdk_bdev_readv_blocks(desc, io_ch, iov, 2, 14, 9, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2);
	stub_complete_io(2);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);

	/* Test that a non-vector command is split correctly.
	 * Set up the expected values before calling spdk_bdev_read_blocks
	 */
	bdev->max_segment_size = 512;
	bdev->max_num_segments = 1;
	g_io_done = false;

	/* Child IO 0 */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 14, 1, 1);
	ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	/* Child IO 1 */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 15, 1, 1);
	ut_expected_io_set_iov(expected_io, 0, (void *)(0xF000 + 1 * 512), 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	/* spdk_bdev_read_blocks will submit both children immediately.
	 */
	rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 2, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2);
	stub_complete_io(2);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);

	/* Now set up a more complex, multi-vector command that needs to be split,
	 * including splitting iovecs.
	 */
	bdev->max_segment_size = 2 * 512;
	bdev->max_num_segments = 1;
	g_io_done = false;

	iov[0].iov_base = (void *)0x10000;
	iov[0].iov_len = 2 * 512;
	iov[1].iov_base = (void *)0x20000;
	iov[1].iov_len = 4 * 512;
	iov[2].iov_base = (void *)0x30000;
	iov[2].iov_len = 6 * 512;

	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 14, 2, 1);
	ut_expected_io_set_iov(expected_io, 0, iov[0].iov_base, 512 * 2);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	/* iov[1] is split into 2 max_segment_size segments, each becoming its own child IO */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 16, 2, 1);
	ut_expected_io_set_iov(expected_io, 0, iov[1].iov_base, 512 * 2);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 18, 2, 1);
	ut_expected_io_set_iov(expected_io, 0, iov[1].iov_base + 512 * 2, 512 * 2);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	/* iov[2] is split into 3 max_segment_size segments, each becoming its own child IO */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 20, 2, 1);
	ut_expected_io_set_iov(expected_io, 0, iov[2].iov_base, 512 * 2);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 22, 2, 1);
	ut_expected_io_set_iov(expected_io, 0, iov[2].iov_base + 512 * 2, 512 * 2);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 24, 2, 1);
	ut_expected_io_set_iov(expected_io, 0, iov[2].iov_base + 512 * 4, 512 * 2);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	rc = spdk_bdev_writev_blocks(desc, io_ch, iov, 3, 14, 12, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 6);
	stub_complete_io(6);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);

	/* Test a multi-vector command that needs to be split by max_segment_size and
	 * then split further due to the capacity of the parent IO's child iovs.
	 */
	bdev->max_segment_size = 512;
	bdev->max_num_segments = 1;
	g_io_done = false;

	for (i = 0; i < BDEV_IO_NUM_CHILD_IOV; i++) {
		iov[i].iov_base = (void *)((i + 1) * 0x10000);
		iov[i].iov_len = 512 * 2;
	}

	/* Each input iov is split into 2 single-block child IOs, so half of the
	 * input iovs consume all of the child iov entries of a single split round.
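	 * (e.g. with BDEV_IO_NUM_CHILD_IOV == 32: the first 16 parent iovs yield
	 *  32 single-block children in round one, the remaining 16 in round two.)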
	 */
	for (i = 0; i < BDEV_IO_NUM_CHILD_IOV / 2; i++) {
		expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 2 * i, 1, 1);
		ut_expected_io_set_iov(expected_io, 0, iov[i].iov_base, 512);
		TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

		expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 2 * i + 1, 1, 1);
		ut_expected_io_set_iov(expected_io, 0, iov[i].iov_base + 512, 512);
		TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
	}

	/* The remaining iovs are split in the second round */
	for (i = BDEV_IO_NUM_CHILD_IOV / 2; i < BDEV_IO_NUM_CHILD_IOV; i++) {
		expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, i * 2, 1, 1);
		ut_expected_io_set_iov(expected_io, 0, iov[i].iov_base, 512);
		TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

		expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, i * 2 + 1, 1, 1);
		ut_expected_io_set_iov(expected_io, 0, iov[i].iov_base + 512, 512);
		TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
	}

	rc = spdk_bdev_readv_blocks(desc, io_ch, iov, BDEV_IO_NUM_CHILD_IOV, 0,
				    BDEV_IO_NUM_CHILD_IOV * 2, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == BDEV_IO_NUM_CHILD_IOV);
	stub_complete_io(BDEV_IO_NUM_CHILD_IOV);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == BDEV_IO_NUM_CHILD_IOV);
	stub_complete_io(BDEV_IO_NUM_CHILD_IOV);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);

	/* An error case: a split child IO that is not a multiple of the block size
	 * fails the request.
	 */
	bdev->max_segment_size = 512;
	bdev->max_num_segments = 1;
	g_io_done = false;

	iov[0].iov_base = (void *)0x10000;
	iov[0].iov_len = 512 + 256;
	iov[1].iov_base = (void *)0x20000;
	iov[1].iov_len = 256;

	/* iov[0] is split into 512 and 256 bytes.  256 bytes is less than a block,
	 * and the next round of splitting finds that its first child IO is smaller
	 * than the block size, so the request exits with an error.
	 */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0, 1, 1);
	ut_expected_io_set_iov(expected_io, 0, iov[0].iov_base, 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	rc = spdk_bdev_readv_blocks(desc, io_ch, iov, 2, 0, 2, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	/* First child IO is OK */
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);

	/* Error exit */
	stub_complete_io(1);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);

	/* Test a multi-vector command that needs to be split by max_segment_size and
	 * then split further due to the capacity of child iovs.
	 *
	 * In this case, the last two iovs need to be split, but that would exceed the
	 * capacity of child iovs, so they must wait until the first batch has completed.
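	 * (The first child stages all BDEV_IO_NUM_CHILD_IOV entries: 30 unsplit
	 *  single-block iovs plus the first 2-block iov split in two.  The trailing
	 *  2-block iov needs 2 more entries, so it becomes the second child.)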
	 */
	bdev->max_segment_size = 512;
	bdev->max_num_segments = BDEV_IO_NUM_CHILD_IOV;
	g_io_done = false;

	for (i = 0; i < BDEV_IO_NUM_CHILD_IOV - 2; i++) {
		iov[i].iov_base = (void *)((i + 1) * 0x10000);
		iov[i].iov_len = 512;
	}
	for (i = BDEV_IO_NUM_CHILD_IOV - 2; i < BDEV_IO_NUM_CHILD_IOV; i++) {
		iov[i].iov_base = (void *)((i + 1) * 0x10000);
		iov[i].iov_len = 512 * 2;
	}

	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0,
					   BDEV_IO_NUM_CHILD_IOV, BDEV_IO_NUM_CHILD_IOV);
	/* iovs 0 up to (BDEV_IO_NUM_CHILD_IOV - 2) will not be split */
	for (i = 0; i < BDEV_IO_NUM_CHILD_IOV - 2; i++) {
		ut_expected_io_set_iov(expected_io, i, iov[i].iov_base, iov[i].iov_len);
	}
	/* iov (BDEV_IO_NUM_CHILD_IOV - 2) is split */
	ut_expected_io_set_iov(expected_io, i, iov[i].iov_base, 512);
	ut_expected_io_set_iov(expected_io, i + 1, iov[i].iov_base + 512, 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	/* The last iov's entries exceed the parent IO's child iov capacity,
	 * so they are split off into the next round.
	 */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, BDEV_IO_NUM_CHILD_IOV, 2, 2);
	ut_expected_io_set_iov(expected_io, 0, iov[i + 1].iov_base, 512);
	ut_expected_io_set_iov(expected_io, 1, iov[i + 1].iov_base + 512, 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	rc = spdk_bdev_readv_blocks(desc, io_ch, iov, BDEV_IO_NUM_CHILD_IOV, 0,
				    BDEV_IO_NUM_CHILD_IOV + 2, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == false);

	/* Next round */
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);

	/* This case is similar to the previous one, but the IO composed of the last
	 * few child iov entries is not enough for a full blocklen, so those entries
	 * cannot be put into this IO and must wait for the next one.
	 */
	bdev->max_segment_size = 512;
	bdev->max_num_segments = BDEV_IO_NUM_CHILD_IOV;
	g_io_done = false;

	for (i = 0; i < BDEV_IO_NUM_CHILD_IOV - 2; i++) {
		iov[i].iov_base = (void *)((i + 1) * 0x10000);
		iov[i].iov_len = 512;
	}

	for (i = BDEV_IO_NUM_CHILD_IOV - 2; i < BDEV_IO_NUM_CHILD_IOV + 2; i++) {
		iov[i].iov_base = (void *)((i + 1) * 0x10000);
		iov[i].iov_len = 128;
	}

	/* The first child's iovcnt isn't BDEV_IO_NUM_CHILD_IOV but BDEV_IO_NUM_CHILD_IOV - 2,
	 * because the 2 iov entries left over are not enough for a blocklen.
	 */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0,
					   BDEV_IO_NUM_CHILD_IOV - 2, BDEV_IO_NUM_CHILD_IOV - 2);
	for (i = 0; i < BDEV_IO_NUM_CHILD_IOV - 2; i++) {
		ut_expected_io_set_iov(expected_io, i, iov[i].iov_base, iov[i].iov_len);
	}
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	/* The second child IO waits until the first child IO completes before executing,
	 * because together the iovcnts of the two IOs exceed the parent IO's child iovcnt
	 * (iov entries BDEV_IO_NUM_CHILD_IOV - 2 to BDEV_IO_NUM_CHILD_IOV + 2).
	 */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, BDEV_IO_NUM_CHILD_IOV - 2,
					   1, 4);
	ut_expected_io_set_iov(expected_io, 0, iov[i].iov_base, iov[i].iov_len);
	ut_expected_io_set_iov(expected_io, 1, iov[i + 1].iov_base, iov[i + 1].iov_len);
	ut_expected_io_set_iov(expected_io, 2, iov[i + 2].iov_base, iov[i + 2].iov_len);
	ut_expected_io_set_iov(expected_io, 3, iov[i + 3].iov_base, iov[i + 3].iov_len);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	rc = spdk_bdev_readv_blocks(desc, io_ch, iov, BDEV_IO_NUM_CHILD_IOV + 2, 0,
				    BDEV_IO_NUM_CHILD_IOV - 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);

	/* A very complicated case.  Each sg entry exceeds max_segment_size and
	 * needs to be split.  At the same time, each child IO must be a multiple
	 * of blocklen, and the total child iovcnt exceeds the parent's iovcnt.
	 */
	bdev->max_segment_size = 512 + 128;
	bdev->max_num_segments = 3;
	g_io_done = false;

	for (i = 0; i < BDEV_IO_NUM_CHILD_IOV - 2; i++) {
		iov[i].iov_base = (void *)((i + 1) * 0x10000);
		iov[i].iov_len = 512 + 256;
	}

	for (i = BDEV_IO_NUM_CHILD_IOV - 2; i < BDEV_IO_NUM_CHILD_IOV + 2; i++) {
		iov[i].iov_base = (void *)((i + 1) * 0x10000);
		iov[i].iov_len = 512 + 128;
	}

	/* Each for() iteration generates 3 child IOs that occupy 9 child iov entries
	 * (3 * 9 = 27 entries over the loop) and consumes 4 parent iov entries and
	 * 6 blocks.
	 */
	for (i = 0; i < 3; i++) {
		uint32_t j = i * 4;
		expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, i * 6, 2, 3);
		ut_expected_io_set_iov(expected_io, 0, iov[j].iov_base, 640);
		ut_expected_io_set_iov(expected_io, 1, iov[j].iov_base + 640, 128);
		ut_expected_io_set_iov(expected_io, 2, iov[j + 1].iov_base, 256);
		TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

		/* Each child IO must be a multiple of blocklen, so iov[j + 2] must be
		 * split.  If the third entry were also added, the multiple of blocklen
		 * could not be guaranteed, but the trimmed entry still occupies one of
		 * the parent's child iov entries.
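		 * (Per-iteration arithmetic: 1st child 640 + 128 + 256 = 1024 bytes = 2
		 *  blocks; 2nd child 512 + 512 = 2 blocks; 3rd child 256 + 640 + 128 = 2
		 *  blocks; 8 data entries plus the trimmed one make 9 staged entries.)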
		 */
		expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, i * 6 + 2, 2, 2);
		ut_expected_io_set_iov(expected_io, 0, iov[j + 1].iov_base + 256, 512);
		ut_expected_io_set_iov(expected_io, 1, iov[j + 2].iov_base, 512);
		TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

		expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, i * 6 + 4, 2, 3);
		ut_expected_io_set_iov(expected_io, 0, iov[j + 2].iov_base + 512, 256);
		ut_expected_io_set_iov(expected_io, 1, iov[j + 3].iov_base, 640);
		ut_expected_io_set_iov(expected_io, 2, iov[j + 3].iov_base + 640, 128);
		TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
	}

	/* Child iov position at 27, the 10th child IO:
	 * parent iov entry index is 3 * 4 and block offset is 3 * 6.
	 */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 18, 2, 3);
	ut_expected_io_set_iov(expected_io, 0, iov[12].iov_base, 640);
	ut_expected_io_set_iov(expected_io, 1, iov[12].iov_base + 640, 128);
	ut_expected_io_set_iov(expected_io, 2, iov[13].iov_base, 256);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	/* Child iov position at 30, the 11th child IO */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 20, 2, 2);
	ut_expected_io_set_iov(expected_io, 0, iov[13].iov_base + 256, 512);
	ut_expected_io_set_iov(expected_io, 1, iov[14].iov_base, 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	/* The 2nd split round, iovpos is 0: the 12th child IO */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 22, 2, 3);
	ut_expected_io_set_iov(expected_io, 0, iov[14].iov_base + 512, 256);
	ut_expected_io_set_iov(expected_io, 1, iov[15].iov_base, 640);
	ut_expected_io_set_iov(expected_io, 2, iov[15].iov_base + 640, 128);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	/* Generate 9 more child IOs occupying 27 more child iov entries:
	 * again 4 parent iov entries and 6 blocks per for() iteration.
	 * The parent iov index starts from 16 and the block offset starts from 24.
	 */
	for (i = 0; i < 3; i++) {
		uint32_t j = i * 4 + 16;
		uint32_t offset = i * 6 + 24;
		expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, offset, 2, 3);
		ut_expected_io_set_iov(expected_io, 0, iov[j].iov_base, 640);
		ut_expected_io_set_iov(expected_io, 1, iov[j].iov_base + 640, 128);
		ut_expected_io_set_iov(expected_io, 2, iov[j + 1].iov_base, 256);
		TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

		/* Each child IO must be a multiple of blocklen, so iov[j + 2] must be
		 * split.  If the third entry were also added, the multiple of blocklen
		 * could not be guaranteed, but the trimmed entry still occupies one of
		 * the parent's child iov entries.
		 */
		expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, offset + 2, 2, 2);
		ut_expected_io_set_iov(expected_io, 0, iov[j + 1].iov_base + 256, 512);
		ut_expected_io_set_iov(expected_io, 1, iov[j + 2].iov_base, 512);
		TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

		expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, offset + 4, 2, 3);
		ut_expected_io_set_iov(expected_io, 0, iov[j + 2].iov_base + 512, 256);
		ut_expected_io_set_iov(expected_io, 1, iov[j + 3].iov_base, 640);
		ut_expected_io_set_iov(expected_io, 2, iov[j + 3].iov_base + 640, 128);
		TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
	}

	/* The 22nd child IO, child iov position at 30 */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 42, 1, 1);
	ut_expected_io_set_iov(expected_io, 0, iov[28].iov_base, 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	/* The third round */
	/* Here is the 23rd child IO and child iovpos is 0 */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 43, 2, 3);
	ut_expected_io_set_iov(expected_io, 0, iov[28].iov_base + 512, 256);
	ut_expected_io_set_iov(expected_io, 1, iov[29].iov_base, 640);
	ut_expected_io_set_iov(expected_io, 2, iov[29].iov_base + 640, 128);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	/* The 24th child IO */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 45, 3, 3);
	ut_expected_io_set_iov(expected_io, 0, iov[30].iov_base, 640);
	ut_expected_io_set_iov(expected_io, 1, iov[31].iov_base, 640);
	ut_expected_io_set_iov(expected_io, 2, iov[32].iov_base, 256);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	/* The 25th child IO */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 48, 2, 2);
	ut_expected_io_set_iov(expected_io, 0, iov[32].iov_base + 256, 384);
	ut_expected_io_set_iov(expected_io, 1, iov[33].iov_base, 640);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	rc = spdk_bdev_readv_blocks(desc, io_ch, iov, BDEV_IO_NUM_CHILD_IOV + 2, 0,
				    50, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	/* The parent IO supports up to 32 child iovs, so a maximum of 11 IOs can be
	 * split at a time; splitting continues after the first batch is over.
	 */
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 11);
	stub_complete_io(11);
	CU_ASSERT(g_io_done == false);

	/* The 2nd round */
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 11);
	stub_complete_io(11);
	CU_ASSERT(g_io_done == false);

	/* The last round */
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 3);
	stub_complete_io(3);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);

	/* Test a WRITE_ZEROES.  This should also not be split.
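	 * (The segment limits, like the boundary split earlier, only apply to I/O
	 *  that carries data buffers, so WRITE_ZEROES, UNMAP and FLUSH below are
	 *  again expected to pass through unsplit.)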
	 */
	bdev->max_segment_size = 512;
	bdev->max_num_segments = 1;
	g_io_done = false;

	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, 9, 36, 0);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	rc = spdk_bdev_write_zeroes_blocks(desc, io_ch, 9, 36, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == true);

	/* Test an UNMAP.  This should also not be split. */
	g_io_done = false;

	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_UNMAP, 15, 4, 0);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	rc = spdk_bdev_unmap_blocks(desc, io_ch, 15, 4, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == true);

	/* Test a FLUSH.  This should also not be split. */
	g_io_done = false;

	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_FLUSH, 15, 4, 0);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	rc = spdk_bdev_flush_blocks(desc, io_ch, 15, 4, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == true);

	spdk_put_io_channel(io_ch);
	spdk_bdev_close(desc);
	free_bdev(bdev);
	spdk_bdev_finish(bdev_fini_cb, NULL);
	poll_threads();
}

static void
bdev_io_mix_split_test(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc = NULL;
	struct spdk_io_channel *io_ch;
	struct spdk_bdev_opts bdev_opts = {};
	struct iovec iov[BDEV_IO_NUM_CHILD_IOV * 2];
	struct ut_expected_io *expected_io;
	uint64_t i;
	int rc;

	spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts));
	bdev_opts.bdev_io_pool_size = 512;
	bdev_opts.bdev_io_cache_size = 64;

	rc = spdk_bdev_set_opts(&bdev_opts);
	CU_ASSERT(rc == 0);
	spdk_bdev_initialize(bdev_init_cb, NULL);

	bdev = allocate_bdev("bdev0");

	rc = spdk_bdev_open_ext(bdev->name, true, bdev_ut_event_cb, NULL, &desc);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(desc != NULL);
	io_ch = spdk_bdev_get_io_channel(desc);
	CU_ASSERT(io_ch != NULL);

	/* First case: optimal_io_boundary == max_segment_size * max_num_segments */
	bdev->split_on_optimal_io_boundary = true;
	bdev->optimal_io_boundary = 16;

	bdev->max_segment_size = 512;
	bdev->max_num_segments = 16;
	g_io_done = false;

	/* An IO crossing the IO boundary requires a split.
	 * Total 2 child IOs.
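	 * (A 4-block read at offset 14 crosses the boundary at block 16: the children
	 *  cover blocks 14-15 and 16-17, each carved into two 512-byte segments.)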
	 */

	/* The 1st child IO: its buffer is split into multiple 512-byte segment entries */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 14, 2, 2);
	ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 512);
	ut_expected_io_set_iov(expected_io, 1, (void *)(0xF000 + 512), 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	/* The 2nd child IO: likewise split into multiple 512-byte segment entries */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 16, 2, 2);
	ut_expected_io_set_iov(expected_io, 0, (void *)(0xF000 + 2 * 512), 512);
	ut_expected_io_set_iov(expected_io, 1, (void *)(0xF000 + 3 * 512), 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 4, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2);
	stub_complete_io(2);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);

	/* Second case: optimal_io_boundary > max_segment_size * max_num_segments */
	bdev->max_segment_size = 15 * 512;
	bdev->max_num_segments = 1;
	g_io_done = false;

	/* An IO crossing the IO boundary requires a split.
	 * The IO within the first boundary exceeds max_segment_size, so it is split
	 * into multiple segment entries, and max_num_segments then forces those
	 * segments into 2 separate child IOs.
	 * Total 3 child IOs.
	 */

	/* The first 2 child IOs are within one IO boundary.  Because
	 * optimal_io_boundary > max_segment_size * max_num_segments, the first
	 * boundary's worth of data splits into these 2 IOs.
	 */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0, 15, 1);
	ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 512 * 15);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 15, 1, 1);
	ut_expected_io_set_iov(expected_io, 0, (void *)(0xF000 + 512 * 15), 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	/* The 3rd child IO exists because of the IO boundary */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 16, 2, 1);
	ut_expected_io_set_iov(expected_io, 0, (void *)(0xF000 + 512 * 16), 512 * 2);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 0, 18, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 3);
	stub_complete_io(3);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);

	/* Third case: optimal_io_boundary < max_segment_size * max_num_segments */
	bdev->max_segment_size = 17 * 512;
	bdev->max_num_segments = 1;
	g_io_done = false;

	/* An IO crossing the IO boundary requires a split.
	 * The child IOs themselves are not split any further.
	 * Total 2 child IOs.
	 */

	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0, 16, 1);
	ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 512 * 16);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 16, 2, 1);
	ut_expected_io_set_iov(expected_io, 0, (void *)(0xF000 + 512 * 16), 512 * 2);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 0, 18, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2);
	stub_complete_io(2);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);

	/* Now set up a more complex, multi-vector command that needs to be split,
	 * including splitting iovecs.
	 * optimal_io_boundary < max_segment_size * max_num_segments
	 */
	bdev->max_segment_size = 3 * 512;
	bdev->max_num_segments = 6;
	g_io_done = false;

	iov[0].iov_base = (void *)0x10000;
	iov[0].iov_len = 4 * 512;
	iov[1].iov_base = (void *)0x20000;
	iov[1].iov_len = 4 * 512;
	iov[2].iov_base = (void *)0x30000;
	iov[2].iov_len = 10 * 512;

	/* An IO crossing the IO boundary requires a split.
	 * Within the first boundary the iovs exceed max_segment_size; after the
	 * segment split, the segment count exceeds max_num_segments, so the first
	 * boundary's IO is split into 2 child IOs.
	 * Total 3 child IOs.
	 */

	/* The first 2 child IOs are within one IO boundary.  After splitting by
	 * segment size, the number of segments exceeds max_num_segments, so the
	 * data splits into these 2 child IOs.
	 */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 0, 14, 6);
	ut_expected_io_set_iov(expected_io, 0, iov[0].iov_base, 512 * 3);
	ut_expected_io_set_iov(expected_io, 1, iov[0].iov_base + 512 * 3, 512);
	ut_expected_io_set_iov(expected_io, 2, iov[1].iov_base, 512 * 3);
	ut_expected_io_set_iov(expected_io, 3, iov[1].iov_base + 512 * 3, 512);
	ut_expected_io_set_iov(expected_io, 4, iov[2].iov_base, 512 * 3);
	ut_expected_io_set_iov(expected_io, 5, iov[2].iov_base + 512 * 3, 512 * 3);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	/* The 2nd child IO takes the leftover segment entries */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 14, 2, 1);
	ut_expected_io_set_iov(expected_io, 0, iov[2].iov_base + 512 * 6, 512 * 2);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 16, 2, 1);
	ut_expected_io_set_iov(expected_io, 0, iov[2].iov_base + 512 * 8, 512 * 2);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	rc = spdk_bdev_writev_blocks(desc, io_ch, iov, 3, 0, 18, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 3);
	stub_complete_io(3);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);

	/* A very complicated case.  Each sg entry exceeds max_segment_size
	 * and splits on the IO boundary.
	 * optimal_io_boundary < max_segment_size * max_num_segments
	 */
	bdev->max_segment_size = 3 * 512;
	bdev->max_num_segments = BDEV_IO_NUM_CHILD_IOV;
	g_io_done = false;

	for (i = 0; i < 20; i++) {
		iov[i].iov_base = (void *)((i + 1) * 0x10000);
		iov[i].iov_len = 512 * 4;
	}

	/* An IO crossing the IO boundary requires a split.
	 * An 80-block length splits into 5 child IOs based on the offset and IO boundary.
	 * Each iov entry is split into 2 entries because of max_segment_size.
	 * Total 5 child IOs.
	 */

	/* 4 iov entries fit within one IO boundary and each iov entry splits into 2,
	 * so each child IO occupies 8 child iov entries.
	 */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 0, 16, 8);
	for (i = 0; i < 4; i++) {
		int iovcnt = i * 2;
		ut_expected_io_set_iov(expected_io, iovcnt, iov[i].iov_base, 512 * 3);
		ut_expected_io_set_iov(expected_io, iovcnt + 1, iov[i].iov_base + 512 * 3, 512);
	}
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	/* 2nd child IO: 16 child iov entries of the parent IO used in total */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 16, 16, 8);
	for (i = 4; i < 8; i++) {
		int iovcnt = (i - 4) * 2;
		ut_expected_io_set_iov(expected_io, iovcnt, iov[i].iov_base, 512 * 3);
		ut_expected_io_set_iov(expected_io, iovcnt + 1, iov[i].iov_base + 512 * 3, 512);
	}
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	/* 3rd child IO: 24 child iov entries of the parent IO used in total */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 32, 16, 8);
	for (i = 8; i < 12; i++) {
		int iovcnt = (i - 8) * 2;
		ut_expected_io_set_iov(expected_io, iovcnt, iov[i].iov_base, 512 * 3);
		ut_expected_io_set_iov(expected_io, iovcnt + 1, iov[i].iov_base + 512 * 3, 512);
	}
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	/* 4th child IO: 32 child iov entries of the parent IO used in total */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 48, 16, 8);
	for (i = 12; i < 16; i++) {
		int iovcnt = (i - 12) * 2;
		ut_expected_io_set_iov(expected_io, iovcnt, iov[i].iov_base, 512 * 3);
		ut_expected_io_set_iov(expected_io, iovcnt + 1, iov[i].iov_base + 512 * 3, 512);
	}
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	/* The 5th child IO exhausts the parent's child iov entries, so it must be
	 * split off into the next round.
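	 * (4 children * 8 staging entries = 32 = BDEV_IO_NUM_CHILD_IOV, so no entries
	 *  remain for the 5th child until the first batch completes.)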
	 */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 64, 16, 8);
	for (i = 16; i < 20; i++) {
		int iovcnt = (i - 16) * 2;
		ut_expected_io_set_iov(expected_io, iovcnt, iov[i].iov_base, 512 * 3);
		ut_expected_io_set_iov(expected_io, iovcnt + 1, iov[i].iov_base + 512 * 3, 512);
	}
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	rc = spdk_bdev_writev_blocks(desc, io_ch, iov, 20, 0, 80, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	/* First split round */
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 4);
	stub_complete_io(4);
	CU_ASSERT(g_io_done == false);

	/* Second split round */
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);

	spdk_put_io_channel(io_ch);
	spdk_bdev_close(desc);
	free_bdev(bdev);
	spdk_bdev_finish(bdev_fini_cb, NULL);
	poll_threads();
}

static void
bdev_io_split_with_io_wait(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc = NULL;
	struct spdk_io_channel *io_ch;
	struct spdk_bdev_channel *channel;
	struct spdk_bdev_mgmt_channel *mgmt_ch;
	struct spdk_bdev_opts bdev_opts = {};
	struct iovec iov[3];
	struct ut_expected_io *expected_io;
	int rc;

	spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts));
	bdev_opts.bdev_io_pool_size = 2;
	bdev_opts.bdev_io_cache_size = 1;

	rc = spdk_bdev_set_opts(&bdev_opts);
	CU_ASSERT(rc == 0);
	spdk_bdev_initialize(bdev_init_cb, NULL);

	bdev = allocate_bdev("bdev0");

	rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc);
	CU_ASSERT(rc == 0);
	CU_ASSERT(desc != NULL);
	CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc));
	io_ch = spdk_bdev_get_io_channel(desc);
	CU_ASSERT(io_ch != NULL);
	channel = spdk_io_channel_get_ctx(io_ch);
	mgmt_ch = channel->shared_resource->mgmt_ch;

	bdev->optimal_io_boundary = 16;
	bdev->split_on_optimal_io_boundary = true;

	rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);

	/* Now test that a single-vector command is split correctly.
	 * Offset 14, length 8, payload 0xF000
	 *  Child - Offset 14, length 2, payload 0xF000
	 *  Child - Offset 16, length 6, payload 0xF000 + 2 * 512
	 *
	 * Set up the expected values before calling spdk_bdev_read_blocks
	 */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 14, 2, 1);
	ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 2 * 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 16, 6, 1);
	ut_expected_io_set_iov(expected_io, 0, (void *)(0xF000 + 2 * 512), 6 * 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	/* The following children will be submitted sequentially due to the capacity of
	 * spdk_bdev_io.
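	 * (bdev_io_pool_size is only 2: the read submitted above and the split parent
	 *  consume both spdk_bdev_ios, so each child has to wait for a free one.)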
	 */

	/* The first child I/O will be queued to wait until an spdk_bdev_io becomes available */
	rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 8, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(!TAILQ_EMPTY(&mgmt_ch->io_wait_queue));
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);

	/* Completing the first read I/O will submit the first child */
	stub_complete_io(1);
	CU_ASSERT(TAILQ_EMPTY(&mgmt_ch->io_wait_queue));
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);

	/* Completing the first child will submit the second child */
	stub_complete_io(1);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);

	/* Complete the second child I/O.  This should result in our callback getting
	 * invoked since the parent I/O is now complete.
	 */
	stub_complete_io(1);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);

	/* Now set up a more complex, multi-vector command that needs to be split,
	 * including splitting iovecs.
	 */
	iov[0].iov_base = (void *)0x10000;
	iov[0].iov_len = 512;
	iov[1].iov_base = (void *)0x20000;
	iov[1].iov_len = 20 * 512;
	iov[2].iov_base = (void *)0x30000;
	iov[2].iov_len = 11 * 512;

	g_io_done = false;
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 14, 2, 2);
	ut_expected_io_set_iov(expected_io, 0, (void *)0x10000, 512);
	ut_expected_io_set_iov(expected_io, 1, (void *)0x20000, 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 16, 16, 1);
	ut_expected_io_set_iov(expected_io, 0, (void *)(0x20000 + 512), 16 * 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 32, 14, 2);
	ut_expected_io_set_iov(expected_io, 0, (void *)(0x20000 + 17 * 512), 3 * 512);
	ut_expected_io_set_iov(expected_io, 1, (void *)0x30000, 11 * 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	rc = spdk_bdev_writev_blocks(desc, io_ch, iov, 3, 14, 32, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	/* The following children will be submitted sequentially due to the capacity of
	 * spdk_bdev_io.
	 */

	/* Completing the first child will submit the second child */
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == false);

	/* Completing the second child will submit the third child */
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == false);

	/* Completing the third child will result in our callback getting invoked
	 * since the parent I/O is now complete.
	 */
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == true);

	CU_ASSERT(TAILQ_EMPTY(&g_bdev_ut_channel->expected_io));

	spdk_put_io_channel(io_ch);
	spdk_bdev_close(desc);
	free_bdev(bdev);
	spdk_bdev_finish(bdev_fini_cb, NULL);
	poll_threads();
}

static void
bdev_io_alignment(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc = NULL;
	struct spdk_io_channel *io_ch;
	struct spdk_bdev_opts bdev_opts = {};
	int rc;
	void *buf = NULL;
	struct iovec iovs[2];
	int iovcnt;
	uint64_t alignment;

	spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts));
	bdev_opts.bdev_io_pool_size = 20;
	bdev_opts.bdev_io_cache_size = 2;

	rc = spdk_bdev_set_opts(&bdev_opts);
	CU_ASSERT(rc == 0);
	spdk_bdev_initialize(bdev_init_cb, NULL);

	fn_table.submit_request = stub_submit_request_get_buf;
	bdev = allocate_bdev("bdev0");

	rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc);
	CU_ASSERT(rc == 0);
	CU_ASSERT(desc != NULL);
	CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc));
	io_ch = spdk_bdev_get_io_channel(desc);
	CU_ASSERT(io_ch != NULL);

	/* Create aligned buffer */
	rc = posix_memalign(&buf, 4096, 8192);
	SPDK_CU_ASSERT_FATAL(rc == 0);

	/* Pass aligned single buffer with no alignment required */
	alignment = 1;
	bdev->required_alignment = spdk_u32log2(alignment);

	rc = spdk_bdev_write_blocks(desc, io_ch, buf, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	stub_complete_io(1);
	CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt,
				    alignment));

	rc = spdk_bdev_read_blocks(desc, io_ch, buf, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	stub_complete_io(1);
	CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt,
				    alignment));

	/* Pass unaligned single buffer with no alignment required */
	alignment = 1;
	bdev->required_alignment = spdk_u32log2(alignment);

	rc = spdk_bdev_write_blocks(desc, io_ch, buf + 4, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);
	CU_ASSERT(g_bdev_io->u.bdev.iovs[0].iov_base == buf + 4);
	stub_complete_io(1);

	rc = spdk_bdev_read_blocks(desc, io_ch, buf + 4, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);
	CU_ASSERT(g_bdev_io->u.bdev.iovs[0].iov_base == buf + 4);
	stub_complete_io(1);

	/* Pass unaligned single buffer with 512 alignment required */
	alignment = 512;
	bdev->required_alignment = spdk_u32log2(alignment);

	rc = spdk_bdev_write_blocks(desc, io_ch, buf + 4, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 1);
	CU_ASSERT(g_bdev_io->u.bdev.iovs == &g_bdev_io->internal.bounce_iov);
	CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt,
				    alignment));
	stub_complete_io(1);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);

	rc = spdk_bdev_read_blocks(desc, io_ch, buf + 4, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 1);
	CU_ASSERT(g_bdev_io->u.bdev.iovs == &g_bdev_io->internal.bounce_iov);
	CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt,
				    alignment));
	stub_complete_io(1);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);

	/* Pass unaligned single buffer with 4096 alignment required */
	alignment = 4096;
	bdev->required_alignment = spdk_u32log2(alignment);

	rc = spdk_bdev_write_blocks(desc, io_ch, buf + 8, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 1);
	CU_ASSERT(g_bdev_io->u.bdev.iovs == &g_bdev_io->internal.bounce_iov);
	CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt,
				    alignment));
	stub_complete_io(1);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);

	rc = spdk_bdev_read_blocks(desc, io_ch, buf + 8, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 1);
	CU_ASSERT(g_bdev_io->u.bdev.iovs == &g_bdev_io->internal.bounce_iov);
	CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt,
				    alignment));
	stub_complete_io(1);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);

	/* Pass aligned iovs with no alignment required */
	alignment = 1;
	bdev->required_alignment = spdk_u32log2(alignment);

	iovcnt = 1;
	iovs[0].iov_base = buf;
	iovs[0].iov_len = 512;

	rc = spdk_bdev_writev(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);
	stub_complete_io(1);
	CU_ASSERT(g_bdev_io->u.bdev.iovs[0].iov_base == iovs[0].iov_base);

	rc = spdk_bdev_readv(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);
	stub_complete_io(1);
	CU_ASSERT(g_bdev_io->u.bdev.iovs[0].iov_base == iovs[0].iov_base);

	/* Pass unaligned iovs with no alignment required */
	alignment = 1;
	bdev->required_alignment = spdk_u32log2(alignment);

	iovcnt = 2;
	iovs[0].iov_base = buf + 16;
	iovs[0].iov_len = 256;
	iovs[1].iov_base = buf + 16 + 256 + 32;
	iovs[1].iov_len = 256;

	rc = spdk_bdev_writev(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);
	stub_complete_io(1);
	CU_ASSERT(g_bdev_io->u.bdev.iovs[0].iov_base == iovs[0].iov_base);

	rc = spdk_bdev_readv(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);
	stub_complete_io(1);
	CU_ASSERT(g_bdev_io->u.bdev.iovs[0].iov_base == iovs[0].iov_base);

	/* Pass unaligned iov with 2048 alignment required */
	alignment = 2048;
	bdev->required_alignment = spdk_u32log2(alignment);

	iovcnt = 2;
	iovs[0].iov_base = buf + 16;
	iovs[0].iov_len = 256;
	iovs[1].iov_base = buf + 16 + 256 + 32;
	iovs[1].iov_len = 256;

	rc = spdk_bdev_writev(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == iovcnt);
	CU_ASSERT(g_bdev_io->u.bdev.iovs == &g_bdev_io->internal.bounce_iov);
	CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt,
				    alignment));
	stub_complete_io(1);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);

	rc = spdk_bdev_readv(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == iovcnt);
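	/* As with the write above, the unaligned iovs should have been copied into
	 * the internal bounce buffer before being handed to the driver.
	 */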
	CU_ASSERT(g_bdev_io->u.bdev.iovs == &g_bdev_io->internal.bounce_iov);
	CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt,
				    alignment));
	stub_complete_io(1);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);

	/* Pass an iov without an allocated buffer, with no alignment required */
	alignment = 1;
	bdev->required_alignment = spdk_u32log2(alignment);

	iovcnt = 1;
	iovs[0].iov_base = NULL;
	iovs[0].iov_len = 0;

	rc = spdk_bdev_readv(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);
	CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt,
				    alignment));
	stub_complete_io(1);

	/* Pass an iov without an allocated buffer, with 1024 alignment required */
	alignment = 1024;
	bdev->required_alignment = spdk_u32log2(alignment);

	iovcnt = 1;
	iovs[0].iov_base = NULL;
	iovs[0].iov_len = 0;

	rc = spdk_bdev_readv(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);
	CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt,
				    alignment));
	stub_complete_io(1);

	spdk_put_io_channel(io_ch);
	spdk_bdev_close(desc);
	free_bdev(bdev);
	fn_table.submit_request = stub_submit_request;
	spdk_bdev_finish(bdev_fini_cb, NULL);
	poll_threads();

	free(buf);
}

static void
bdev_io_alignment_with_boundary(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc = NULL;
	struct spdk_io_channel *io_ch;
	struct spdk_bdev_opts bdev_opts = {};
	int rc;
	void *buf = NULL;
	struct iovec iovs[2];
	int iovcnt;
	uint64_t alignment;

	spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts));
	bdev_opts.bdev_io_pool_size = 20;
	bdev_opts.bdev_io_cache_size = 2;

	bdev_opts.opts_size = sizeof(bdev_opts);
	rc = spdk_bdev_set_opts(&bdev_opts);
	CU_ASSERT(rc == 0);
	spdk_bdev_initialize(bdev_init_cb, NULL);

	fn_table.submit_request = stub_submit_request_get_buf;
	bdev = allocate_bdev("bdev0");

	rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc);
	CU_ASSERT(rc == 0);
	CU_ASSERT(desc != NULL);
	CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc));
	io_ch = spdk_bdev_get_io_channel(desc);
	CU_ASSERT(io_ch != NULL);

	/* Create aligned buffer */
	rc = posix_memalign(&buf, 4096, 131072);
	SPDK_CU_ASSERT_FATAL(rc == 0);
	g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS;

	/* 512 * 3 with 2 IO boundary, allocate small data buffer from bdev layer */
	alignment = 512;
	bdev->required_alignment = spdk_u32log2(alignment);
	bdev->optimal_io_boundary = 2;
	bdev->split_on_optimal_io_boundary = true;

	iovcnt = 1;
	iovs[0].iov_base = NULL;
	iovs[0].iov_len = 512 * 3;

	rc = spdk_bdev_readv_blocks(desc, io_ch, iovs, iovcnt, 1, 3, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2);
	stub_complete_io(2);

	/* 8KiB with 16 IO boundary, allocate large data buffer from bdev layer */
	alignment = 512;
	bdev->required_alignment = spdk_u32log2(alignment);
	bdev->optimal_io_boundary = 16;
	bdev->split_on_optimal_io_boundary = true;

	iovcnt = 1;
	iovs[0].iov_base = NULL;
	iovs[0].iov_len = 512 * 16;

	rc = spdk_bdev_readv_blocks(desc, io_ch, iovs, iovcnt, 1, 16, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2);
	stub_complete_io(2);

	/* 512 * 160 with 128 IO boundary, 63.5KiB + 16.5KiB for the two child requests */
	alignment = 512;
	bdev->required_alignment = spdk_u32log2(alignment);
	bdev->optimal_io_boundary = 128;
	bdev->split_on_optimal_io_boundary = true;

	iovcnt = 1;
	iovs[0].iov_base = buf + 16;
	iovs[0].iov_len = 512 * 160;
	rc = spdk_bdev_readv_blocks(desc, io_ch, iovs, iovcnt, 1, 160, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2);
	stub_complete_io(2);

	/* 512 * 3 with 2 IO boundary */
	alignment = 512;
	bdev->required_alignment = spdk_u32log2(alignment);
	bdev->optimal_io_boundary = 2;
	bdev->split_on_optimal_io_boundary = true;

	iovcnt = 2;
	iovs[0].iov_base = buf + 16;
	iovs[0].iov_len = 512;
	iovs[1].iov_base = buf + 16 + 512 + 32;
	iovs[1].iov_len = 1024;

	rc = spdk_bdev_writev_blocks(desc, io_ch, iovs, iovcnt, 1, 3, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2);
	stub_complete_io(2);

	rc = spdk_bdev_readv_blocks(desc, io_ch, iovs, iovcnt, 1, 3, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2);
	stub_complete_io(2);

	/* 512 * 64 with 32 IO boundary */
	bdev->optimal_io_boundary = 32;
	iovcnt = 2;
	iovs[0].iov_base = buf + 16;
	iovs[0].iov_len = 16384;
	iovs[1].iov_base = buf + 16 + 16384 + 32;
	iovs[1].iov_len = 16384;

	rc = spdk_bdev_writev_blocks(desc, io_ch, iovs, iovcnt, 1, 64, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 3);
	stub_complete_io(3);

	rc = spdk_bdev_readv_blocks(desc, io_ch, iovs, iovcnt, 1, 64, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 3);
	stub_complete_io(3);

	/* 512 * 160 with 32 IO boundary */
	iovcnt = 1;
	iovs[0].iov_base = buf + 16;
	iovs[0].iov_len = 16384 + 65536;

	rc = spdk_bdev_writev_blocks(desc, io_ch, iovs, iovcnt, 1, 160, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 6);
	stub_complete_io(6);

	spdk_put_io_channel(io_ch);
	spdk_bdev_close(desc);
	free_bdev(bdev);
	fn_table.submit_request = stub_submit_request;
	spdk_bdev_finish(bdev_fini_cb, NULL);
	poll_threads();

	free(buf);
}

static void
histogram_status_cb(void *cb_arg, int status)
{
	g_status = status;
}

static void
histogram_data_cb(void *cb_arg, int status, struct spdk_histogram_data *histogram)
{
	g_status = status;
	g_histogram = histogram;
}

static void
histogram_io_count(void *ctx, uint64_t start, uint64_t end, uint64_t count,
		   uint64_t total, uint64_t so_far)
{
	g_count += count;
}

static void
bdev_histograms(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc = NULL;
	struct spdk_io_channel *ch;
	struct spdk_histogram_data *histogram;
	uint8_t buf[4096];
	int rc;

	spdk_bdev_initialize(bdev_init_cb, NULL);

	bdev = allocate_bdev("bdev");

	rc = spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc);
spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc); 3112 CU_ASSERT(rc == 0); 3113 CU_ASSERT(desc != NULL); 3114 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 3115 3116 ch = spdk_bdev_get_io_channel(desc); 3117 CU_ASSERT(ch != NULL); 3118 3119 /* Enable histogram */ 3120 g_status = -1; 3121 spdk_bdev_histogram_enable(bdev, histogram_status_cb, NULL, true); 3122 poll_threads(); 3123 CU_ASSERT(g_status == 0); 3124 CU_ASSERT(bdev->internal.histogram_enabled == true); 3125 3126 /* Allocate histogram */ 3127 histogram = spdk_histogram_data_alloc(); 3128 SPDK_CU_ASSERT_FATAL(histogram != NULL); 3129 3130 /* Check if histogram is zeroed */ 3131 spdk_bdev_histogram_get(bdev, histogram, histogram_data_cb, NULL); 3132 poll_threads(); 3133 CU_ASSERT(g_status == 0); 3134 SPDK_CU_ASSERT_FATAL(g_histogram != NULL); 3135 3136 g_count = 0; 3137 spdk_histogram_data_iterate(g_histogram, histogram_io_count, NULL); 3138 3139 CU_ASSERT(g_count == 0); 3140 3141 rc = spdk_bdev_write_blocks(desc, ch, buf, 0, 1, io_done, NULL); 3142 CU_ASSERT(rc == 0); 3143 3144 spdk_delay_us(10); 3145 stub_complete_io(1); 3146 poll_threads(); 3147 3148 rc = spdk_bdev_read_blocks(desc, ch, buf, 0, 1, io_done, NULL); 3149 CU_ASSERT(rc == 0); 3150 3151 spdk_delay_us(10); 3152 stub_complete_io(1); 3153 poll_threads(); 3154 3155 /* Check if histogram gathered data from all I/O channels */ 3156 g_histogram = NULL; 3157 spdk_bdev_histogram_get(bdev, histogram, histogram_data_cb, NULL); 3158 poll_threads(); 3159 CU_ASSERT(g_status == 0); 3160 CU_ASSERT(bdev->internal.histogram_enabled == true); 3161 SPDK_CU_ASSERT_FATAL(g_histogram != NULL); 3162 3163 g_count = 0; 3164 spdk_histogram_data_iterate(g_histogram, histogram_io_count, NULL); 3165 CU_ASSERT(g_count == 2); 3166 3167 /* Disable histogram */ 3168 spdk_bdev_histogram_enable(bdev, histogram_status_cb, NULL, false); 3169 poll_threads(); 3170 CU_ASSERT(g_status == 0); 3171 CU_ASSERT(bdev->internal.histogram_enabled == false); 3172 3173 /* Try to run histogram commands on disabled bdev */ 3174 spdk_bdev_histogram_get(bdev, histogram, histogram_data_cb, NULL); 3175 poll_threads(); 3176 CU_ASSERT(g_status == -EFAULT); 3177 3178 spdk_histogram_data_free(histogram); 3179 spdk_put_io_channel(ch); 3180 spdk_bdev_close(desc); 3181 free_bdev(bdev); 3182 spdk_bdev_finish(bdev_fini_cb, NULL); 3183 poll_threads(); 3184 } 3185 3186 static void 3187 _bdev_compare(bool emulated) 3188 { 3189 struct spdk_bdev *bdev; 3190 struct spdk_bdev_desc *desc = NULL; 3191 struct spdk_io_channel *ioch; 3192 struct ut_expected_io *expected_io; 3193 uint64_t offset, num_blocks; 3194 uint32_t num_completed; 3195 char aa_buf[512]; 3196 char bb_buf[512]; 3197 struct iovec compare_iov; 3198 uint8_t io_type; 3199 int rc; 3200 3201 if (emulated) { 3202 io_type = SPDK_BDEV_IO_TYPE_READ; 3203 } else { 3204 io_type = SPDK_BDEV_IO_TYPE_COMPARE; 3205 } 3206 3207 memset(aa_buf, 0xaa, sizeof(aa_buf)); 3208 memset(bb_buf, 0xbb, sizeof(bb_buf)); 3209 3210 g_io_types_supported[SPDK_BDEV_IO_TYPE_COMPARE] = !emulated; 3211 3212 spdk_bdev_initialize(bdev_init_cb, NULL); 3213 fn_table.submit_request = stub_submit_request_get_buf; 3214 bdev = allocate_bdev("bdev"); 3215 3216 rc = spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc); 3217 CU_ASSERT_EQUAL(rc, 0); 3218 SPDK_CU_ASSERT_FATAL(desc != NULL); 3219 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 3220 ioch = spdk_bdev_get_io_channel(desc); 3221 SPDK_CU_ASSERT_FATAL(ioch != NULL); 3222 3223 fn_table.submit_request = stub_submit_request_get_buf; 3224 
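/* When emulated == true the bdev reports no native COMPARE support, so the bdev layer
 * is expected to service spdk_bdev_comparev_blocks() by reading the blocks and doing the
 * comparison itself; hence io_type above is READ in that mode, while with native support
 * the stub sees a COMPARE request instead. In both modes g_compare_read_buf, set below,
 * stands in for the on-disk data. (This note summarizes the stub's model of the flow.)
 */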
g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 3225 3226 offset = 50; 3227 num_blocks = 1; 3228 compare_iov.iov_base = aa_buf; 3229 compare_iov.iov_len = sizeof(aa_buf); 3230 3231 expected_io = ut_alloc_expected_io(io_type, offset, num_blocks, 0); 3232 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 3233 3234 g_io_done = false; 3235 g_compare_read_buf = aa_buf; 3236 g_compare_read_buf_len = sizeof(aa_buf); 3237 rc = spdk_bdev_comparev_blocks(desc, ioch, &compare_iov, 1, offset, num_blocks, io_done, NULL); 3238 CU_ASSERT_EQUAL(rc, 0); 3239 num_completed = stub_complete_io(1); 3240 CU_ASSERT_EQUAL(num_completed, 1); 3241 CU_ASSERT(g_io_done == true); 3242 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS); 3243 3244 expected_io = ut_alloc_expected_io(io_type, offset, num_blocks, 0); 3245 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 3246 3247 g_io_done = false; 3248 g_compare_read_buf = bb_buf; 3249 g_compare_read_buf_len = sizeof(bb_buf); 3250 rc = spdk_bdev_comparev_blocks(desc, ioch, &compare_iov, 1, offset, num_blocks, io_done, NULL); 3251 CU_ASSERT_EQUAL(rc, 0); 3252 num_completed = stub_complete_io(1); 3253 CU_ASSERT_EQUAL(num_completed, 1); 3254 CU_ASSERT(g_io_done == true); 3255 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_MISCOMPARE); 3256 3257 spdk_put_io_channel(ioch); 3258 spdk_bdev_close(desc); 3259 free_bdev(bdev); 3260 fn_table.submit_request = stub_submit_request; 3261 spdk_bdev_finish(bdev_fini_cb, NULL); 3262 poll_threads(); 3263 3264 g_io_types_supported[SPDK_BDEV_IO_TYPE_COMPARE] = true; 3265 3266 g_compare_read_buf = NULL; 3267 } 3268 3269 static void 3270 bdev_compare(void) 3271 { 3272 _bdev_compare(true); 3273 _bdev_compare(false); 3274 } 3275 3276 static void 3277 bdev_compare_and_write(void) 3278 { 3279 struct spdk_bdev *bdev; 3280 struct spdk_bdev_desc *desc = NULL; 3281 struct spdk_io_channel *ioch; 3282 struct ut_expected_io *expected_io; 3283 uint64_t offset, num_blocks; 3284 uint32_t num_completed; 3285 char aa_buf[512]; 3286 char bb_buf[512]; 3287 char cc_buf[512]; 3288 char write_buf[512]; 3289 struct iovec compare_iov; 3290 struct iovec write_iov; 3291 int rc; 3292 3293 memset(aa_buf, 0xaa, sizeof(aa_buf)); 3294 memset(bb_buf, 0xbb, sizeof(bb_buf)); 3295 memset(cc_buf, 0xcc, sizeof(cc_buf)); 3296 3297 g_io_types_supported[SPDK_BDEV_IO_TYPE_COMPARE] = false; 3298 3299 spdk_bdev_initialize(bdev_init_cb, NULL); 3300 fn_table.submit_request = stub_submit_request_get_buf; 3301 bdev = allocate_bdev("bdev"); 3302 3303 rc = spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc); 3304 CU_ASSERT_EQUAL(rc, 0); 3305 SPDK_CU_ASSERT_FATAL(desc != NULL); 3306 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 3307 ioch = spdk_bdev_get_io_channel(desc); 3308 SPDK_CU_ASSERT_FATAL(ioch != NULL); 3309 3310 fn_table.submit_request = stub_submit_request_get_buf; 3311 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 3312 3313 offset = 50; 3314 num_blocks = 1; 3315 compare_iov.iov_base = aa_buf; 3316 compare_iov.iov_len = sizeof(aa_buf); 3317 write_iov.iov_base = bb_buf; 3318 write_iov.iov_len = sizeof(bb_buf); 3319 3320 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, offset, num_blocks, 0); 3321 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 3322 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, offset, num_blocks, 0); 3323 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 3324 3325 g_io_done = false; 3326 g_compare_read_buf = aa_buf; 3327 
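/* g_compare_read_buf models the on-disk contents that the compare phase reads back;
 * since it matches compare_iov (both aa_buf), the subsequent write phase should proceed.
 */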
g_compare_read_buf_len = sizeof(aa_buf); 3328 memset(write_buf, 0, sizeof(write_buf)); 3329 g_compare_write_buf = write_buf; 3330 g_compare_write_buf_len = sizeof(write_buf); 3331 rc = spdk_bdev_comparev_and_writev_blocks(desc, ioch, &compare_iov, 1, &write_iov, 1, 3332 offset, num_blocks, io_done, NULL); 3333 /* Trigger range locking */ 3334 poll_threads(); 3335 CU_ASSERT_EQUAL(rc, 0); 3336 num_completed = stub_complete_io(1); 3337 CU_ASSERT_EQUAL(num_completed, 1); 3338 CU_ASSERT(g_io_done == false); 3339 num_completed = stub_complete_io(1); 3340 /* Trigger range unlocking */ 3341 poll_threads(); 3342 CU_ASSERT_EQUAL(num_completed, 1); 3343 CU_ASSERT(g_io_done == true); 3344 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS); 3345 CU_ASSERT(memcmp(write_buf, bb_buf, sizeof(write_buf)) == 0); 3346 3347 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, offset, num_blocks, 0); 3348 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 3349 3350 g_io_done = false; 3351 g_compare_read_buf = cc_buf; 3352 g_compare_read_buf_len = sizeof(cc_buf); 3353 memset(write_buf, 0, sizeof(write_buf)); 3354 g_compare_write_buf = write_buf; 3355 g_compare_write_buf_len = sizeof(write_buf); 3356 rc = spdk_bdev_comparev_and_writev_blocks(desc, ioch, &compare_iov, 1, &write_iov, 1, 3357 offset, num_blocks, io_done, NULL); 3358 /* Trigger range locking */ 3359 poll_threads(); 3360 CU_ASSERT_EQUAL(rc, 0); 3361 num_completed = stub_complete_io(1); 3362 /* Trigger range unlocking earlier because we expect error here */ 3363 poll_threads(); 3364 CU_ASSERT_EQUAL(num_completed, 1); 3365 CU_ASSERT(g_io_done == true); 3366 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_MISCOMPARE); 3367 num_completed = stub_complete_io(1); 3368 CU_ASSERT_EQUAL(num_completed, 0); 3369 3370 spdk_put_io_channel(ioch); 3371 spdk_bdev_close(desc); 3372 free_bdev(bdev); 3373 fn_table.submit_request = stub_submit_request; 3374 spdk_bdev_finish(bdev_fini_cb, NULL); 3375 poll_threads(); 3376 3377 g_io_types_supported[SPDK_BDEV_IO_TYPE_COMPARE] = true; 3378 3379 g_compare_read_buf = NULL; 3380 g_compare_write_buf = NULL; 3381 } 3382 3383 static void 3384 bdev_write_zeroes(void) 3385 { 3386 struct spdk_bdev *bdev; 3387 struct spdk_bdev_desc *desc = NULL; 3388 struct spdk_io_channel *ioch; 3389 struct ut_expected_io *expected_io; 3390 uint64_t offset, num_io_blocks, num_blocks; 3391 uint32_t num_completed, num_requests; 3392 int rc; 3393 3394 spdk_bdev_initialize(bdev_init_cb, NULL); 3395 bdev = allocate_bdev("bdev"); 3396 3397 rc = spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc); 3398 CU_ASSERT_EQUAL(rc, 0); 3399 SPDK_CU_ASSERT_FATAL(desc != NULL); 3400 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 3401 ioch = spdk_bdev_get_io_channel(desc); 3402 SPDK_CU_ASSERT_FATAL(ioch != NULL); 3403 3404 fn_table.submit_request = stub_submit_request; 3405 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 3406 3407 /* First test that if the bdev supports write_zeroes, the request won't be split */ 3408 bdev->md_len = 0; 3409 bdev->blocklen = 4096; 3410 num_blocks = (ZERO_BUFFER_SIZE / bdev->blocklen) * 2; 3411 3412 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, 0, num_blocks, 0); 3413 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 3414 rc = spdk_bdev_write_zeroes_blocks(desc, ioch, 0, num_blocks, io_done, NULL); 3415 CU_ASSERT_EQUAL(rc, 0); 3416 num_completed = stub_complete_io(1); 3417 CU_ASSERT_EQUAL(num_completed, 1); 3418 3419 /* Check that if write zeroes is not 
supported it'll be replaced by regular writes */ 3420 ut_enable_io_type(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, false); 3421 num_io_blocks = ZERO_BUFFER_SIZE / bdev->blocklen; 3422 num_requests = 2; 3423 num_blocks = (ZERO_BUFFER_SIZE / bdev->blocklen) * num_requests; 3424 3425 for (offset = 0; offset < num_requests; ++offset) { 3426 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 3427 offset * num_io_blocks, num_io_blocks, 0); 3428 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 3429 } 3430 3431 rc = spdk_bdev_write_zeroes_blocks(desc, ioch, 0, num_blocks, io_done, NULL); 3432 CU_ASSERT_EQUAL(rc, 0); 3433 num_completed = stub_complete_io(num_requests); 3434 CU_ASSERT_EQUAL(num_completed, num_requests); 3435 3436 /* Check that the splitting is correct if bdev has interleaved metadata */ 3437 bdev->md_interleave = true; 3438 bdev->md_len = 64; 3439 bdev->blocklen = 4096 + 64; 3440 num_blocks = (ZERO_BUFFER_SIZE / bdev->blocklen) * 2; 3441 3442 num_requests = offset = 0; 3443 while (offset < num_blocks) { 3444 num_io_blocks = spdk_min(ZERO_BUFFER_SIZE / bdev->blocklen, num_blocks - offset); 3445 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 3446 offset, num_io_blocks, 0); 3447 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 3448 offset += num_io_blocks; 3449 num_requests++; 3450 } 3451 3452 rc = spdk_bdev_write_zeroes_blocks(desc, ioch, 0, num_blocks, io_done, NULL); 3453 CU_ASSERT_EQUAL(rc, 0); 3454 num_completed = stub_complete_io(num_requests); 3455 CU_ASSERT_EQUAL(num_completed, num_requests); 3456 num_completed = stub_complete_io(num_requests); 3457 CU_ASSERT_EQUAL(num_completed, 0); 3458 3459 /* Check that the same holds for a separate metadata buffer */ 3460 bdev->md_interleave = false; 3461 bdev->md_len = 64; 3462 bdev->blocklen = 4096; 3463 3464 num_requests = offset = 0; 3465 while (offset < num_blocks) { 3466 num_io_blocks = spdk_min(ZERO_BUFFER_SIZE / (bdev->blocklen + bdev->md_len), num_blocks); 3467 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 3468 offset, num_io_blocks, 0); 3469 expected_io->md_buf = (char *)g_bdev_mgr.zero_buffer + num_io_blocks * bdev->blocklen; 3470 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 3471 offset += num_io_blocks; 3472 num_requests++; 3473 } 3474 3475 rc = spdk_bdev_write_zeroes_blocks(desc, ioch, 0, num_blocks, io_done, NULL); 3476 CU_ASSERT_EQUAL(rc, 0); 3477 num_completed = stub_complete_io(num_requests); 3478 CU_ASSERT_EQUAL(num_completed, num_requests); 3479 3480 ut_enable_io_type(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, true); 3481 spdk_put_io_channel(ioch); 3482 spdk_bdev_close(desc); 3483 free_bdev(bdev); 3484 spdk_bdev_finish(bdev_fini_cb, NULL); 3485 poll_threads(); 3486 } 3487 3488 static void 3489 bdev_zcopy_write(void) 3490 { 3491 struct spdk_bdev *bdev; 3492 struct spdk_bdev_desc *desc = NULL; 3493 struct spdk_io_channel *ioch; 3494 struct ut_expected_io *expected_io; 3495 uint64_t offset, num_blocks; 3496 uint32_t num_completed; 3497 char aa_buf[512]; 3498 struct iovec iov; 3499 int rc; 3500 const bool populate = false; 3501 const bool commit = true; 3502 3503 memset(aa_buf, 0xaa, sizeof(aa_buf)); 3504 3505 spdk_bdev_initialize(bdev_init_cb, NULL); 3506 bdev = allocate_bdev("bdev"); 3507 3508 rc = spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc); 3509 CU_ASSERT_EQUAL(rc, 0); 3510 SPDK_CU_ASSERT_FATAL(desc != NULL); 3511 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 3512 ioch = spdk_bdev_get_io_channel(desc); 3513 SPDK_CU_ASSERT_FATAL(ioch != NULL); 3514
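/* A zero-copy write is a two-phase exchange: spdk_bdev_zcopy_start() with populate=false
 * asks the bdev module to expose one of its own buffers through the iov, the caller fills
 * that buffer, and spdk_bdev_zcopy_end() with commit=true commits the data. The stub below
 * models this by handing out g_zcopy_write_buf as the module buffer; this note describes
 * the stub's model, see include/spdk/bdev.h for the authoritative contract.
 */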
3515 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 3516 3517 offset = 50; 3518 num_blocks = 1; 3519 iov.iov_base = NULL; 3520 iov.iov_len = 0; 3521 3522 g_zcopy_read_buf = (void *) 0x1122334455667788UL; 3523 g_zcopy_read_buf_len = (uint32_t) -1; 3524 /* Do a zcopy start for a write (populate=false) */ 3525 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_ZCOPY, offset, num_blocks, 0); 3526 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 3527 g_io_done = false; 3528 g_zcopy_write_buf = aa_buf; 3529 g_zcopy_write_buf_len = sizeof(aa_buf); 3530 g_zcopy_bdev_io = NULL; 3531 rc = spdk_bdev_zcopy_start(desc, ioch, &iov, 1, offset, num_blocks, populate, io_done, NULL); 3532 CU_ASSERT_EQUAL(rc, 0); 3533 num_completed = stub_complete_io(1); 3534 CU_ASSERT_EQUAL(num_completed, 1); 3535 CU_ASSERT(g_io_done == true); 3536 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS); 3537 /* Check that the iov has been set up */ 3538 CU_ASSERT(iov.iov_base == g_zcopy_write_buf); 3539 CU_ASSERT(iov.iov_len == g_zcopy_write_buf_len); 3540 /* Check that the bdev_io has been saved */ 3541 CU_ASSERT(g_zcopy_bdev_io != NULL); 3542 /* Now do the zcopy end for a write (commit=true) */ 3543 g_io_done = false; 3544 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_ZCOPY, offset, num_blocks, 0); 3545 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 3546 rc = spdk_bdev_zcopy_end(g_zcopy_bdev_io, commit, io_done, NULL); 3547 CU_ASSERT_EQUAL(rc, 0); 3548 num_completed = stub_complete_io(1); 3549 CU_ASSERT_EQUAL(num_completed, 1); 3550 CU_ASSERT(g_io_done == true); 3551 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS); 3552 /* Check that the g_zcopy globals are reset by io_done */ 3553 CU_ASSERT(g_zcopy_write_buf == NULL); 3554 CU_ASSERT(g_zcopy_write_buf_len == 0); 3555 /* Check that io_done has freed the g_zcopy_bdev_io */ 3556 CU_ASSERT(g_zcopy_bdev_io == NULL); 3557 3558 /* Check that the zcopy read buffer has not been touched, which 3559 * ensures that the correct buffers were used.
3560 */ 3561 CU_ASSERT(g_zcopy_read_buf == (void *) 0x1122334455667788UL); 3562 CU_ASSERT(g_zcopy_read_buf_len == (uint32_t) -1); 3563 3564 spdk_put_io_channel(ioch); 3565 spdk_bdev_close(desc); 3566 free_bdev(bdev); 3567 spdk_bdev_finish(bdev_fini_cb, NULL); 3568 poll_threads(); 3569 } 3570 3571 static void 3572 bdev_zcopy_read(void) 3573 { 3574 struct spdk_bdev *bdev; 3575 struct spdk_bdev_desc *desc = NULL; 3576 struct spdk_io_channel *ioch; 3577 struct ut_expected_io *expected_io; 3578 uint64_t offset, num_blocks; 3579 uint32_t num_completed; 3580 char aa_buf[512]; 3581 struct iovec iov; 3582 int rc; 3583 const bool populate = true; 3584 const bool commit = false; 3585 3586 memset(aa_buf, 0xaa, sizeof(aa_buf)); 3587 3588 spdk_bdev_initialize(bdev_init_cb, NULL); 3589 bdev = allocate_bdev("bdev"); 3590 3591 rc = spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc); 3592 CU_ASSERT_EQUAL(rc, 0); 3593 SPDK_CU_ASSERT_FATAL(desc != NULL); 3594 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 3595 ioch = spdk_bdev_get_io_channel(desc); 3596 SPDK_CU_ASSERT_FATAL(ioch != NULL); 3597 3598 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 3599 3600 offset = 50; 3601 num_blocks = 1; 3602 iov.iov_base = NULL; 3603 iov.iov_len = 0; 3604 3605 g_zcopy_write_buf = (void *) 0x1122334455667788UL; 3606 g_zcopy_write_buf_len = (uint32_t) -1; 3607 3608 /* Do a zcopy start for a read (populate=true) */ 3609 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_ZCOPY, offset, num_blocks, 0); 3610 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 3611 g_io_done = false; 3612 g_zcopy_read_buf = aa_buf; 3613 g_zcopy_read_buf_len = sizeof(aa_buf); 3614 g_zcopy_bdev_io = NULL; 3615 rc = spdk_bdev_zcopy_start(desc, ioch, &iov, 1, offset, num_blocks, populate, io_done, NULL); 3616 CU_ASSERT_EQUAL(rc, 0); 3617 num_completed = stub_complete_io(1); 3618 CU_ASSERT_EQUAL(num_completed, 1); 3619 CU_ASSERT(g_io_done == true); 3620 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS); 3621 /* Check that the iov has been set up */ 3622 CU_ASSERT(iov.iov_base == g_zcopy_read_buf); 3623 CU_ASSERT(iov.iov_len == g_zcopy_read_buf_len); 3624 /* Check that the bdev_io has been saved */ 3625 CU_ASSERT(g_zcopy_bdev_io != NULL); 3626 3627 /* Now do the zcopy end for a read (commit=false) */ 3628 g_io_done = false; 3629 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_ZCOPY, offset, num_blocks, 0); 3630 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 3631 rc = spdk_bdev_zcopy_end(g_zcopy_bdev_io, commit, io_done, NULL); 3632 CU_ASSERT_EQUAL(rc, 0); 3633 num_completed = stub_complete_io(1); 3634 CU_ASSERT_EQUAL(num_completed, 1); 3635 CU_ASSERT(g_io_done == true); 3636 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS); 3637 /* Check that the g_zcopy globals are reset by io_done */ 3638 CU_ASSERT(g_zcopy_read_buf == NULL); 3639 CU_ASSERT(g_zcopy_read_buf_len == 0); 3640 /* Check that io_done has freed the g_zcopy_bdev_io */ 3641 CU_ASSERT(g_zcopy_bdev_io == NULL); 3642 3643 /* Check that the zcopy write buffer has not been touched, which 3644 * ensures that the correct buffers were used.
3645 */ 3646 CU_ASSERT(g_zcopy_write_buf == (void *) 0x1122334455667788UL); 3647 CU_ASSERT(g_zcopy_write_buf_len == (uint32_t) -1); 3648 3649 spdk_put_io_channel(ioch); 3650 spdk_bdev_close(desc); 3651 free_bdev(bdev); 3652 spdk_bdev_finish(bdev_fini_cb, NULL); 3653 poll_threads(); 3654 } 3655 3656 static void 3657 bdev_open_while_hotremove(void) 3658 { 3659 struct spdk_bdev *bdev; 3660 struct spdk_bdev_desc *desc[2] = {}; 3661 int rc; 3662 3663 bdev = allocate_bdev("bdev"); 3664 3665 rc = spdk_bdev_open_ext("bdev", false, bdev_ut_event_cb, NULL, &desc[0]); 3666 CU_ASSERT(rc == 0); 3667 SPDK_CU_ASSERT_FATAL(desc[0] != NULL); 3668 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc[0])); 3669 3670 spdk_bdev_unregister(bdev, NULL, NULL); 3671 3672 rc = spdk_bdev_open_ext("bdev", false, bdev_ut_event_cb, NULL, &desc[1]); 3673 CU_ASSERT(rc == -ENODEV); 3674 SPDK_CU_ASSERT_FATAL(desc[1] == NULL); 3675 3676 spdk_bdev_close(desc[0]); 3677 free_bdev(bdev); 3678 } 3679 3680 static void 3681 bdev_close_while_hotremove(void) 3682 { 3683 struct spdk_bdev *bdev; 3684 struct spdk_bdev_desc *desc = NULL; 3685 int rc = 0; 3686 3687 bdev = allocate_bdev("bdev"); 3688 3689 rc = spdk_bdev_open_ext("bdev", true, bdev_open_cb1, &desc, &desc); 3690 CU_ASSERT_EQUAL(rc, 0); 3691 SPDK_CU_ASSERT_FATAL(desc != NULL); 3692 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 3693 3694 /* Simulate hot-unplug by unregistering bdev */ 3695 g_event_type1 = 0xFF; 3696 g_unregister_arg = NULL; 3697 g_unregister_rc = -1; 3698 spdk_bdev_unregister(bdev, bdev_unregister_cb, (void *)0x12345678); 3699 /* Close device while remove event is in flight */ 3700 spdk_bdev_close(desc); 3701 3702 /* Ensure that unregister callback is delayed */ 3703 CU_ASSERT_EQUAL(g_unregister_arg, NULL); 3704 CU_ASSERT_EQUAL(g_unregister_rc, -1); 3705 3706 poll_threads(); 3707 3708 /* Event callback shall not be issued because device was closed */ 3709 CU_ASSERT_EQUAL(g_event_type1, 0xFF); 3710 /* Unregister callback is issued */ 3711 CU_ASSERT_EQUAL(g_unregister_arg, (void *)0x12345678); 3712 CU_ASSERT_EQUAL(g_unregister_rc, 0); 3713 3714 free_bdev(bdev); 3715 } 3716 3717 static void 3718 bdev_open_ext(void) 3719 { 3720 struct spdk_bdev *bdev; 3721 struct spdk_bdev_desc *desc1 = NULL; 3722 struct spdk_bdev_desc *desc2 = NULL; 3723 int rc = 0; 3724 3725 bdev = allocate_bdev("bdev"); 3726 3727 rc = spdk_bdev_open_ext("bdev", true, NULL, NULL, &desc1); 3728 CU_ASSERT_EQUAL(rc, -EINVAL); 3729 3730 rc = spdk_bdev_open_ext("bdev", true, bdev_open_cb1, &desc1, &desc1); 3731 CU_ASSERT_EQUAL(rc, 0); 3732 3733 rc = spdk_bdev_open_ext("bdev", true, bdev_open_cb2, &desc2, &desc2); 3734 CU_ASSERT_EQUAL(rc, 0); 3735 3736 g_event_type1 = 0xFF; 3737 g_event_type2 = 0xFF; 3738 3739 /* Simulate hot-unplug by unregistering bdev */ 3740 spdk_bdev_unregister(bdev, NULL, NULL); 3741 poll_threads(); 3742 3743 /* Check if correct events have been triggered in event callback fn */ 3744 CU_ASSERT_EQUAL(g_event_type1, SPDK_BDEV_EVENT_REMOVE); 3745 CU_ASSERT_EQUAL(g_event_type2, SPDK_BDEV_EVENT_REMOVE); 3746 3747 free_bdev(bdev); 3748 poll_threads(); 3749 } 3750 3751 struct timeout_io_cb_arg { 3752 struct iovec iov; 3753 uint8_t type; 3754 }; 3755 3756 static int 3757 bdev_channel_count_submitted_io(struct spdk_bdev_channel *ch) 3758 { 3759 struct spdk_bdev_io *bdev_io; 3760 int n = 0; 3761 3762 if (!ch) { 3763 return -1; 3764 } 3765 3766 TAILQ_FOREACH(bdev_io, &ch->io_submitted, internal.ch_link) { 3767 n++; 3768 } 3769 3770 return n; 3771 } 3772 3773 static void 3774 
bdev_channel_io_timeout_cb(void *cb_arg, struct spdk_bdev_io *bdev_io) 3775 { 3776 struct timeout_io_cb_arg *ctx = cb_arg; 3777 3778 ctx->type = bdev_io->type; 3779 ctx->iov.iov_base = bdev_io->iov.iov_base; 3780 ctx->iov.iov_len = bdev_io->iov.iov_len; 3781 } 3782 3783 static void 3784 bdev_set_io_timeout(void) 3785 { 3786 struct spdk_bdev *bdev; 3787 struct spdk_bdev_desc *desc = NULL; 3788 struct spdk_io_channel *io_ch = NULL; 3789 struct spdk_bdev_channel *bdev_ch = NULL; 3790 struct timeout_io_cb_arg cb_arg; 3791 3792 spdk_bdev_initialize(bdev_init_cb, NULL); 3793 3794 bdev = allocate_bdev("bdev"); 3795 3796 CU_ASSERT(spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc) == 0); 3797 SPDK_CU_ASSERT_FATAL(desc != NULL); 3798 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 3799 3800 io_ch = spdk_bdev_get_io_channel(desc); 3801 CU_ASSERT(io_ch != NULL); 3802 3803 bdev_ch = spdk_io_channel_get_ctx(io_ch); 3804 CU_ASSERT(TAILQ_EMPTY(&bdev_ch->io_submitted)); 3805 3806 /* This is part 1. 3807 * We will check the bdev_ch->io_submitted list 3808 * to make sure that it links only the user-submitted I/Os 3809 */ 3810 CU_ASSERT(spdk_bdev_read(desc, io_ch, (void *)0x1000, 0, 4096, io_done, NULL) == 0); 3811 CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 1); 3812 CU_ASSERT(spdk_bdev_write(desc, io_ch, (void *)0x2000, 0, 4096, io_done, NULL) == 0); 3813 CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 2); 3814 stub_complete_io(1); 3815 CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 1); 3816 stub_complete_io(1); 3817 CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 0); 3818 3819 /* Split IO */ 3820 bdev->optimal_io_boundary = 16; 3821 bdev->split_on_optimal_io_boundary = true; 3822 3823 /* Now test that a single-vector command is split correctly. 3824 * Offset 14, length 8, payload 0xF000 3825 * Child - Offset 14, length 2, payload 0xF000 3826 * Child - Offset 16, length 6, payload 0xF000 + 2 * 512 3827 * 3828 * Set up the expected values before calling spdk_bdev_read_blocks 3829 */ 3830 CU_ASSERT(spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 8, io_done, NULL) == 0); 3831 /* We count all submitted I/Os, including those generated by splitting. */
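/* The split children are linked on bdev_ch->io_submitted alongside the parent, so the
 * single user read above is expected to show up as three tracked I/Os: the parent plus
 * two children.
 */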
3832 CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 3); 3833 stub_complete_io(1); 3834 CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 2); 3835 stub_complete_io(1); 3836 CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 0); 3837 3838 /* Also include the reset IO */ 3839 CU_ASSERT(spdk_bdev_reset(desc, io_ch, io_done, NULL) == 0); 3840 CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 1); 3841 poll_threads(); 3842 stub_complete_io(1); 3843 poll_threads(); 3844 CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 0); 3845 3846 /* This is part 2. 3847 * Test registering the desc timeout poller. 3848 */ 3849 3850 /* Successfully set the timeout */ 3851 CU_ASSERT(spdk_bdev_set_timeout(desc, 30, bdev_channel_io_timeout_cb, &cb_arg) == 0); 3852 CU_ASSERT(desc->io_timeout_poller != NULL); 3853 CU_ASSERT(desc->timeout_in_sec == 30); 3854 CU_ASSERT(desc->cb_fn == bdev_channel_io_timeout_cb); 3855 CU_ASSERT(desc->cb_arg == &cb_arg); 3856 3857 /* Change the timeout limit */ 3858 CU_ASSERT(spdk_bdev_set_timeout(desc, 20, bdev_channel_io_timeout_cb, &cb_arg) == 0); 3859 CU_ASSERT(desc->io_timeout_poller != NULL); 3860 CU_ASSERT(desc->timeout_in_sec == 20); 3861 CU_ASSERT(desc->cb_fn == bdev_channel_io_timeout_cb); 3862 CU_ASSERT(desc->cb_arg == &cb_arg); 3863 3864 /* Disable the timeout */ 3865 CU_ASSERT(spdk_bdev_set_timeout(desc, 0, NULL, NULL) == 0); 3866 CU_ASSERT(desc->io_timeout_poller == NULL); 3867 3868 /* This is part 3. 3869 * We will catch a timed-out I/O and check whether it is 3870 * the submitted one. 3871 */ 3872 memset(&cb_arg, 0, sizeof(cb_arg)); 3873 CU_ASSERT(spdk_bdev_set_timeout(desc, 30, bdev_channel_io_timeout_cb, &cb_arg) == 0); 3874 CU_ASSERT(spdk_bdev_write_blocks(desc, io_ch, (void *)0x1000, 0, 1, io_done, NULL) == 0); 3875 3876 /* Don't reach the limit */ 3877 spdk_delay_us(15 * spdk_get_ticks_hz()); 3878 poll_threads(); 3879 CU_ASSERT(cb_arg.type == 0); 3880 CU_ASSERT(cb_arg.iov.iov_base == (void *)0x0); 3881 CU_ASSERT(cb_arg.iov.iov_len == 0); 3882 3883 /* 15 + 15 = 30: now we reach the limit */ 3884 spdk_delay_us(15 * spdk_get_ticks_hz()); 3885 poll_threads(); 3886 CU_ASSERT(cb_arg.type == SPDK_BDEV_IO_TYPE_WRITE); 3887 CU_ASSERT(cb_arg.iov.iov_base == (void *)0x1000); 3888 CU_ASSERT(cb_arg.iov.iov_len == 1 * bdev->blocklen); 3889 stub_complete_io(1); 3890 3891 /* Use the same split I/O as above and check the timed-out I/O */ 3892 memset(&cb_arg, 0, sizeof(cb_arg)); 3893 CU_ASSERT(spdk_bdev_write_blocks(desc, io_ch, (void *)0xF000, 14, 8, io_done, NULL) == 0); 3894 3895 /* The first child completes in time */ 3896 spdk_delay_us(15 * spdk_get_ticks_hz()); 3897 poll_threads(); 3898 stub_complete_io(1); 3899 CU_ASSERT(cb_arg.type == 0); 3900 CU_ASSERT(cb_arg.iov.iov_base == (void *)0x0); 3901 CU_ASSERT(cb_arg.iov.iov_len == 0); 3902 3903 /* The second child reaches the limit */ 3904 spdk_delay_us(15 * spdk_get_ticks_hz()); 3905 poll_threads(); 3906 CU_ASSERT(cb_arg.type == SPDK_BDEV_IO_TYPE_WRITE); 3907 CU_ASSERT(cb_arg.iov.iov_base == (void *)0xF000); 3908 CU_ASSERT(cb_arg.iov.iov_len == 8 * bdev->blocklen); 3909 stub_complete_io(1); 3910 3911 /* Also include the reset IO */ 3912 memset(&cb_arg, 0, sizeof(cb_arg)); 3913 CU_ASSERT(spdk_bdev_reset(desc, io_ch, io_done, NULL) == 0); 3914 spdk_delay_us(30 * spdk_get_ticks_hz()); 3915 poll_threads(); 3916 CU_ASSERT(cb_arg.type == SPDK_BDEV_IO_TYPE_RESET); 3917 stub_complete_io(1); 3918 poll_threads(); 3919 3920 spdk_put_io_channel(io_ch); 3921 spdk_bdev_close(desc); 3922 free_bdev(bdev); 3923 spdk_bdev_finish(bdev_fini_cb, NULL);
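/* spdk_bdev_finish() completes asynchronously; the poll_threads() below drives the mock
 * event loop so that bdev_fini_cb can run before the next test starts.
 */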
3924 poll_threads(); 3925 } 3926 3927 static void 3928 lba_range_overlap(void) 3929 { 3930 struct lba_range r1, r2; 3931 3932 r1.offset = 100; 3933 r1.length = 50; 3934 3935 r2.offset = 0; 3936 r2.length = 1; 3937 CU_ASSERT(!bdev_lba_range_overlapped(&r1, &r2)); 3938 3939 r2.offset = 0; 3940 r2.length = 100; 3941 CU_ASSERT(!bdev_lba_range_overlapped(&r1, &r2)); 3942 3943 r2.offset = 0; 3944 r2.length = 110; 3945 CU_ASSERT(bdev_lba_range_overlapped(&r1, &r2)); 3946 3947 r2.offset = 100; 3948 r2.length = 10; 3949 CU_ASSERT(bdev_lba_range_overlapped(&r1, &r2)); 3950 3951 r2.offset = 110; 3952 r2.length = 20; 3953 CU_ASSERT(bdev_lba_range_overlapped(&r1, &r2)); 3954 3955 r2.offset = 140; 3956 r2.length = 150; 3957 CU_ASSERT(bdev_lba_range_overlapped(&r1, &r2)); 3958 3959 r2.offset = 130; 3960 r2.length = 200; 3961 CU_ASSERT(bdev_lba_range_overlapped(&r1, &r2)); 3962 3963 r2.offset = 150; 3964 r2.length = 100; 3965 CU_ASSERT(!bdev_lba_range_overlapped(&r1, &r2)); 3966 3967 r2.offset = 110; 3968 r2.length = 0; 3969 CU_ASSERT(!bdev_lba_range_overlapped(&r1, &r2)); 3970 } 3971 3972 static bool g_lock_lba_range_done; 3973 static bool g_unlock_lba_range_done; 3974 3975 static void 3976 lock_lba_range_done(void *ctx, int status) 3977 { 3978 g_lock_lba_range_done = true; 3979 } 3980 3981 static void 3982 unlock_lba_range_done(void *ctx, int status) 3983 { 3984 g_unlock_lba_range_done = true; 3985 } 3986 3987 static void 3988 lock_lba_range_check_ranges(void) 3989 { 3990 struct spdk_bdev *bdev; 3991 struct spdk_bdev_desc *desc = NULL; 3992 struct spdk_io_channel *io_ch; 3993 struct spdk_bdev_channel *channel; 3994 struct lba_range *range; 3995 int ctx1; 3996 int rc; 3997 3998 spdk_bdev_initialize(bdev_init_cb, NULL); 3999 4000 bdev = allocate_bdev("bdev0"); 4001 4002 rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc); 4003 CU_ASSERT(rc == 0); 4004 CU_ASSERT(desc != NULL); 4005 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 4006 io_ch = spdk_bdev_get_io_channel(desc); 4007 CU_ASSERT(io_ch != NULL); 4008 channel = spdk_io_channel_get_ctx(io_ch); 4009 4010 g_lock_lba_range_done = false; 4011 rc = bdev_lock_lba_range(desc, io_ch, 20, 10, lock_lba_range_done, &ctx1); 4012 CU_ASSERT(rc == 0); 4013 poll_threads(); 4014 4015 CU_ASSERT(g_lock_lba_range_done == true); 4016 range = TAILQ_FIRST(&channel->locked_ranges); 4017 SPDK_CU_ASSERT_FATAL(range != NULL); 4018 CU_ASSERT(range->offset == 20); 4019 CU_ASSERT(range->length == 10); 4020 CU_ASSERT(range->owner_ch == channel); 4021 4022 /* Unlocks must exactly match a lock. 
*/ 4023 g_unlock_lba_range_done = false; 4024 rc = bdev_unlock_lba_range(desc, io_ch, 20, 1, unlock_lba_range_done, &ctx1); 4025 CU_ASSERT(rc == -EINVAL); 4026 CU_ASSERT(g_unlock_lba_range_done == false); 4027 4028 rc = bdev_unlock_lba_range(desc, io_ch, 20, 10, unlock_lba_range_done, &ctx1); 4029 CU_ASSERT(rc == 0); 4030 spdk_delay_us(100); 4031 poll_threads(); 4032 4033 CU_ASSERT(g_unlock_lba_range_done == true); 4034 CU_ASSERT(TAILQ_EMPTY(&channel->locked_ranges)); 4035 4036 spdk_put_io_channel(io_ch); 4037 spdk_bdev_close(desc); 4038 free_bdev(bdev); 4039 spdk_bdev_finish(bdev_fini_cb, NULL); 4040 poll_threads(); 4041 } 4042 4043 static void 4044 lock_lba_range_with_io_outstanding(void) 4045 { 4046 struct spdk_bdev *bdev; 4047 struct spdk_bdev_desc *desc = NULL; 4048 struct spdk_io_channel *io_ch; 4049 struct spdk_bdev_channel *channel; 4050 struct lba_range *range; 4051 char buf[4096]; 4052 int ctx1; 4053 int rc; 4054 4055 spdk_bdev_initialize(bdev_init_cb, NULL); 4056 4057 bdev = allocate_bdev("bdev0"); 4058 4059 rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc); 4060 CU_ASSERT(rc == 0); 4061 CU_ASSERT(desc != NULL); 4062 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 4063 io_ch = spdk_bdev_get_io_channel(desc); 4064 CU_ASSERT(io_ch != NULL); 4065 channel = spdk_io_channel_get_ctx(io_ch); 4066 4067 g_io_done = false; 4068 rc = spdk_bdev_read_blocks(desc, io_ch, buf, 20, 1, io_done, &ctx1); 4069 CU_ASSERT(rc == 0); 4070 4071 g_lock_lba_range_done = false; 4072 rc = bdev_lock_lba_range(desc, io_ch, 20, 10, lock_lba_range_done, &ctx1); 4073 CU_ASSERT(rc == 0); 4074 poll_threads(); 4075 4076 /* The lock should immediately become valid, since there are no outstanding 4077 * write I/O. 4078 */ 4079 CU_ASSERT(g_io_done == false); 4080 CU_ASSERT(g_lock_lba_range_done == true); 4081 range = TAILQ_FIRST(&channel->locked_ranges); 4082 SPDK_CU_ASSERT_FATAL(range != NULL); 4083 CU_ASSERT(range->offset == 20); 4084 CU_ASSERT(range->length == 10); 4085 CU_ASSERT(range->owner_ch == channel); 4086 CU_ASSERT(range->locked_ctx == &ctx1); 4087 4088 rc = bdev_unlock_lba_range(desc, io_ch, 20, 10, lock_lba_range_done, &ctx1); 4089 CU_ASSERT(rc == 0); 4090 stub_complete_io(1); 4091 spdk_delay_us(100); 4092 poll_threads(); 4093 4094 CU_ASSERT(TAILQ_EMPTY(&channel->locked_ranges)); 4095 4096 /* Now try again, but with a write I/O. */ 4097 g_io_done = false; 4098 rc = spdk_bdev_write_blocks(desc, io_ch, buf, 20, 1, io_done, &ctx1); 4099 CU_ASSERT(rc == 0); 4100 4101 g_lock_lba_range_done = false; 4102 rc = bdev_lock_lba_range(desc, io_ch, 20, 10, lock_lba_range_done, &ctx1); 4103 CU_ASSERT(rc == 0); 4104 poll_threads(); 4105 4106 /* The lock should not be fully valid yet, since a write I/O is outstanding. 4107 * But note that the range should be on the channel's locked_list, to make sure no 4108 * new write I/O are started. 4109 */ 4110 CU_ASSERT(g_io_done == false); 4111 CU_ASSERT(g_lock_lba_range_done == false); 4112 range = TAILQ_FIRST(&channel->locked_ranges); 4113 SPDK_CU_ASSERT_FATAL(range != NULL); 4114 CU_ASSERT(range->offset == 20); 4115 CU_ASSERT(range->length == 10); 4116 4117 /* Complete the write I/O. This should make the lock valid (checked by confirming 4118 * our callback was invoked). 
4119 */ 4120 stub_complete_io(1); 4121 spdk_delay_us(100); 4122 poll_threads(); 4123 CU_ASSERT(g_io_done == true); 4124 CU_ASSERT(g_lock_lba_range_done == true); 4125 4126 rc = bdev_unlock_lba_range(desc, io_ch, 20, 10, unlock_lba_range_done, &ctx1); 4127 CU_ASSERT(rc == 0); 4128 poll_threads(); 4129 4130 CU_ASSERT(TAILQ_EMPTY(&channel->locked_ranges)); 4131 4132 spdk_put_io_channel(io_ch); 4133 spdk_bdev_close(desc); 4134 free_bdev(bdev); 4135 spdk_bdev_finish(bdev_fini_cb, NULL); 4136 poll_threads(); 4137 } 4138 4139 static void 4140 lock_lba_range_overlapped(void) 4141 { 4142 struct spdk_bdev *bdev; 4143 struct spdk_bdev_desc *desc = NULL; 4144 struct spdk_io_channel *io_ch; 4145 struct spdk_bdev_channel *channel; 4146 struct lba_range *range; 4147 int ctx1; 4148 int rc; 4149 4150 spdk_bdev_initialize(bdev_init_cb, NULL); 4151 4152 bdev = allocate_bdev("bdev0"); 4153 4154 rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc); 4155 CU_ASSERT(rc == 0); 4156 CU_ASSERT(desc != NULL); 4157 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 4158 io_ch = spdk_bdev_get_io_channel(desc); 4159 CU_ASSERT(io_ch != NULL); 4160 channel = spdk_io_channel_get_ctx(io_ch); 4161 4162 /* Lock range 20-29. */ 4163 g_lock_lba_range_done = false; 4164 rc = bdev_lock_lba_range(desc, io_ch, 20, 10, lock_lba_range_done, &ctx1); 4165 CU_ASSERT(rc == 0); 4166 poll_threads(); 4167 4168 CU_ASSERT(g_lock_lba_range_done == true); 4169 range = TAILQ_FIRST(&channel->locked_ranges); 4170 SPDK_CU_ASSERT_FATAL(range != NULL); 4171 CU_ASSERT(range->offset == 20); 4172 CU_ASSERT(range->length == 10); 4173 4174 /* Try to lock range 25-39. It should not lock immediately, since it overlaps with 4175 * 20-29. 4176 */ 4177 g_lock_lba_range_done = false; 4178 rc = bdev_lock_lba_range(desc, io_ch, 25, 15, lock_lba_range_done, &ctx1); 4179 CU_ASSERT(rc == 0); 4180 poll_threads(); 4181 4182 CU_ASSERT(g_lock_lba_range_done == false); 4183 range = TAILQ_FIRST(&bdev->internal.pending_locked_ranges); 4184 SPDK_CU_ASSERT_FATAL(range != NULL); 4185 CU_ASSERT(range->offset == 25); 4186 CU_ASSERT(range->length == 15); 4187 4188 /* Unlock 20-29. This should result in range 25-39 now getting locked since it 4189 * no longer overlaps with an active lock. 4190 */ 4191 g_unlock_lba_range_done = false; 4192 rc = bdev_unlock_lba_range(desc, io_ch, 20, 10, unlock_lba_range_done, &ctx1); 4193 CU_ASSERT(rc == 0); 4194 poll_threads(); 4195 4196 CU_ASSERT(g_unlock_lba_range_done == true); 4197 CU_ASSERT(TAILQ_EMPTY(&bdev->internal.pending_locked_ranges)); 4198 range = TAILQ_FIRST(&channel->locked_ranges); 4199 SPDK_CU_ASSERT_FATAL(range != NULL); 4200 CU_ASSERT(range->offset == 25); 4201 CU_ASSERT(range->length == 15); 4202 4203 /* Lock 40-59. This should immediately lock since it does not overlap with the 4204 * currently active 25-39 lock. 4205 */ 4206 g_lock_lba_range_done = false; 4207 rc = bdev_lock_lba_range(desc, io_ch, 40, 20, lock_lba_range_done, &ctx1); 4208 CU_ASSERT(rc == 0); 4209 poll_threads(); 4210 4211 CU_ASSERT(g_lock_lba_range_done == true); 4212 range = TAILQ_FIRST(&bdev->internal.locked_ranges); 4213 SPDK_CU_ASSERT_FATAL(range != NULL); 4214 range = TAILQ_NEXT(range, tailq); 4215 SPDK_CU_ASSERT_FATAL(range != NULL); 4216 CU_ASSERT(range->offset == 40); 4217 CU_ASSERT(range->length == 20); 4218 4219 /* Try to lock 35-44. Note that this overlaps with both 25-39 and 40-59. 
*/ 4220 g_lock_lba_range_done = false; 4221 rc = bdev_lock_lba_range(desc, io_ch, 35, 10, lock_lba_range_done, &ctx1); 4222 CU_ASSERT(rc == 0); 4223 poll_threads(); 4224 4225 CU_ASSERT(g_lock_lba_range_done == false); 4226 range = TAILQ_FIRST(&bdev->internal.pending_locked_ranges); 4227 SPDK_CU_ASSERT_FATAL(range != NULL); 4228 CU_ASSERT(range->offset == 35); 4229 CU_ASSERT(range->length == 10); 4230 4231 /* Unlock 25-39. Make sure that 35-44 is still in the pending list, since 4232 * the 40-59 lock is still active. 4233 */ 4234 g_unlock_lba_range_done = false; 4235 rc = bdev_unlock_lba_range(desc, io_ch, 25, 15, unlock_lba_range_done, &ctx1); 4236 CU_ASSERT(rc == 0); 4237 poll_threads(); 4238 4239 CU_ASSERT(g_unlock_lba_range_done == true); 4240 CU_ASSERT(g_lock_lba_range_done == false); 4241 range = TAILQ_FIRST(&bdev->internal.pending_locked_ranges); 4242 SPDK_CU_ASSERT_FATAL(range != NULL); 4243 CU_ASSERT(range->offset == 35); 4244 CU_ASSERT(range->length == 10); 4245 4246 /* Unlock 40-59. This should result in 35-44 now getting locked, since there are 4247 * no longer any active overlapping locks. 4248 */ 4249 g_unlock_lba_range_done = false; 4250 rc = bdev_unlock_lba_range(desc, io_ch, 40, 20, unlock_lba_range_done, &ctx1); 4251 CU_ASSERT(rc == 0); 4252 poll_threads(); 4253 4254 CU_ASSERT(g_unlock_lba_range_done == true); 4255 CU_ASSERT(g_lock_lba_range_done == true); 4256 CU_ASSERT(TAILQ_EMPTY(&bdev->internal.pending_locked_ranges)); 4257 range = TAILQ_FIRST(&bdev->internal.locked_ranges); 4258 SPDK_CU_ASSERT_FATAL(range != NULL); 4259 CU_ASSERT(range->offset == 35); 4260 CU_ASSERT(range->length == 10); 4261 4262 /* Finally, unlock 35-44. */ 4263 g_unlock_lba_range_done = false; 4264 rc = bdev_unlock_lba_range(desc, io_ch, 35, 10, unlock_lba_range_done, &ctx1); 4265 CU_ASSERT(rc == 0); 4266 poll_threads(); 4267 4268 CU_ASSERT(g_unlock_lba_range_done == true); 4269 CU_ASSERT(TAILQ_EMPTY(&bdev->internal.locked_ranges)); 4270 4271 spdk_put_io_channel(io_ch); 4272 spdk_bdev_close(desc); 4273 free_bdev(bdev); 4274 spdk_bdev_finish(bdev_fini_cb, NULL); 4275 poll_threads(); 4276 } 4277 4278 static void 4279 abort_done(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg) 4280 { 4281 g_abort_done = true; 4282 g_abort_status = bdev_io->internal.status; 4283 spdk_bdev_free_io(bdev_io); 4284 } 4285 4286 static void 4287 bdev_io_abort(void) 4288 { 4289 struct spdk_bdev *bdev; 4290 struct spdk_bdev_desc *desc = NULL; 4291 struct spdk_io_channel *io_ch; 4292 struct spdk_bdev_channel *channel; 4293 struct spdk_bdev_mgmt_channel *mgmt_ch; 4294 struct spdk_bdev_opts bdev_opts = {}; 4295 struct iovec iov[BDEV_IO_NUM_CHILD_IOV * 2]; 4296 uint64_t io_ctx1 = 0, io_ctx2 = 0, i; 4297 int rc; 4298 4299 spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts)); 4300 bdev_opts.bdev_io_pool_size = 7; 4301 bdev_opts.bdev_io_cache_size = 2; 4302 4303 rc = spdk_bdev_set_opts(&bdev_opts); 4304 CU_ASSERT(rc == 0); 4305 spdk_bdev_initialize(bdev_init_cb, NULL); 4306 4307 bdev = allocate_bdev("bdev0"); 4308 4309 rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc); 4310 CU_ASSERT(rc == 0); 4311 CU_ASSERT(desc != NULL); 4312 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 4313 io_ch = spdk_bdev_get_io_channel(desc); 4314 CU_ASSERT(io_ch != NULL); 4315 channel = spdk_io_channel_get_ctx(io_ch); 4316 mgmt_ch = channel->shared_resource->mgmt_ch; 4317 4318 g_abort_done = false; 4319 4320 ut_enable_io_type(SPDK_BDEV_IO_TYPE_ABORT, false); 4321 4322 rc = spdk_bdev_abort(desc, io_ch, &io_ctx1, abort_done, 
NULL); 4323 CU_ASSERT(rc == -ENOTSUP); 4324 4325 ut_enable_io_type(SPDK_BDEV_IO_TYPE_ABORT, true); 4326 4327 rc = spdk_bdev_abort(desc, io_ch, &io_ctx2, abort_done, NULL); 4328 CU_ASSERT(rc == 0); 4329 CU_ASSERT(g_abort_done == true); 4330 CU_ASSERT(g_abort_status == SPDK_BDEV_IO_STATUS_FAILED); 4331 4332 /* Test the case that the target I/O was successfully aborted. */ 4333 g_io_done = false; 4334 4335 rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, &io_ctx1); 4336 CU_ASSERT(rc == 0); 4337 CU_ASSERT(g_io_done == false); 4338 4339 g_abort_done = false; 4340 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 4341 4342 rc = spdk_bdev_abort(desc, io_ch, &io_ctx1, abort_done, NULL); 4343 CU_ASSERT(rc == 0); 4344 CU_ASSERT(g_io_done == true); 4345 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED); 4346 stub_complete_io(1); 4347 CU_ASSERT(g_abort_done == true); 4348 CU_ASSERT(g_abort_status == SPDK_BDEV_IO_STATUS_SUCCESS); 4349 4350 /* Test the case that the target I/O was not aborted because it completed 4351 * in the middle of execution of the abort. 4352 */ 4353 g_io_done = false; 4354 4355 rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, &io_ctx1); 4356 CU_ASSERT(rc == 0); 4357 CU_ASSERT(g_io_done == false); 4358 4359 g_abort_done = false; 4360 g_io_exp_status = SPDK_BDEV_IO_STATUS_FAILED; 4361 4362 rc = spdk_bdev_abort(desc, io_ch, &io_ctx1, abort_done, NULL); 4363 CU_ASSERT(rc == 0); 4364 CU_ASSERT(g_io_done == false); 4365 4366 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 4367 stub_complete_io(1); 4368 CU_ASSERT(g_io_done == true); 4369 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS); 4370 4371 g_io_exp_status = SPDK_BDEV_IO_STATUS_FAILED; 4372 stub_complete_io(1); 4373 CU_ASSERT(g_abort_done == true); 4374 CU_ASSERT(g_abort_status == SPDK_BDEV_IO_STATUS_SUCCESS); 4375 4376 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 4377 4378 bdev->optimal_io_boundary = 16; 4379 bdev->split_on_optimal_io_boundary = true; 4380 4381 /* Test that a single-vector command which is split is aborted correctly. 4382 * Offset 14, length 8, payload 0xF000 4383 * Child - Offset 14, length 2, payload 0xF000 4384 * Child - Offset 16, length 6, payload 0xF000 + 2 * 512 4385 */ 4386 g_io_done = false; 4387 4388 rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 8, io_done, &io_ctx1); 4389 CU_ASSERT(rc == 0); 4390 CU_ASSERT(g_io_done == false); 4391 4392 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 4393 4394 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 4395 4396 rc = spdk_bdev_abort(desc, io_ch, &io_ctx1, abort_done, NULL); 4397 CU_ASSERT(rc == 0); 4398 CU_ASSERT(g_io_done == true); 4399 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED); 4400 stub_complete_io(2); 4401 CU_ASSERT(g_abort_done == true); 4402 CU_ASSERT(g_abort_status == SPDK_BDEV_IO_STATUS_SUCCESS); 4403 4404 /* Test that a multi-vector command that needs to be split both on the optimal 4405 * I/O boundary and by the child iov capacity is aborted correctly. Abort is requested before the second 4406 * child I/O is submitted. The parent I/O should complete with failure without 4407 * submitting the second child I/O. */
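/* Aborting a split I/O is expected to require one child abort per outstanding child
 * bdev_io, which is why the bdev_io pool capacity matters in the sequential-abort case
 * later in this test. This describes the behavior these assertions rely on.
 */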
4409 for (i = 0; i < BDEV_IO_NUM_CHILD_IOV * 2; i++) { 4410 iov[i].iov_base = (void *)((i + 1) * 0x10000); 4411 iov[i].iov_len = 512; 4412 } 4413 4414 bdev->optimal_io_boundary = BDEV_IO_NUM_CHILD_IOV; 4415 g_io_done = false; 4416 rc = spdk_bdev_readv_blocks(desc, io_ch, iov, BDEV_IO_NUM_CHILD_IOV * 2, 0, 4417 BDEV_IO_NUM_CHILD_IOV * 2, io_done, &io_ctx1); 4418 CU_ASSERT(rc == 0); 4419 CU_ASSERT(g_io_done == false); 4420 4421 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 4422 4423 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 4424 4425 rc = spdk_bdev_abort(desc, io_ch, &io_ctx1, abort_done, NULL); 4426 CU_ASSERT(rc == 0); 4427 CU_ASSERT(g_io_done == true); 4428 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED); 4429 stub_complete_io(1); 4430 CU_ASSERT(g_abort_done == true); 4431 CU_ASSERT(g_abort_status == SPDK_BDEV_IO_STATUS_SUCCESS); 4432 4433 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 4434 4435 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 4436 4437 bdev->optimal_io_boundary = 16; 4438 g_io_done = false; 4439 4440 /* Test that a single-vector command which is split is aborted correctly. 4441 * Unlike the above, the child abort requests will be submitted 4442 * sequentially due to the limited capacity of the spdk_bdev_io pool. 4443 */ 4444 rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 50, io_done, &io_ctx1); 4445 CU_ASSERT(rc == 0); 4446 CU_ASSERT(g_io_done == false); 4447 4448 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 4); 4449 4450 g_abort_done = false; 4451 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 4452 4453 rc = spdk_bdev_abort(desc, io_ch, &io_ctx1, abort_done, NULL); 4454 CU_ASSERT(rc == 0); 4455 CU_ASSERT(!TAILQ_EMPTY(&mgmt_ch->io_wait_queue)); 4456 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 4); 4457 4458 stub_complete_io(1); 4459 CU_ASSERT(g_io_done == true); 4460 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED); 4461 stub_complete_io(3); 4462 CU_ASSERT(g_abort_done == true); 4463 CU_ASSERT(g_abort_status == SPDK_BDEV_IO_STATUS_SUCCESS); 4464 4465 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 4466 4467 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 4468 4469 spdk_put_io_channel(io_ch); 4470 spdk_bdev_close(desc); 4471 free_bdev(bdev); 4472 spdk_bdev_finish(bdev_fini_cb, NULL); 4473 poll_threads(); 4474 } 4475 4476 static void 4477 bdev_unmap(void) 4478 { 4479 struct spdk_bdev *bdev; 4480 struct spdk_bdev_desc *desc = NULL; 4481 struct spdk_io_channel *ioch; 4482 struct spdk_bdev_channel *bdev_ch; 4483 struct ut_expected_io *expected_io; 4484 struct spdk_bdev_opts bdev_opts = {}; 4485 uint32_t i, num_outstanding; 4486 uint64_t offset, num_blocks, max_unmap_blocks, num_children; 4487 int rc; 4488 4489 spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts)); 4490 bdev_opts.bdev_io_pool_size = 512; 4491 bdev_opts.bdev_io_cache_size = 64; 4492 rc = spdk_bdev_set_opts(&bdev_opts); 4493 CU_ASSERT(rc == 0); 4494 4495 spdk_bdev_initialize(bdev_init_cb, NULL); 4496 bdev = allocate_bdev("bdev"); 4497 4498 rc = spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc); 4499 CU_ASSERT_EQUAL(rc, 0); 4500 SPDK_CU_ASSERT_FATAL(desc != NULL); 4501 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 4502 ioch = spdk_bdev_get_io_channel(desc); 4503 SPDK_CU_ASSERT_FATAL(ioch != NULL); 4504 bdev_ch = spdk_io_channel_get_ctx(ioch); 4505 CU_ASSERT(TAILQ_EMPTY(&bdev_ch->io_submitted)); 4506 4507 fn_table.submit_request = stub_submit_request; 4508 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 4509 4510 /* Case 1: First test that
the request won't be split */ 4511 num_blocks = 32; 4512 4513 g_io_done = false; 4514 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_UNMAP, 0, num_blocks, 0); 4515 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 4516 rc = spdk_bdev_unmap_blocks(desc, ioch, 0, num_blocks, io_done, NULL); 4517 CU_ASSERT_EQUAL(rc, 0); 4518 CU_ASSERT(g_io_done == false); 4519 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 4520 stub_complete_io(1); 4521 CU_ASSERT(g_io_done == true); 4522 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 4523 4524 /* Case 2: Test the split with 2 children requests */ 4525 bdev->max_unmap = 8; 4526 bdev->max_unmap_segments = 2; 4527 max_unmap_blocks = bdev->max_unmap * bdev->max_unmap_segments; 4528 num_blocks = max_unmap_blocks * 2; 4529 offset = 0; 4530 4531 g_io_done = false; 4532 for (i = 0; i < 2; i++) { 4533 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_UNMAP, offset, max_unmap_blocks, 0); 4534 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 4535 offset += max_unmap_blocks; 4536 } 4537 4538 rc = spdk_bdev_unmap_blocks(desc, ioch, 0, num_blocks, io_done, NULL); 4539 CU_ASSERT_EQUAL(rc, 0); 4540 CU_ASSERT(g_io_done == false); 4541 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 4542 stub_complete_io(2); 4543 CU_ASSERT(g_io_done == true); 4544 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 4545 4546 /* Case 3: Test the split with 15 children requests, will finish 8 requests first */ 4547 num_children = 15; 4548 num_blocks = max_unmap_blocks * num_children; 4549 g_io_done = false; 4550 offset = 0; 4551 for (i = 0; i < num_children; i++) { 4552 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_UNMAP, offset, max_unmap_blocks, 0); 4553 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 4554 offset += max_unmap_blocks; 4555 } 4556 4557 rc = spdk_bdev_unmap_blocks(desc, ioch, 0, num_blocks, io_done, NULL); 4558 CU_ASSERT_EQUAL(rc, 0); 4559 CU_ASSERT(g_io_done == false); 4560 4561 while (num_children > 0) { 4562 num_outstanding = spdk_min(num_children, SPDK_BDEV_MAX_CHILDREN_UNMAP_WRITE_ZEROES_REQS); 4563 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == num_outstanding); 4564 stub_complete_io(num_outstanding); 4565 num_children -= num_outstanding; 4566 } 4567 CU_ASSERT(g_io_done == true); 4568 4569 spdk_put_io_channel(ioch); 4570 spdk_bdev_close(desc); 4571 free_bdev(bdev); 4572 spdk_bdev_finish(bdev_fini_cb, NULL); 4573 poll_threads(); 4574 } 4575 4576 static void 4577 bdev_write_zeroes_split_test(void) 4578 { 4579 struct spdk_bdev *bdev; 4580 struct spdk_bdev_desc *desc = NULL; 4581 struct spdk_io_channel *ioch; 4582 struct spdk_bdev_channel *bdev_ch; 4583 struct ut_expected_io *expected_io; 4584 struct spdk_bdev_opts bdev_opts = {}; 4585 uint32_t i, num_outstanding; 4586 uint64_t offset, num_blocks, max_write_zeroes_blocks, num_children; 4587 int rc; 4588 4589 spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts)); 4590 bdev_opts.bdev_io_pool_size = 512; 4591 bdev_opts.bdev_io_cache_size = 64; 4592 rc = spdk_bdev_set_opts(&bdev_opts); 4593 CU_ASSERT(rc == 0); 4594 4595 spdk_bdev_initialize(bdev_init_cb, NULL); 4596 bdev = allocate_bdev("bdev"); 4597 4598 rc = spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc); 4599 CU_ASSERT_EQUAL(rc, 0); 4600 SPDK_CU_ASSERT_FATAL(desc != NULL); 4601 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 4602 ioch = spdk_bdev_get_io_channel(desc); 4603 SPDK_CU_ASSERT_FATAL(ioch != NULL); 4604 bdev_ch = 
spdk_io_channel_get_ctx(ioch); 4605 CU_ASSERT(TAILQ_EMPTY(&bdev_ch->io_submitted)); 4606 4607 fn_table.submit_request = stub_submit_request; 4608 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 4609 4610 /* Case 1: First test that the request won't be split */ 4611 num_blocks = 32; 4612 4613 g_io_done = false; 4614 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, 0, num_blocks, 0); 4615 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 4616 rc = spdk_bdev_write_zeroes_blocks(desc, ioch, 0, num_blocks, io_done, NULL); 4617 CU_ASSERT_EQUAL(rc, 0); 4618 CU_ASSERT(g_io_done == false); 4619 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 4620 stub_complete_io(1); 4621 CU_ASSERT(g_io_done == true); 4622 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 4623 4624 /* Case 2: Test the split with 2 children requests */ 4625 max_write_zeroes_blocks = 8; 4626 bdev->max_write_zeroes = max_write_zeroes_blocks; 4627 num_blocks = max_write_zeroes_blocks * 2; 4628 offset = 0; 4629 4630 g_io_done = false; 4631 for (i = 0; i < 2; i++) { 4632 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, offset, max_write_zeroes_blocks, 4633 0); 4634 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 4635 offset += max_write_zeroes_blocks; 4636 } 4637 4638 rc = spdk_bdev_write_zeroes_blocks(desc, ioch, 0, num_blocks, io_done, NULL); 4639 CU_ASSERT_EQUAL(rc, 0); 4640 CU_ASSERT(g_io_done == false); 4641 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 4642 stub_complete_io(2); 4643 CU_ASSERT(g_io_done == true); 4644 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 4645 4646 /* Case 3: Test the split with 15 children requests, will finish 8 requests first */ 4647 num_children = 15; 4648 num_blocks = max_write_zeroes_blocks * num_children; 4649 g_io_done = false; 4650 offset = 0; 4651 for (i = 0; i < num_children; i++) { 4652 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, offset, max_write_zeroes_blocks, 4653 0); 4654 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 4655 offset += max_write_zeroes_blocks; 4656 } 4657 4658 rc = spdk_bdev_write_zeroes_blocks(desc, ioch, 0, num_blocks, io_done, NULL); 4659 CU_ASSERT_EQUAL(rc, 0); 4660 CU_ASSERT(g_io_done == false); 4661 4662 while (num_children > 0) { 4663 num_outstanding = spdk_min(num_children, SPDK_BDEV_MAX_CHILDREN_UNMAP_WRITE_ZEROES_REQS); 4664 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == num_outstanding); 4665 stub_complete_io(num_outstanding); 4666 num_children -= num_outstanding; 4667 } 4668 CU_ASSERT(g_io_done == true); 4669 4670 spdk_put_io_channel(ioch); 4671 spdk_bdev_close(desc); 4672 free_bdev(bdev); 4673 spdk_bdev_finish(bdev_fini_cb, NULL); 4674 poll_threads(); 4675 } 4676 4677 static void 4678 bdev_set_options_test(void) 4679 { 4680 struct spdk_bdev_opts bdev_opts = {}; 4681 int rc; 4682 4683 /* Case 1: Do not set opts_size */ 4684 rc = spdk_bdev_set_opts(&bdev_opts); 4685 CU_ASSERT(rc == -1); 4686 4687 spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts)); 4688 bdev_opts.bdev_io_pool_size = 4; 4689 bdev_opts.bdev_io_cache_size = 2; 4690 bdev_opts.small_buf_pool_size = 4; 4691 4692 /* Case 2: Do not set valid small_buf_pool_size and large_buf_pool_size */ 4693 rc = spdk_bdev_set_opts(&bdev_opts); 4694 CU_ASSERT(rc == -1); 4695 4696 /* Case 3: Do not set valid large_buf_pool_size */ 4697 bdev_opts.small_buf_pool_size = BUF_SMALL_POOL_SIZE; 4698 bdev_opts.large_buf_pool_size = BUF_LARGE_POOL_SIZE - 1;
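/* The cases here suggest the validation rule: buf pool sizes may not shrink below the
 * built-in defaults (BUF_SMALL_POOL_SIZE and BUF_LARGE_POOL_SIZE), while growing them is
 * accepted. This is inferred from the assertions, not a statement of the API contract.
 */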
4699 rc = spdk_bdev_set_opts(&bdev_opts); 4700 CU_ASSERT(rc == -1); 4701 4702 /* Case 4: Set a valid large_buf_pool_size */ 4703 bdev_opts.large_buf_pool_size = BUF_LARGE_POOL_SIZE; 4704 rc = spdk_bdev_set_opts(&bdev_opts); 4705 CU_ASSERT(rc == 0); 4706 4707 /* Case 5: Set different valid values for the small and large buf pools */ 4708 bdev_opts.small_buf_pool_size = BUF_SMALL_POOL_SIZE + 3; 4709 bdev_opts.large_buf_pool_size = BUF_LARGE_POOL_SIZE + 3; 4710 rc = spdk_bdev_set_opts(&bdev_opts); 4711 CU_ASSERT(rc == 0); 4712 } 4713 4714 static uint64_t 4715 get_ns_time(void) 4716 { 4717 int rc; 4718 struct timespec ts; 4719 4720 rc = clock_gettime(CLOCK_MONOTONIC, &ts); 4721 CU_ASSERT(rc == 0); 4722 return ts.tv_sec * 1000 * 1000 * 1000 + ts.tv_nsec; 4723 } 4724 4725 static int 4726 rb_tree_get_height(struct spdk_bdev_name *bdev_name) 4727 { 4728 int h1, h2; 4729 4730 if (bdev_name == NULL) { 4731 return -1; 4732 } else { 4733 h1 = rb_tree_get_height(RB_LEFT(bdev_name, node)); 4734 h2 = rb_tree_get_height(RB_RIGHT(bdev_name, node)); 4735 4736 return spdk_max(h1, h2) + 1; 4737 } 4738 } 4739 4740 static void 4741 bdev_multi_allocation(void) 4742 { 4743 const int max_bdev_num = 1024 * 16; 4744 char name[max_bdev_num][10]; 4745 char noexist_name[] = "invalid_bdev"; 4746 struct spdk_bdev *bdev[max_bdev_num]; 4747 int i, j; 4748 uint64_t last_time; 4749 int bdev_num; 4750 int height; 4751 4752 for (j = 0; j < max_bdev_num; j++) { 4753 snprintf(name[j], sizeof(name[j]), "bdev%d", j); 4754 } 4755 4756 for (i = 0; i < 16; i++) { 4757 last_time = get_ns_time(); 4758 bdev_num = 1024 * (i + 1); 4759 for (j = 0; j < bdev_num; j++) { 4760 bdev[j] = allocate_bdev(name[j]); 4761 height = rb_tree_get_height(&bdev[j]->internal.bdev_name); 4762 CU_ASSERT(height <= (int)(spdk_u32log2(j + 1))); 4763 } 4764 SPDK_NOTICELOG("alloc bdev num %d takes %" PRIu64 " ms\n", bdev_num, 4765 (get_ns_time() - last_time) / 1000 / 1000); 4766 for (j = 0; j < bdev_num; j++) { 4767 CU_ASSERT(spdk_bdev_get_by_name(name[j]) != NULL); 4768 } 4769 CU_ASSERT(spdk_bdev_get_by_name(noexist_name) == NULL); 4770 4771 for (j = 0; j < bdev_num; j++) { 4772 free_bdev(bdev[j]); 4773 } 4774 for (j = 0; j < bdev_num; j++) { 4775 CU_ASSERT(spdk_bdev_get_by_name(name[j]) == NULL); 4776 } 4777 } 4778 } 4779 4780 static struct spdk_memory_domain *g_bdev_memory_domain = (struct spdk_memory_domain *) 0xf00df00d; 4781 4782 static int 4783 test_bdev_get_supported_dma_device_types_op(void *ctx, struct spdk_memory_domain **domains, 4784 int array_size) 4785 { 4786 if (array_size > 0 && domains) { 4787 domains[0] = g_bdev_memory_domain; 4788 } 4789 4790 return 1; 4791 } 4792 4793 static void 4794 bdev_get_memory_domains(void) 4795 { 4796 struct spdk_bdev_fn_table fn_table = { 4797 .get_memory_domains = test_bdev_get_supported_dma_device_types_op 4798 }; 4799 struct spdk_bdev bdev = { .fn_table = &fn_table }; 4800 struct spdk_memory_domain *domains[2] = {}; 4801 int rc; 4802 4803 /* bdev is NULL */ 4804 rc = spdk_bdev_get_memory_domains(NULL, domains, 2); 4805 CU_ASSERT(rc == -EINVAL); 4806 4807 /* domains is NULL */ 4808 rc = spdk_bdev_get_memory_domains(&bdev, NULL, 2); 4809 CU_ASSERT(rc == 1); 4810 4811 /* array size is 0 */ 4812 rc = spdk_bdev_get_memory_domains(&bdev, domains, 0); 4813 CU_ASSERT(rc == 1); 4814 4815 /* get_supported_dma_device_types op is set */ 4816 rc = spdk_bdev_get_memory_domains(&bdev, domains, 2); 4817 CU_ASSERT(rc == 1); 4818 CU_ASSERT(domains[0] == g_bdev_memory_domain); 4819 4820 /* get_supported_dma_device_types op is not set */ 4821
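/* With no get_memory_domains callback the bdev is presumed to be accessible through
 * local host memory only, so spdk_bdev_get_memory_domains() should report zero domains,
 * as the final assertion below checks.
 */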
static void
bdev_get_memory_domains(void)
{
	struct spdk_bdev_fn_table fn_table = {
		.get_memory_domains = test_bdev_get_supported_dma_device_types_op
	};
	struct spdk_bdev bdev = { .fn_table = &fn_table };
	struct spdk_memory_domain *domains[2] = {};
	int rc;

	/* bdev is NULL */
	rc = spdk_bdev_get_memory_domains(NULL, domains, 2);
	CU_ASSERT(rc == -EINVAL);

	/* domains is NULL */
	rc = spdk_bdev_get_memory_domains(&bdev, NULL, 2);
	CU_ASSERT(rc == 1);

	/* array size is 0 */
	rc = spdk_bdev_get_memory_domains(&bdev, domains, 0);
	CU_ASSERT(rc == 1);

	/* get_memory_domains op is set */
	rc = spdk_bdev_get_memory_domains(&bdev, domains, 2);
	CU_ASSERT(rc == 1);
	CU_ASSERT(domains[0] == g_bdev_memory_domain);

	/* get_memory_domains op is not set */
	fn_table.get_memory_domains = NULL;
	rc = spdk_bdev_get_memory_domains(&bdev, domains, 2);
	CU_ASSERT(rc == 0);
}
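/*
 * bdev_writev_readv_ext exercises the *_blocks_ext() I/O variants, which carry
 * extra per-I/O parameters in struct spdk_bdev_ext_io_opts. The bdev is
 * configured with separate (non-interleaved) metadata, so the metadata buffer
 * travels in ext_io_opts.metadata; the test records it in expected_io->md_buf
 * so the stub can verify it against the submitted I/O.
 */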
static void
bdev_writev_readv_ext(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc = NULL;
	struct spdk_io_channel *io_ch;
	struct iovec iov = { .iov_base = (void *)0xbaaddead, .iov_len = 0x1000 };
	struct ut_expected_io *expected_io;
	struct spdk_bdev_ext_io_opts ext_io_opts = {
		.metadata = (void *)0xFF000000
	};
	int rc;

	spdk_bdev_initialize(bdev_init_cb, NULL);

	bdev = allocate_bdev("bdev0");
	bdev->md_interleave = false;
	bdev->md_len = 8;

	rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(desc != NULL);
	CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc));
	io_ch = spdk_bdev_get_io_channel(desc);
	CU_ASSERT(io_ch != NULL);

	/* Read 14 blocks at offset 32, passing the metadata pointer via ext_io_opts */
	g_io_done = false;
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 32, 14, 1);
	expected_io->md_buf = ext_io_opts.metadata;
	expected_io->ext_io_opts = &ext_io_opts;
	ut_expected_io_set_iov(expected_io, 0, iov.iov_base, iov.iov_len);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	rc = spdk_bdev_readv_blocks_ext(desc, io_ch, &iov, 1, 32, 14, io_done, NULL, &ext_io_opts);

	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == true);

	/* Write the same range back with the same ext options */
	g_io_done = false;
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 32, 14, 1);
	expected_io->md_buf = ext_io_opts.metadata;
	expected_io->ext_io_opts = &ext_io_opts;
	ut_expected_io_set_iov(expected_io, 0, iov.iov_base, iov.iov_len);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	rc = spdk_bdev_writev_blocks_ext(desc, io_ch, &iov, 1, 32, 14, io_done, NULL, &ext_io_opts);

	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == true);

	spdk_put_io_channel(io_ch);
	spdk_bdev_close(desc);
	free_bdev(bdev);
	spdk_bdev_finish(bdev_fini_cb, NULL);
	poll_threads();
}

int
main(int argc, char **argv)
{
	CU_pSuite suite = NULL;
	unsigned int num_failures;

	CU_set_error_action(CUEA_ABORT);
	CU_initialize_registry();

	suite = CU_add_suite("bdev", null_init, null_clean);

	CU_ADD_TEST(suite, bytes_to_blocks_test);
	CU_ADD_TEST(suite, num_blocks_test);
	CU_ADD_TEST(suite, io_valid_test);
	CU_ADD_TEST(suite, open_write_test);
	CU_ADD_TEST(suite, alias_add_del_test);
	CU_ADD_TEST(suite, get_device_stat_test);
	CU_ADD_TEST(suite, bdev_io_types_test);
	CU_ADD_TEST(suite, bdev_io_wait_test);
	CU_ADD_TEST(suite, bdev_io_spans_split_test);
	CU_ADD_TEST(suite, bdev_io_boundary_split_test);
	CU_ADD_TEST(suite, bdev_io_max_size_and_segment_split_test);
	CU_ADD_TEST(suite, bdev_io_mix_split_test);
	CU_ADD_TEST(suite, bdev_io_split_with_io_wait);
	CU_ADD_TEST(suite, bdev_io_alignment_with_boundary);
	CU_ADD_TEST(suite, bdev_io_alignment);
	CU_ADD_TEST(suite, bdev_histograms);
	CU_ADD_TEST(suite, bdev_write_zeroes);
	CU_ADD_TEST(suite, bdev_compare_and_write);
	CU_ADD_TEST(suite, bdev_compare);
	CU_ADD_TEST(suite, bdev_zcopy_write);
	CU_ADD_TEST(suite, bdev_zcopy_read);
	CU_ADD_TEST(suite, bdev_open_while_hotremove);
	CU_ADD_TEST(suite, bdev_close_while_hotremove);
	CU_ADD_TEST(suite, bdev_open_ext);
	CU_ADD_TEST(suite, bdev_set_io_timeout);
	CU_ADD_TEST(suite, lba_range_overlap);
	CU_ADD_TEST(suite, lock_lba_range_check_ranges);
	CU_ADD_TEST(suite, lock_lba_range_with_io_outstanding);
	CU_ADD_TEST(suite, lock_lba_range_overlapped);
	CU_ADD_TEST(suite, bdev_io_abort);
	CU_ADD_TEST(suite, bdev_unmap);
	CU_ADD_TEST(suite, bdev_write_zeroes_split_test);
	CU_ADD_TEST(suite, bdev_set_options_test);
	CU_ADD_TEST(suite, bdev_multi_allocation);
	CU_ADD_TEST(suite, bdev_get_memory_domains);
	CU_ADD_TEST(suite, bdev_writev_readv_ext);

	allocate_cores(1);
	allocate_threads(1);
	set_thread(0);

	CU_basic_set_mode(CU_BRM_VERBOSE);
	CU_basic_run_tests();
	num_failures = CU_get_number_of_failures();
	CU_cleanup_registry();

	free_threads();
	free_cores();

	return num_failures;
}