/*-
 *   BSD LICENSE
 *
 *   Copyright (c) Intel Corporation. All rights reserved.
 *   Copyright (c) 2019 Mellanox Technologies LTD. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "spdk_cunit.h"

#include "common/lib/ut_multithread.c"
#include "unit/lib/json_mock.c"

#include "spdk/config.h"
/* HACK: disable VTune integration so the unit test doesn't need VTune headers and libs to build */
#undef SPDK_CONFIG_VTUNE

#include "bdev/bdev.c"

struct spdk_trace_histories *g_trace_histories;
DEFINE_STUB_V(spdk_trace_add_register_fn, (struct spdk_trace_register_fn *reg_fn));
DEFINE_STUB_V(spdk_trace_register_owner, (uint8_t type, char id_prefix));
DEFINE_STUB_V(spdk_trace_register_object, (uint8_t type, char id_prefix));
DEFINE_STUB_V(spdk_trace_register_description, (const char *name,
		uint16_t tpoint_id, uint8_t owner_type,
		uint8_t object_type, uint8_t new_object,
		uint8_t arg1_type, const char *arg1_name));
DEFINE_STUB_V(_spdk_trace_record, (uint64_t tsc, uint16_t tpoint_id, uint16_t poller_id,
				   uint32_t size, uint64_t object_id, uint64_t arg1));
DEFINE_STUB(spdk_notify_send, uint64_t, (const char *type, const char *ctx), 0);
DEFINE_STUB(spdk_notify_type_register, struct spdk_notify_type *, (const char *type), NULL);

int g_status;
int g_count;
enum spdk_bdev_event_type g_event_type1;
enum spdk_bdev_event_type g_event_type2;
struct spdk_histogram_data *g_histogram;
void *g_unregister_arg;
int g_unregister_rc;

void
spdk_scsi_nvme_translate(const struct spdk_bdev_io *bdev_io,
			 int *sc, int *sk, int *asc, int *ascq)
{
}

static int
null_init(void)
{
	return 0;
}

static int
null_clean(void)
{
	return 0;
}

static int
stub_destruct(void *ctx)
{
	return 0;
}

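/*
 * Expected-I/O bookkeeping: tests describe each child I/O they expect the
 * bdev layer to submit (type, offset, length, iovecs, metadata pointer),
 * queue those descriptions on the channel, issue a request through the
 * public API, and then drain completions with stub_complete_io().  A
 * minimal sketch of the pattern used throughout this file:
 *
 *	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0, 8, 1);
 *	ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 8 * 512);
 *	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
 *	rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 0, 8, io_done, NULL);
 *	stub_complete_io(1);
 */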
struct ut_expected_io {
	uint8_t				type;
	uint64_t			offset;
	uint64_t			length;
	int				iovcnt;
	struct iovec			iov[BDEV_IO_NUM_CHILD_IOV];
	void				*md_buf;
	TAILQ_ENTRY(ut_expected_io)	link;
};

struct bdev_ut_channel {
	TAILQ_HEAD(, spdk_bdev_io)	outstanding_io;
	uint32_t			outstanding_io_count;
	TAILQ_HEAD(, ut_expected_io)	expected_io;
};

static bool g_io_done;
static struct spdk_bdev_io *g_bdev_io;
static enum spdk_bdev_io_status g_io_status;
static enum spdk_bdev_io_status g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS;
static uint32_t g_bdev_ut_io_device;
static struct bdev_ut_channel *g_bdev_ut_channel;
static void *g_compare_read_buf;
static uint32_t g_compare_read_buf_len;
static void *g_compare_write_buf;
static uint32_t g_compare_write_buf_len;
static bool g_abort_done;
static enum spdk_bdev_io_status g_abort_status;

static struct ut_expected_io *
ut_alloc_expected_io(uint8_t type, uint64_t offset, uint64_t length, int iovcnt)
{
	struct ut_expected_io *expected_io;

	expected_io = calloc(1, sizeof(*expected_io));
	SPDK_CU_ASSERT_FATAL(expected_io != NULL);

	expected_io->type = type;
	expected_io->offset = offset;
	expected_io->length = length;
	expected_io->iovcnt = iovcnt;

	return expected_io;
}

static void
ut_expected_io_set_iov(struct ut_expected_io *expected_io, int pos, void *base, size_t len)
{
	expected_io->iov[pos].iov_base = base;
	expected_io->iov[pos].iov_len = len;
}

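/*
 * fn_table.submit_request entry point for the stub bdev.  Every submitted
 * bdev_io is appended to the channel's outstanding_io list; READ, WRITE and
 * COMPARE payloads can be checked against the g_compare_* buffers, ABORT is
 * emulated by failing the target I/O on the outstanding list, and if an
 * expected_io entry is queued, the submitted I/O is asserted to match it
 * field by field.
 */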
static void
stub_submit_request(struct spdk_io_channel *_ch, struct spdk_bdev_io *bdev_io)
{
	struct bdev_ut_channel *ch = spdk_io_channel_get_ctx(_ch);
	struct ut_expected_io *expected_io;
	struct iovec *iov, *expected_iov;
	struct spdk_bdev_io *bio_to_abort;
	int i;

	g_bdev_io = bdev_io;

	if (g_compare_read_buf && bdev_io->type == SPDK_BDEV_IO_TYPE_READ) {
		uint32_t len = bdev_io->u.bdev.iovs[0].iov_len;

		CU_ASSERT(bdev_io->u.bdev.iovcnt == 1);
		CU_ASSERT(g_compare_read_buf_len == len);
		memcpy(bdev_io->u.bdev.iovs[0].iov_base, g_compare_read_buf, len);
	}

	if (g_compare_write_buf && bdev_io->type == SPDK_BDEV_IO_TYPE_WRITE) {
		uint32_t len = bdev_io->u.bdev.iovs[0].iov_len;

		CU_ASSERT(bdev_io->u.bdev.iovcnt == 1);
		CU_ASSERT(g_compare_write_buf_len == len);
		memcpy(g_compare_write_buf, bdev_io->u.bdev.iovs[0].iov_base, len);
	}

	if (g_compare_read_buf && bdev_io->type == SPDK_BDEV_IO_TYPE_COMPARE) {
		uint32_t len = bdev_io->u.bdev.iovs[0].iov_len;

		CU_ASSERT(bdev_io->u.bdev.iovcnt == 1);
		CU_ASSERT(g_compare_read_buf_len == len);
		if (memcmp(bdev_io->u.bdev.iovs[0].iov_base, g_compare_read_buf, len)) {
			g_io_exp_status = SPDK_BDEV_IO_STATUS_MISCOMPARE;
		}
	}

	if (bdev_io->type == SPDK_BDEV_IO_TYPE_ABORT) {
		if (g_io_exp_status == SPDK_BDEV_IO_STATUS_SUCCESS) {
			TAILQ_FOREACH(bio_to_abort, &ch->outstanding_io, module_link) {
				if (bio_to_abort == bdev_io->u.abort.bio_to_abort) {
					TAILQ_REMOVE(&ch->outstanding_io, bio_to_abort, module_link);
					ch->outstanding_io_count--;
					spdk_bdev_io_complete(bio_to_abort, SPDK_BDEV_IO_STATUS_FAILED);
					break;
				}
			}
		}
	}

	TAILQ_INSERT_TAIL(&ch->outstanding_io, bdev_io, module_link);
	ch->outstanding_io_count++;

	expected_io = TAILQ_FIRST(&ch->expected_io);
	if (expected_io == NULL) {
		return;
	}
	TAILQ_REMOVE(&ch->expected_io, expected_io, link);

	if (expected_io->type != SPDK_BDEV_IO_TYPE_INVALID) {
		CU_ASSERT(bdev_io->type == expected_io->type);
	}

	if (expected_io->md_buf != NULL) {
		CU_ASSERT(expected_io->md_buf == bdev_io->u.bdev.md_buf);
	}

	if (expected_io->length == 0) {
		free(expected_io);
		return;
	}

	CU_ASSERT(expected_io->offset == bdev_io->u.bdev.offset_blocks);
	CU_ASSERT(expected_io->length == bdev_io->u.bdev.num_blocks);

	if (expected_io->iovcnt == 0) {
		free(expected_io);
		/* UNMAP, WRITE_ZEROES and FLUSH don't have iovs, so we can just return now. */
		return;
	}

	CU_ASSERT(expected_io->iovcnt == bdev_io->u.bdev.iovcnt);
	for (i = 0; i < expected_io->iovcnt; i++) {
		iov = &bdev_io->u.bdev.iovs[i];
		expected_iov = &expected_io->iov[i];
		CU_ASSERT(iov->iov_len == expected_iov->iov_len);
		CU_ASSERT(iov->iov_base == expected_iov->iov_base);
	}

	free(expected_io);
}

static void
stub_submit_request_get_buf_cb(struct spdk_io_channel *_ch,
			       struct spdk_bdev_io *bdev_io, bool success)
{
	CU_ASSERT(success == true);

	stub_submit_request(_ch, bdev_io);
}

static void
stub_submit_request_get_buf(struct spdk_io_channel *_ch, struct spdk_bdev_io *bdev_io)
{
	spdk_bdev_io_get_buf(bdev_io, stub_submit_request_get_buf_cb,
			     bdev_io->u.bdev.num_blocks * bdev_io->bdev->blocklen);
}

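/*
 * Complete up to num_to_complete outstanding I/Os, oldest first, using the
 * status selected by g_io_exp_status.  Returns how many were actually
 * completed, which may be fewer if the outstanding list runs empty.
 */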
static uint32_t
stub_complete_io(uint32_t num_to_complete)
{
	struct bdev_ut_channel *ch = g_bdev_ut_channel;
	struct spdk_bdev_io *bdev_io;
	static enum spdk_bdev_io_status io_status;
	uint32_t num_completed = 0;

	while (num_completed < num_to_complete) {
		if (TAILQ_EMPTY(&ch->outstanding_io)) {
			break;
		}
		bdev_io = TAILQ_FIRST(&ch->outstanding_io);
		TAILQ_REMOVE(&ch->outstanding_io, bdev_io, module_link);
		ch->outstanding_io_count--;
		io_status = g_io_exp_status == SPDK_BDEV_IO_STATUS_SUCCESS ? SPDK_BDEV_IO_STATUS_SUCCESS :
			    g_io_exp_status;
		spdk_bdev_io_complete(bdev_io, io_status);
		num_completed++;
	}

	return num_completed;
}

static struct spdk_io_channel *
bdev_ut_get_io_channel(void *ctx)
{
	return spdk_get_io_channel(&g_bdev_ut_io_device);
}

static bool g_io_types_supported[SPDK_BDEV_NUM_IO_TYPES] = {
	[SPDK_BDEV_IO_TYPE_READ]		= true,
	[SPDK_BDEV_IO_TYPE_WRITE]		= true,
	[SPDK_BDEV_IO_TYPE_COMPARE]		= true,
	[SPDK_BDEV_IO_TYPE_UNMAP]		= true,
	[SPDK_BDEV_IO_TYPE_FLUSH]		= true,
	[SPDK_BDEV_IO_TYPE_RESET]		= true,
	[SPDK_BDEV_IO_TYPE_NVME_ADMIN]		= true,
	[SPDK_BDEV_IO_TYPE_NVME_IO]		= true,
	[SPDK_BDEV_IO_TYPE_NVME_IO_MD]		= true,
	[SPDK_BDEV_IO_TYPE_WRITE_ZEROES]	= true,
	[SPDK_BDEV_IO_TYPE_ZCOPY]		= true,
	[SPDK_BDEV_IO_TYPE_ABORT]		= true,
};

static void
ut_enable_io_type(enum spdk_bdev_io_type io_type, bool enable)
{
	g_io_types_supported[io_type] = enable;
}

static bool
stub_io_type_supported(void *_bdev, enum spdk_bdev_io_type io_type)
{
	return g_io_types_supported[io_type];
}

static struct spdk_bdev_fn_table fn_table = {
	.destruct = stub_destruct,
	.submit_request = stub_submit_request,
	.get_io_channel = bdev_ut_get_io_channel,
	.io_type_supported = stub_io_type_supported,
};

static int
bdev_ut_create_ch(void *io_device, void *ctx_buf)
{
	struct bdev_ut_channel *ch = ctx_buf;

	CU_ASSERT(g_bdev_ut_channel == NULL);
	g_bdev_ut_channel = ch;

	TAILQ_INIT(&ch->outstanding_io);
	ch->outstanding_io_count = 0;
	TAILQ_INIT(&ch->expected_io);
	return 0;
}

static void
bdev_ut_destroy_ch(void *io_device, void *ctx_buf)
{
	CU_ASSERT(g_bdev_ut_channel != NULL);
	g_bdev_ut_channel = NULL;
}

struct spdk_bdev_module bdev_ut_if;

static int
bdev_ut_module_init(void)
{
	spdk_io_device_register(&g_bdev_ut_io_device, bdev_ut_create_ch, bdev_ut_destroy_ch,
				sizeof(struct bdev_ut_channel), NULL);
	spdk_bdev_module_init_done(&bdev_ut_if);
	return 0;
}

static void
bdev_ut_module_fini(void)
{
	spdk_io_device_unregister(&g_bdev_ut_io_device, NULL);
}

struct spdk_bdev_module bdev_ut_if = {
	.name = "bdev_ut",
	.module_init = bdev_ut_module_init,
	.module_fini = bdev_ut_module_fini,
	.async_init = true,
};

static void vbdev_ut_examine(struct spdk_bdev *bdev);

static int
vbdev_ut_module_init(void)
{
	return 0;
}

static void
vbdev_ut_module_fini(void)
{
}

struct spdk_bdev_module vbdev_ut_if = {
	.name = "vbdev_ut",
	.module_init = vbdev_ut_module_init,
	.module_fini = vbdev_ut_module_fini,
	.examine_config = vbdev_ut_examine,
};

SPDK_BDEV_MODULE_REGISTER(bdev_ut, &bdev_ut_if)
SPDK_BDEV_MODULE_REGISTER(vbdev_ut, &vbdev_ut_if)

static void
vbdev_ut_examine(struct spdk_bdev *bdev)
{
	spdk_bdev_module_examine_done(&vbdev_ut_if);
}

static struct spdk_bdev *
allocate_bdev(char *name)
{
	struct spdk_bdev *bdev;
	int rc;

	bdev = calloc(1, sizeof(*bdev));
	SPDK_CU_ASSERT_FATAL(bdev != NULL);

	bdev->name = name;
	bdev->fn_table = &fn_table;
	bdev->module = &bdev_ut_if;
	bdev->blockcnt = 1024;
	bdev->blocklen = 512;

	rc = spdk_bdev_register(bdev);
	CU_ASSERT(rc == 0);

	return bdev;
}

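/*
 * Same as allocate_bdev(), but the bdev is owned by vbdev_ut_if and no
 * blockcnt/blocklen is set; the virtual bdev tests here only exercise
 * open/claim semantics, never I/O.
 */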
static struct spdk_bdev *
allocate_vbdev(char *name)
{
	struct spdk_bdev *bdev;
	int rc;

	bdev = calloc(1, sizeof(*bdev));
	SPDK_CU_ASSERT_FATAL(bdev != NULL);

	bdev->name = name;
	bdev->fn_table = &fn_table;
	bdev->module = &vbdev_ut_if;

	rc = spdk_bdev_register(bdev);
	CU_ASSERT(rc == 0);

	return bdev;
}

static void
free_bdev(struct spdk_bdev *bdev)
{
	spdk_bdev_unregister(bdev, NULL, NULL);
	poll_threads();
	memset(bdev, 0xFF, sizeof(*bdev));
	free(bdev);
}

static void
free_vbdev(struct spdk_bdev *bdev)
{
	spdk_bdev_unregister(bdev, NULL, NULL);
	poll_threads();
	memset(bdev, 0xFF, sizeof(*bdev));
	free(bdev);
}

static void
get_device_stat_cb(struct spdk_bdev *bdev, struct spdk_bdev_io_stat *stat, void *cb_arg, int rc)
{
	const char *bdev_name;

	CU_ASSERT(bdev != NULL);
	CU_ASSERT(rc == 0);
	bdev_name = spdk_bdev_get_name(bdev);
	CU_ASSERT_STRING_EQUAL(bdev_name, "bdev0");

	free(stat);

	*(bool *)cb_arg = true;
}

static void
bdev_unregister_cb(void *cb_arg, int rc)
{
	g_unregister_arg = cb_arg;
	g_unregister_rc = rc;
}

static void
bdev_ut_event_cb(enum spdk_bdev_event_type type, struct spdk_bdev *bdev, void *event_ctx)
{
}

static void
bdev_open_cb1(enum spdk_bdev_event_type type, struct spdk_bdev *bdev, void *event_ctx)
{
	struct spdk_bdev_desc *desc = *(struct spdk_bdev_desc **)event_ctx;

	g_event_type1 = type;
	if (SPDK_BDEV_EVENT_REMOVE == type) {
		spdk_bdev_close(desc);
	}
}

static void
bdev_open_cb2(enum spdk_bdev_event_type type, struct spdk_bdev *bdev, void *event_ctx)
{
	struct spdk_bdev_desc *desc = *(struct spdk_bdev_desc **)event_ctx;

	g_event_type2 = type;
	if (SPDK_BDEV_EVENT_REMOVE == type) {
		spdk_bdev_close(desc);
	}
}

static void
get_device_stat_test(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_io_stat *stat;
	bool done;

	bdev = allocate_bdev("bdev0");
	stat = calloc(1, sizeof(struct spdk_bdev_io_stat));
	if (stat == NULL) {
		free_bdev(bdev);
		return;
	}

	done = false;
	spdk_bdev_get_device_stat(bdev, stat, get_device_stat_cb, &done);
	while (!done) {
		poll_threads();
	}

	free_bdev(bdev);
}

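/*
 * Verify spdk_bdev_open_ext() write-permission rules: a bdev claimed via
 * spdk_bdev_module_claim_bdev() can still be opened read-only, but opening
 * it read/write fails with -EPERM.
 */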
static void
open_write_test(void)
{
	struct spdk_bdev *bdev[9];
	struct spdk_bdev_desc *desc[9] = {};
	int rc;

	/*
	 * Create a tree of bdevs to test various open w/ write cases.
	 *
	 * bdev0 through bdev3 are physical block devices, such as NVMe
	 * namespaces or Ceph block devices.
	 *
	 * bdev4 is a virtual bdev with multiple base bdevs.  This models
	 * caching or RAID use cases.
	 *
	 * bdev5 through bdev7 are all virtual bdevs with the same base
	 * bdev (except bdev7).  This models partitioning or logical volume
	 * use cases.
	 *
	 * bdev7 is a virtual bdev with multiple base bdevs.  One of its base
	 * bdevs (bdev2) is shared with the other virtual bdevs bdev5 and
	 * bdev6.  This models caching, RAID, partitioning or logical volume
	 * use cases.
	 *
	 * bdev8 is a virtual bdev with multiple base bdevs, but these
	 * base bdevs are themselves virtual bdevs.
	 *
	 *                bdev8
	 *                  |
	 *            +----------+
	 *            |          |
	 *          bdev4      bdev5   bdev6   bdev7
	 *            |          |       |       |
	 *        +---+---+    +---+     +   +---+---+
	 *        |       |        \     |  /        \
	 *      bdev0   bdev1       bdev2           bdev3
	 */

	bdev[0] = allocate_bdev("bdev0");
	rc = spdk_bdev_module_claim_bdev(bdev[0], NULL, &bdev_ut_if);
	CU_ASSERT(rc == 0);

	bdev[1] = allocate_bdev("bdev1");
	rc = spdk_bdev_module_claim_bdev(bdev[1], NULL, &bdev_ut_if);
	CU_ASSERT(rc == 0);

	bdev[2] = allocate_bdev("bdev2");
	rc = spdk_bdev_module_claim_bdev(bdev[2], NULL, &bdev_ut_if);
	CU_ASSERT(rc == 0);

	bdev[3] = allocate_bdev("bdev3");
	rc = spdk_bdev_module_claim_bdev(bdev[3], NULL, &bdev_ut_if);
	CU_ASSERT(rc == 0);

	bdev[4] = allocate_vbdev("bdev4");
	rc = spdk_bdev_module_claim_bdev(bdev[4], NULL, &bdev_ut_if);
	CU_ASSERT(rc == 0);

	bdev[5] = allocate_vbdev("bdev5");
	rc = spdk_bdev_module_claim_bdev(bdev[5], NULL, &bdev_ut_if);
	CU_ASSERT(rc == 0);

	bdev[6] = allocate_vbdev("bdev6");

	bdev[7] = allocate_vbdev("bdev7");

	bdev[8] = allocate_vbdev("bdev8");

	/* Open bdev0 read-only.  This should succeed. */
	rc = spdk_bdev_open_ext("bdev0", false, bdev_ut_event_cb, NULL, &desc[0]);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(desc[0] != NULL);
	CU_ASSERT(bdev[0] == spdk_bdev_desc_get_bdev(desc[0]));
	spdk_bdev_close(desc[0]);

	/*
	 * Open bdev1 read/write.  This should fail since bdev1 has been claimed
	 * by a vbdev module.
	 */
	rc = spdk_bdev_open_ext("bdev1", true, bdev_ut_event_cb, NULL, &desc[1]);
	CU_ASSERT(rc == -EPERM);

	/*
	 * Open bdev4 read/write.  This should fail since bdev4 has been claimed
	 * by a vbdev module.
	 */
	rc = spdk_bdev_open_ext("bdev4", true, bdev_ut_event_cb, NULL, &desc[4]);
	CU_ASSERT(rc == -EPERM);

	/* Open bdev4 read-only.  This should succeed. */
	rc = spdk_bdev_open_ext("bdev4", false, bdev_ut_event_cb, NULL, &desc[4]);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(desc[4] != NULL);
	CU_ASSERT(bdev[4] == spdk_bdev_desc_get_bdev(desc[4]));
	spdk_bdev_close(desc[4]);

	/*
	 * Open bdev8 read/write.  This should succeed since it is a leaf
	 * bdev.
	 */
	rc = spdk_bdev_open_ext("bdev8", true, bdev_ut_event_cb, NULL, &desc[8]);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(desc[8] != NULL);
	CU_ASSERT(bdev[8] == spdk_bdev_desc_get_bdev(desc[8]));
	spdk_bdev_close(desc[8]);

	/*
	 * Open bdev5 read/write.  This should fail since bdev5 has been claimed
	 * by a vbdev module.
	 */
	rc = spdk_bdev_open_ext("bdev5", true, bdev_ut_event_cb, NULL, &desc[5]);
	CU_ASSERT(rc == -EPERM);

	/* Open bdev5 read-only.  This should succeed. */
	rc = spdk_bdev_open_ext("bdev5", false, bdev_ut_event_cb, NULL, &desc[5]);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(desc[5] != NULL);
	CU_ASSERT(bdev[5] == spdk_bdev_desc_get_bdev(desc[5]));
	spdk_bdev_close(desc[5]);

	free_vbdev(bdev[8]);

	free_vbdev(bdev[5]);
	free_vbdev(bdev[6]);
	free_vbdev(bdev[7]);

	free_vbdev(bdev[4]);

	free_bdev(bdev[0]);
	free_bdev(bdev[1]);
	free_bdev(bdev[2]);
	free_bdev(bdev[3]);
}

static void
bytes_to_blocks_test(void)
{
	struct spdk_bdev bdev;
	uint64_t offset_blocks, num_blocks;

	memset(&bdev, 0, sizeof(bdev));

	bdev.blocklen = 512;

	/* All parameters valid */
	offset_blocks = 0;
	num_blocks = 0;
	CU_ASSERT(bdev_bytes_to_blocks(&bdev, 512, &offset_blocks, 1024, &num_blocks) == 0);
	CU_ASSERT(offset_blocks == 1);
	CU_ASSERT(num_blocks == 2);

	/* Offset not a block multiple */
	CU_ASSERT(bdev_bytes_to_blocks(&bdev, 3, &offset_blocks, 512, &num_blocks) != 0);

	/* Length not a block multiple */
	CU_ASSERT(bdev_bytes_to_blocks(&bdev, 512, &offset_blocks, 3, &num_blocks) != 0);

	/* In case blocklen is not a power of two */
	bdev.blocklen = 100;
	CU_ASSERT(bdev_bytes_to_blocks(&bdev, 100, &offset_blocks, 200, &num_blocks) == 0);
	CU_ASSERT(offset_blocks == 1);
	CU_ASSERT(num_blocks == 2);

	/* Offset not a block multiple */
	CU_ASSERT(bdev_bytes_to_blocks(&bdev, 3, &offset_blocks, 100, &num_blocks) != 0);

	/* Length not a block multiple */
	CU_ASSERT(bdev_bytes_to_blocks(&bdev, 100, &offset_blocks, 3, &num_blocks) != 0);
}

static void
num_blocks_test(void)
{
	struct spdk_bdev bdev;
	struct spdk_bdev_desc *desc = NULL;
	int rc;

	memset(&bdev, 0, sizeof(bdev));
	bdev.name = "num_blocks";
	bdev.fn_table = &fn_table;
	bdev.module = &bdev_ut_if;
	spdk_bdev_register(&bdev);
	spdk_bdev_notify_blockcnt_change(&bdev, 50);

	/* Growing block number */
	CU_ASSERT(spdk_bdev_notify_blockcnt_change(&bdev, 70) == 0);
	/* Shrinking block number */
	CU_ASSERT(spdk_bdev_notify_blockcnt_change(&bdev, 30) == 0);

	rc = spdk_bdev_open_ext("num_blocks", false, bdev_open_cb1, &desc, &desc);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(desc != NULL);
	CU_ASSERT(&bdev == spdk_bdev_desc_get_bdev(desc));

	/* Growing block number */
	CU_ASSERT(spdk_bdev_notify_blockcnt_change(&bdev, 80) == 0);
	/* Shrinking block number is not allowed while a descriptor is open */
	CU_ASSERT(spdk_bdev_notify_blockcnt_change(&bdev, 20) != 0);

	g_event_type1 = 0xFF;
	/* Growing block number */
	CU_ASSERT(spdk_bdev_notify_blockcnt_change(&bdev, 90) == 0);

	poll_threads();
	CU_ASSERT_EQUAL(g_event_type1, SPDK_BDEV_EVENT_RESIZE);

	g_event_type1 = 0xFF;
	/* Growing block number and closing */
	CU_ASSERT(spdk_bdev_notify_blockcnt_change(&bdev, 100) == 0);

	spdk_bdev_close(desc);
	spdk_bdev_unregister(&bdev, NULL, NULL);

	poll_threads();

	/* Callback is not called for closed device */
	CU_ASSERT_EQUAL(g_event_type1, 0xFF);
}

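/*
 * bdev_io_valid_blocks() must reject any range that starts at or runs past
 * the end of the bdev, including offsets large enough that offset + length
 * would wrap around uint64_t.
 */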
static void
io_valid_test(void)
{
	struct spdk_bdev bdev;

	memset(&bdev, 0, sizeof(bdev));

	bdev.blocklen = 512;
	CU_ASSERT(pthread_mutex_init(&bdev.internal.mutex, NULL) == 0);

	spdk_bdev_notify_blockcnt_change(&bdev, 100);

	/* All parameters valid */
	CU_ASSERT(bdev_io_valid_blocks(&bdev, 1, 2) == true);

	/* Last valid block */
	CU_ASSERT(bdev_io_valid_blocks(&bdev, 99, 1) == true);

	/* Offset past end of bdev */
	CU_ASSERT(bdev_io_valid_blocks(&bdev, 100, 1) == false);

	/* Offset + length past end of bdev */
	CU_ASSERT(bdev_io_valid_blocks(&bdev, 99, 2) == false);

	/* Offset near end of uint64_t range (2^64 - 1) */
	CU_ASSERT(bdev_io_valid_blocks(&bdev, 18446744073709551615ULL, 1) == false);

	CU_ASSERT(pthread_mutex_destroy(&bdev.internal.mutex) == 0);
}

static void
alias_add_del_test(void)
{
	struct spdk_bdev *bdev[3];
	int rc;

	/* Create and register bdevs */
	bdev[0] = allocate_bdev("bdev0");
	SPDK_CU_ASSERT_FATAL(bdev[0] != 0);

	bdev[1] = allocate_bdev("bdev1");
	SPDK_CU_ASSERT_FATAL(bdev[1] != 0);

	bdev[2] = allocate_bdev("bdev2");
	SPDK_CU_ASSERT_FATAL(bdev[2] != 0);

	poll_threads();

	/*
	 * Try adding an alias identical to the name.
	 * The alias is identical to the name, so it cannot be added to the aliases list.
	 */
	rc = spdk_bdev_alias_add(bdev[0], bdev[0]->name);
	CU_ASSERT(rc == -EEXIST);

	/*
	 * Try adding an empty alias;
	 * this one should fail.
	 */
	rc = spdk_bdev_alias_add(bdev[0], NULL);
	CU_ASSERT(rc == -EINVAL);

	/* Try adding the same alias to two different registered bdevs */

	/* Alias is used for the first time, so this one should pass */
	rc = spdk_bdev_alias_add(bdev[0], "proper alias 0");
	CU_ASSERT(rc == 0);

	/* Alias was added to another bdev, so this one should fail */
	rc = spdk_bdev_alias_add(bdev[1], "proper alias 0");
	CU_ASSERT(rc == -EEXIST);

	/* Alias is used for the first time, so this one should pass */
	rc = spdk_bdev_alias_add(bdev[1], "proper alias 1");
	CU_ASSERT(rc == 0);

	/* Try removing an alias from registered bdevs */

	/* Alias is not on the bdev's aliases list, so this one should fail */
	rc = spdk_bdev_alias_del(bdev[0], "not existing");
	CU_ASSERT(rc == -ENOENT);

	/* Alias is present on the bdev's aliases list, so this one should pass */
	rc = spdk_bdev_alias_del(bdev[0], "proper alias 0");
	CU_ASSERT(rc == 0);

	/* Alias is present on the bdev's aliases list, so this one should pass */
	rc = spdk_bdev_alias_del(bdev[1], "proper alias 1");
	CU_ASSERT(rc == 0);

	/* Try removing the name instead of an alias: this should fail, since the
	 * name cannot be changed or removed */
	rc = spdk_bdev_alias_del(bdev[0], bdev[0]->name);
	CU_ASSERT(rc != 0);

	/* Try deleting all aliases from an empty aliases list */
	spdk_bdev_alias_del_all(bdev[2]);
	SPDK_CU_ASSERT_FATAL(TAILQ_EMPTY(&bdev[2]->aliases));

	/* Try deleting all aliases from a non-empty aliases list */
	rc = spdk_bdev_alias_add(bdev[2], "alias0");
	CU_ASSERT(rc == 0);
	rc = spdk_bdev_alias_add(bdev[2], "alias1");
	CU_ASSERT(rc == 0);
	spdk_bdev_alias_del_all(bdev[2]);
	CU_ASSERT(TAILQ_EMPTY(&bdev[2]->aliases));

	/* Unregister and free bdevs */
	spdk_bdev_unregister(bdev[0], NULL, NULL);
	spdk_bdev_unregister(bdev[1], NULL, NULL);
	spdk_bdev_unregister(bdev[2], NULL, NULL);

	poll_threads();

	free(bdev[0]);
	free(bdev[1]);
	free(bdev[2]);
}

static void
io_done(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
{
	g_io_done = true;
	g_io_status = bdev_io->internal.status;
	spdk_bdev_free_io(bdev_io);
}

static void
bdev_init_cb(void *arg, int rc)
{
	CU_ASSERT(rc == 0);
}

static void
bdev_fini_cb(void *arg)
{
}

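/*
 * Support for spdk_bdev_queue_io_wait(): when submission fails with -ENOMEM
 * because the bdev_io pool is exhausted, the caller registers an
 * spdk_bdev_io_wait_entry whose callback retries the I/O once a bdev_io is
 * freed.  io_wait_cb() below performs that retry.
 */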
struct bdev_ut_io_wait_entry {
	struct spdk_bdev_io_wait_entry	entry;
	struct spdk_io_channel		*io_ch;
	struct spdk_bdev_desc		*desc;
	bool				submitted;
};

static void
io_wait_cb(void *arg)
{
	struct bdev_ut_io_wait_entry *entry = arg;
	int rc;

	rc = spdk_bdev_read_blocks(entry->desc, entry->io_ch, NULL, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	entry->submitted = true;
}

static void
bdev_io_types_test(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc = NULL;
	struct spdk_io_channel *io_ch;
	struct spdk_bdev_opts bdev_opts = {};
	int rc;

	spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts));
	bdev_opts.bdev_io_pool_size = 4;
	bdev_opts.bdev_io_cache_size = 2;

	rc = spdk_bdev_set_opts(&bdev_opts);
	CU_ASSERT(rc == 0);
	spdk_bdev_initialize(bdev_init_cb, NULL);
	poll_threads();

	bdev = allocate_bdev("bdev0");

	rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc);
	CU_ASSERT(rc == 0);
	poll_threads();
	SPDK_CU_ASSERT_FATAL(desc != NULL);
	CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc));
	io_ch = spdk_bdev_get_io_channel(desc);
	CU_ASSERT(io_ch != NULL);

	/* WRITE and WRITE ZEROES are not supported */
	ut_enable_io_type(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, false);
	ut_enable_io_type(SPDK_BDEV_IO_TYPE_WRITE, false);
	rc = spdk_bdev_write_zeroes_blocks(desc, io_ch, 0, 128, io_done, NULL);
	CU_ASSERT(rc == -ENOTSUP);
	ut_enable_io_type(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, true);
	ut_enable_io_type(SPDK_BDEV_IO_TYPE_WRITE, true);

	spdk_put_io_channel(io_ch);
	spdk_bdev_close(desc);
	free_bdev(bdev);
	spdk_bdev_finish(bdev_fini_cb, NULL);
	poll_threads();
}

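/*
 * Exhaust the 4-entry bdev_io pool with reads, verify that a fifth
 * submission fails with -ENOMEM, then queue two io_wait entries and check
 * that they are resubmitted in order as completions free bdev_ios.
 */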
static void
bdev_io_wait_test(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc = NULL;
	struct spdk_io_channel *io_ch;
	struct spdk_bdev_opts bdev_opts = {};
	struct bdev_ut_io_wait_entry io_wait_entry;
	struct bdev_ut_io_wait_entry io_wait_entry2;
	int rc;

	spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts));
	bdev_opts.bdev_io_pool_size = 4;
	bdev_opts.bdev_io_cache_size = 2;

	rc = spdk_bdev_set_opts(&bdev_opts);
	CU_ASSERT(rc == 0);
	spdk_bdev_initialize(bdev_init_cb, NULL);
	poll_threads();

	bdev = allocate_bdev("bdev0");

	rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc);
	CU_ASSERT(rc == 0);
	poll_threads();
	SPDK_CU_ASSERT_FATAL(desc != NULL);
	CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc));
	io_ch = spdk_bdev_get_io_channel(desc);
	CU_ASSERT(io_ch != NULL);

	rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 4);

	rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, NULL);
	CU_ASSERT(rc == -ENOMEM);

	io_wait_entry.entry.bdev = bdev;
	io_wait_entry.entry.cb_fn = io_wait_cb;
	io_wait_entry.entry.cb_arg = &io_wait_entry;
	io_wait_entry.io_ch = io_ch;
	io_wait_entry.desc = desc;
	io_wait_entry.submitted = false;
	/* Cannot use the same io_wait_entry for two different calls. */
	memcpy(&io_wait_entry2, &io_wait_entry, sizeof(io_wait_entry));
	io_wait_entry2.entry.cb_arg = &io_wait_entry2;

	/* Queue two I/O waits. */
	rc = spdk_bdev_queue_io_wait(bdev, io_ch, &io_wait_entry.entry);
	CU_ASSERT(rc == 0);
	CU_ASSERT(io_wait_entry.submitted == false);
	rc = spdk_bdev_queue_io_wait(bdev, io_ch, &io_wait_entry2.entry);
	CU_ASSERT(rc == 0);
	CU_ASSERT(io_wait_entry2.submitted == false);

	stub_complete_io(1);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 4);
	CU_ASSERT(io_wait_entry.submitted == true);
	CU_ASSERT(io_wait_entry2.submitted == false);

	stub_complete_io(1);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 4);
	CU_ASSERT(io_wait_entry2.submitted == true);

	stub_complete_io(4);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);

	spdk_put_io_channel(io_ch);
	spdk_bdev_close(desc);
	free_bdev(bdev);
	spdk_bdev_finish(bdev_fini_cb, NULL);
	poll_threads();
}

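/*
 * Unit-level checks of bdev_io_should_split(): exercise the
 * optimal_io_boundary, max_segment_size and max_num_segments criteria on a
 * hand-built bdev_io without going through the submission path.
 */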
static void
bdev_io_spans_split_test(void)
{
	struct spdk_bdev bdev;
	struct spdk_bdev_io bdev_io;
	struct iovec iov[BDEV_IO_NUM_CHILD_IOV];

	memset(&bdev, 0, sizeof(bdev));
	bdev_io.u.bdev.iovs = iov;

	bdev_io.type = SPDK_BDEV_IO_TYPE_READ;
	bdev.optimal_io_boundary = 0;
	bdev.max_segment_size = 0;
	bdev.max_num_segments = 0;
	bdev_io.bdev = &bdev;

	/* bdev has no optimal_io_boundary and no max size limits set - so this should return false. */
	CU_ASSERT(bdev_io_should_split(&bdev_io) == false);

	bdev.split_on_optimal_io_boundary = true;
	bdev.optimal_io_boundary = 32;
	bdev_io.type = SPDK_BDEV_IO_TYPE_RESET;

	/* RESETs are not based on LBAs - so this should return false. */
	CU_ASSERT(bdev_io_should_split(&bdev_io) == false);

	bdev_io.type = SPDK_BDEV_IO_TYPE_READ;
	bdev_io.u.bdev.offset_blocks = 0;
	bdev_io.u.bdev.num_blocks = 32;

	/* This I/O runs right up to, but does not cross, the boundary - so this should return false. */
	CU_ASSERT(bdev_io_should_split(&bdev_io) == false);

	bdev_io.u.bdev.num_blocks = 33;

	/* This I/O spans a boundary. */
	CU_ASSERT(bdev_io_should_split(&bdev_io) == true);

	bdev_io.u.bdev.num_blocks = 32;
	bdev.max_segment_size = 512 * 32;
	bdev.max_num_segments = 1;
	bdev_io.u.bdev.iovcnt = 1;
	iov[0].iov_len = 512;

	/* Does not cross the boundary and does not exceed max_segment_size or max_num_segments. */
	CU_ASSERT(bdev_io_should_split(&bdev_io) == false);

	bdev.split_on_optimal_io_boundary = false;
	bdev.max_segment_size = 512;
	bdev.max_num_segments = 1;
	bdev_io.u.bdev.iovcnt = 2;

	/* Exceeds max_num_segments. */
	CU_ASSERT(bdev_io_should_split(&bdev_io) == true);

	bdev.max_num_segments = 2;
	iov[0].iov_len = 513;
	iov[1].iov_len = 512;

	/* Exceeds max_segment_size. */
	CU_ASSERT(bdev_io_should_split(&bdev_io) == true);
}

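/*
 * End-to-end splitting on optimal_io_boundary.  Each case queues the exact
 * child I/Os the split logic is expected to produce.  Note that the md_buf
 * of each child advances by 8 bytes per block, since the test bdev uses
 * separate metadata (md_interleave == false) with md_len == 8.
 */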
static void
bdev_io_boundary_split_test(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc = NULL;
	struct spdk_io_channel *io_ch;
	struct spdk_bdev_opts bdev_opts = {};
	struct iovec iov[BDEV_IO_NUM_CHILD_IOV * 2];
	struct ut_expected_io *expected_io;
	void *md_buf = (void *)0xFF000000;
	uint64_t i;
	int rc;

	spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts));
	bdev_opts.bdev_io_pool_size = 512;
	bdev_opts.bdev_io_cache_size = 64;

	rc = spdk_bdev_set_opts(&bdev_opts);
	CU_ASSERT(rc == 0);
	spdk_bdev_initialize(bdev_init_cb, NULL);

	bdev = allocate_bdev("bdev0");

	rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(desc != NULL);
	CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc));
	io_ch = spdk_bdev_get_io_channel(desc);
	CU_ASSERT(io_ch != NULL);

	bdev->optimal_io_boundary = 16;
	bdev->split_on_optimal_io_boundary = false;

	g_io_done = false;

	/* First test that the I/O does not get split if split_on_optimal_io_boundary == false. */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 14, 8, 1);
	ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 8 * 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 8, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);

	bdev->split_on_optimal_io_boundary = true;
	bdev->md_interleave = false;
	bdev->md_len = 8;

	/* Now test that a single-vector command is split correctly.
	 * Offset 14, length 8, payload 0xF000
	 *  Child - Offset 14, length 2, payload 0xF000
	 *  Child - Offset 16, length 6, payload 0xF000 + 2 * 512
	 *
	 * Set up the expected values before calling spdk_bdev_read_blocks
	 */
	g_io_done = false;
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 14, 2, 1);
	expected_io->md_buf = md_buf;
	ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 2 * 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 16, 6, 1);
	expected_io->md_buf = md_buf + 2 * 8;
	ut_expected_io_set_iov(expected_io, 0, (void *)(0xF000 + 2 * 512), 6 * 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	/* spdk_bdev_read_blocks will submit the first child immediately. */
	rc = spdk_bdev_read_blocks_with_md(desc, io_ch, (void *)0xF000, md_buf,
					   14, 8, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2);
	stub_complete_io(2);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);

	/* Now set up a more complex, multi-vector command that needs to be split,
	 * including splitting iovecs.
	 */
	iov[0].iov_base = (void *)0x10000;
	iov[0].iov_len = 512;
	iov[1].iov_base = (void *)0x20000;
	iov[1].iov_len = 20 * 512;
	iov[2].iov_base = (void *)0x30000;
	iov[2].iov_len = 11 * 512;

	g_io_done = false;
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 14, 2, 2);
	expected_io->md_buf = md_buf;
	ut_expected_io_set_iov(expected_io, 0, (void *)0x10000, 512);
	ut_expected_io_set_iov(expected_io, 1, (void *)0x20000, 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 16, 16, 1);
	expected_io->md_buf = md_buf + 2 * 8;
	ut_expected_io_set_iov(expected_io, 0, (void *)(0x20000 + 512), 16 * 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 32, 14, 2);
	expected_io->md_buf = md_buf + 18 * 8;
	ut_expected_io_set_iov(expected_io, 0, (void *)(0x20000 + 17 * 512), 3 * 512);
	ut_expected_io_set_iov(expected_io, 1, (void *)0x30000, 11 * 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	rc = spdk_bdev_writev_blocks_with_md(desc, io_ch, iov, 3, md_buf,
					     14, 32, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 3);
	stub_complete_io(3);
	CU_ASSERT(g_io_done == true);

	/* Test a multi-vector command that needs to be split on the I/O boundary and
	 * then split further due to the capacity of child iovs.
	 */
	for (i = 0; i < BDEV_IO_NUM_CHILD_IOV * 2; i++) {
		iov[i].iov_base = (void *)((i + 1) * 0x10000);
		iov[i].iov_len = 512;
	}

	bdev->optimal_io_boundary = BDEV_IO_NUM_CHILD_IOV;
	g_io_done = false;
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0, BDEV_IO_NUM_CHILD_IOV,
					   BDEV_IO_NUM_CHILD_IOV);
	expected_io->md_buf = md_buf;
	for (i = 0; i < BDEV_IO_NUM_CHILD_IOV; i++) {
		ut_expected_io_set_iov(expected_io, i, (void *)((i + 1) * 0x10000), 512);
	}
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, BDEV_IO_NUM_CHILD_IOV,
					   BDEV_IO_NUM_CHILD_IOV, BDEV_IO_NUM_CHILD_IOV);
	expected_io->md_buf = md_buf + BDEV_IO_NUM_CHILD_IOV * 8;
	for (i = 0; i < BDEV_IO_NUM_CHILD_IOV; i++) {
		ut_expected_io_set_iov(expected_io, i,
				       (void *)((i + 1 + BDEV_IO_NUM_CHILD_IOV) * 0x10000), 512);
	}
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	rc = spdk_bdev_readv_blocks_with_md(desc, io_ch, iov, BDEV_IO_NUM_CHILD_IOV * 2, md_buf,
					    0, BDEV_IO_NUM_CHILD_IOV * 2, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);

	/* Test a multi-vector command that needs to be split on the I/O boundary and
	 * then split further due to the capacity of child iovs.  In this case, the
	 * length of the rest of the iovec array within an I/O boundary is a multiple
	 * of the block size.
	 */

	/* Fill the iovec array for exactly one boundary.  The iovec count for this
	 * boundary is BDEV_IO_NUM_CHILD_IOV + 1, which exceeds the capacity of child iovs.
	 */
	for (i = 0; i < BDEV_IO_NUM_CHILD_IOV - 2; i++) {
		iov[i].iov_base = (void *)((i + 1) * 0x10000);
		iov[i].iov_len = 512;
	}
	for (i = BDEV_IO_NUM_CHILD_IOV - 2; i < BDEV_IO_NUM_CHILD_IOV; i++) {
		iov[i].iov_base = (void *)((i + 1) * 0x10000);
		iov[i].iov_len = 256;
	}
	iov[BDEV_IO_NUM_CHILD_IOV].iov_base = (void *)((BDEV_IO_NUM_CHILD_IOV + 1) * 0x10000);
	iov[BDEV_IO_NUM_CHILD_IOV].iov_len = 512;

	/* Add an extra iovec to trigger split */
	iov[BDEV_IO_NUM_CHILD_IOV + 1].iov_base = (void *)((BDEV_IO_NUM_CHILD_IOV + 2) * 0x10000);
	iov[BDEV_IO_NUM_CHILD_IOV + 1].iov_len = 512;

	bdev->optimal_io_boundary = BDEV_IO_NUM_CHILD_IOV;
	g_io_done = false;
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0,
					   BDEV_IO_NUM_CHILD_IOV - 1, BDEV_IO_NUM_CHILD_IOV);
	expected_io->md_buf = md_buf;
	for (i = 0; i < BDEV_IO_NUM_CHILD_IOV - 2; i++) {
		ut_expected_io_set_iov(expected_io, i,
				       (void *)((i + 1) * 0x10000), 512);
	}
	for (i = BDEV_IO_NUM_CHILD_IOV - 2; i < BDEV_IO_NUM_CHILD_IOV; i++) {
		ut_expected_io_set_iov(expected_io, i,
				       (void *)((i + 1) * 0x10000), 256);
	}
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, BDEV_IO_NUM_CHILD_IOV - 1,
					   1, 1);
	expected_io->md_buf = md_buf + (BDEV_IO_NUM_CHILD_IOV - 1) * 8;
	ut_expected_io_set_iov(expected_io, 0,
			       (void *)((BDEV_IO_NUM_CHILD_IOV + 1) * 0x10000), 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, BDEV_IO_NUM_CHILD_IOV,
					   1, 1);
	expected_io->md_buf = md_buf + BDEV_IO_NUM_CHILD_IOV * 8;
	ut_expected_io_set_iov(expected_io, 0,
			       (void *)((BDEV_IO_NUM_CHILD_IOV + 2) * 0x10000), 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	rc = spdk_bdev_readv_blocks_with_md(desc, io_ch, iov, BDEV_IO_NUM_CHILD_IOV + 2, md_buf,
					    0, BDEV_IO_NUM_CHILD_IOV + 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2);
	stub_complete_io(2);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);

	/* Test a multi-vector command that needs to be split on the I/O boundary and
	 * then split further due to the capacity of child iovs.  In this case, the
	 * child request offset should be rewound to the last aligned offset and the
	 * request should succeed without error.
	 */
	for (i = 0; i < BDEV_IO_NUM_CHILD_IOV - 1; i++) {
		iov[i].iov_base = (void *)((i + 1) * 0x10000);
		iov[i].iov_len = 512;
	}
	iov[BDEV_IO_NUM_CHILD_IOV - 1].iov_base = (void *)(BDEV_IO_NUM_CHILD_IOV * 0x10000);
	iov[BDEV_IO_NUM_CHILD_IOV - 1].iov_len = 256;

	iov[BDEV_IO_NUM_CHILD_IOV].iov_base = (void *)((BDEV_IO_NUM_CHILD_IOV + 1) * 0x10000);
	iov[BDEV_IO_NUM_CHILD_IOV].iov_len = 256;

	iov[BDEV_IO_NUM_CHILD_IOV + 1].iov_base = (void *)((BDEV_IO_NUM_CHILD_IOV + 2) * 0x10000);
	iov[BDEV_IO_NUM_CHILD_IOV + 1].iov_len = 512;

	bdev->optimal_io_boundary = BDEV_IO_NUM_CHILD_IOV;
	g_io_done = false;
	g_io_status = 0;
	/* The first expected I/O spans offset 0 to BDEV_IO_NUM_CHILD_IOV - 1. */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0,
					   BDEV_IO_NUM_CHILD_IOV - 1, BDEV_IO_NUM_CHILD_IOV - 1);
	expected_io->md_buf = md_buf;
	for (i = 0; i < BDEV_IO_NUM_CHILD_IOV - 1; i++) {
		ut_expected_io_set_iov(expected_io, i,
				       (void *)((i + 1) * 0x10000), 512);
	}
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
	/* The second expected I/O spans offset BDEV_IO_NUM_CHILD_IOV - 1 to BDEV_IO_NUM_CHILD_IOV. */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, BDEV_IO_NUM_CHILD_IOV - 1,
					   1, 2);
	expected_io->md_buf = md_buf + (BDEV_IO_NUM_CHILD_IOV - 1) * 8;
	ut_expected_io_set_iov(expected_io, 0,
			       (void *)(BDEV_IO_NUM_CHILD_IOV * 0x10000), 256);
	ut_expected_io_set_iov(expected_io, 1,
			       (void *)((BDEV_IO_NUM_CHILD_IOV + 1) * 0x10000), 256);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
	/* The third expected I/O spans offset BDEV_IO_NUM_CHILD_IOV to BDEV_IO_NUM_CHILD_IOV + 1. */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, BDEV_IO_NUM_CHILD_IOV,
					   1, 1);
	expected_io->md_buf = md_buf + BDEV_IO_NUM_CHILD_IOV * 8;
	ut_expected_io_set_iov(expected_io, 0,
			       (void *)((BDEV_IO_NUM_CHILD_IOV + 2) * 0x10000), 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	rc = spdk_bdev_readv_blocks_with_md(desc, io_ch, iov, BDEV_IO_NUM_CHILD_IOV * 2, md_buf,
					    0, BDEV_IO_NUM_CHILD_IOV + 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2);
	stub_complete_io(2);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);

	/* Test a multi-vector command that needs to be split due to the I/O boundary and
	 * the capacity of child iovs.  In particular, test the case where the command is
	 * split due to the capacity of child iovs and the tail address is not aligned
	 * with the block size, so it is rewound to the aligned address.
	 *
	 * The iovecs used in the read request are complex but are based on data
	 * collected from a real issue.  We change the base addresses but keep the
	 * lengths so as not to lose the credibility of the test.
	 */
	bdev->optimal_io_boundary = 128;
	g_io_done = false;
	g_io_status = 0;

	for (i = 0; i < 31; i++) {
		iov[i].iov_base = (void *)(0xFEED0000000 + (i << 20));
		iov[i].iov_len = 1024;
	}
	iov[31].iov_base = (void *)0xFEED1F00000;
	iov[31].iov_len = 32768;
	iov[32].iov_base = (void *)0xFEED2000000;
	iov[32].iov_len = 160;
	iov[33].iov_base = (void *)0xFEED2100000;
	iov[33].iov_len = 4096;
	iov[34].iov_base = (void *)0xFEED2200000;
	iov[34].iov_len = 4096;
	iov[35].iov_base = (void *)0xFEED2300000;
	iov[35].iov_len = 4096;
	iov[36].iov_base = (void *)0xFEED2400000;
	iov[36].iov_len = 4096;
	iov[37].iov_base = (void *)0xFEED2500000;
	iov[37].iov_len = 4096;
	iov[38].iov_base = (void *)0xFEED2600000;
	iov[38].iov_len = 4096;
	iov[39].iov_base = (void *)0xFEED2700000;
	iov[39].iov_len = 4096;
	iov[40].iov_base = (void *)0xFEED2800000;
	iov[40].iov_len = 4096;
	iov[41].iov_base = (void *)0xFEED2900000;
	iov[41].iov_len = 4096;
	iov[42].iov_base = (void *)0xFEED2A00000;
	iov[42].iov_len = 4096;
	iov[43].iov_base = (void *)0xFEED2B00000;
	iov[43].iov_len = 12288;
	iov[44].iov_base = (void *)0xFEED2C00000;
	iov[44].iov_len = 8192;
	iov[45].iov_base = (void *)0xFEED2F00000;
	iov[45].iov_len = 4096;
	iov[46].iov_base = (void *)0xFEED3000000;
	iov[46].iov_len = 4096;
	iov[47].iov_base = (void *)0xFEED3100000;
	iov[47].iov_len = 4096;
	iov[48].iov_base = (void *)0xFEED3200000;
	iov[48].iov_len = 24576;
	iov[49].iov_base = (void *)0xFEED3300000;
	iov[49].iov_len = 16384;
	iov[50].iov_base = (void *)0xFEED3400000;
	iov[50].iov_len = 12288;
	iov[51].iov_base = (void *)0xFEED3500000;
	iov[51].iov_len = 4096;
	iov[52].iov_base = (void *)0xFEED3600000;
	iov[52].iov_len = 4096;
	iov[53].iov_base = (void *)0xFEED3700000;
	iov[53].iov_len = 4096;
	iov[54].iov_base = (void *)0xFEED3800000;
	iov[54].iov_len = 28672;
	iov[55].iov_base = (void *)0xFEED3900000;
	iov[55].iov_len = 20480;
	iov[56].iov_base = (void *)0xFEED3A00000;
	iov[56].iov_len = 4096;
	iov[57].iov_base = (void *)0xFEED3B00000;
	iov[57].iov_len = 12288;
	iov[58].iov_base = (void *)0xFEED3C00000;
	iov[58].iov_len = 4096;
	iov[59].iov_base = (void *)0xFEED3D00000;
	iov[59].iov_len = 4096;
	iov[60].iov_base = (void *)0xFEED3E00000;
	iov[60].iov_len = 352;

	/* The 1st child IO must be from iov[0] to iov[31], split by the capacity
	 * of child iovs.
	 */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0, 126, 32);
	expected_io->md_buf = md_buf;
	for (i = 0; i < 32; i++) {
		ut_expected_io_set_iov(expected_io, i, iov[i].iov_base, iov[i].iov_len);
	}
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	/* The 2nd child IO must be from iov[32] to the first 864 bytes of iov[33],
	 * split by the I/O boundary requirement.
	 */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 126, 2, 2);
	expected_io->md_buf = md_buf + 126 * 8;
	ut_expected_io_set_iov(expected_io, 0, iov[32].iov_base, iov[32].iov_len);
	ut_expected_io_set_iov(expected_io, 1, iov[33].iov_base, 864);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	/* The 3rd child IO must be from the remaining 3232 bytes of iov[33] to
	 * the first 864 bytes of iov[46], split by the I/O boundary requirement.
	 */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 128, 128, 14);
	expected_io->md_buf = md_buf + 128 * 8;
	ut_expected_io_set_iov(expected_io, 0, (void *)((uintptr_t)iov[33].iov_base + 864),
			       iov[33].iov_len - 864);
	ut_expected_io_set_iov(expected_io, 1, iov[34].iov_base, iov[34].iov_len);
	ut_expected_io_set_iov(expected_io, 2, iov[35].iov_base, iov[35].iov_len);
	ut_expected_io_set_iov(expected_io, 3, iov[36].iov_base, iov[36].iov_len);
	ut_expected_io_set_iov(expected_io, 4, iov[37].iov_base, iov[37].iov_len);
	ut_expected_io_set_iov(expected_io, 5, iov[38].iov_base, iov[38].iov_len);
	ut_expected_io_set_iov(expected_io, 6, iov[39].iov_base, iov[39].iov_len);
	ut_expected_io_set_iov(expected_io, 7, iov[40].iov_base, iov[40].iov_len);
	ut_expected_io_set_iov(expected_io, 8, iov[41].iov_base, iov[41].iov_len);
	ut_expected_io_set_iov(expected_io, 9, iov[42].iov_base, iov[42].iov_len);
	ut_expected_io_set_iov(expected_io, 10, iov[43].iov_base, iov[43].iov_len);
	ut_expected_io_set_iov(expected_io, 11, iov[44].iov_base, iov[44].iov_len);
	ut_expected_io_set_iov(expected_io, 12, iov[45].iov_base, iov[45].iov_len);
	ut_expected_io_set_iov(expected_io, 13, iov[46].iov_base, 864);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	/* The 4th child IO must be from the remaining 3232 bytes of iov[46] to the
	 * first 864 bytes of iov[52], split by the I/O boundary requirement.
	 */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 256, 128, 7);
	expected_io->md_buf = md_buf + 256 * 8;
	ut_expected_io_set_iov(expected_io, 0, (void *)((uintptr_t)iov[46].iov_base + 864),
			       iov[46].iov_len - 864);
	ut_expected_io_set_iov(expected_io, 1, iov[47].iov_base, iov[47].iov_len);
	ut_expected_io_set_iov(expected_io, 2, iov[48].iov_base, iov[48].iov_len);
	ut_expected_io_set_iov(expected_io, 3, iov[49].iov_base, iov[49].iov_len);
	ut_expected_io_set_iov(expected_io, 4, iov[50].iov_base, iov[50].iov_len);
	ut_expected_io_set_iov(expected_io, 5, iov[51].iov_base, iov[51].iov_len);
	ut_expected_io_set_iov(expected_io, 6, iov[52].iov_base, 864);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	/* The 5th child IO must be from the remaining 3232 bytes of iov[52] to
	 * the first 4960 bytes of iov[57], split by the I/O boundary requirement.
	 */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 384, 128, 6);
	expected_io->md_buf = md_buf + 384 * 8;
	ut_expected_io_set_iov(expected_io, 0, (void *)((uintptr_t)iov[52].iov_base + 864),
			       iov[52].iov_len - 864);
	ut_expected_io_set_iov(expected_io, 1, iov[53].iov_base, iov[53].iov_len);
	ut_expected_io_set_iov(expected_io, 2, iov[54].iov_base, iov[54].iov_len);
	ut_expected_io_set_iov(expected_io, 3, iov[55].iov_base, iov[55].iov_len);
	ut_expected_io_set_iov(expected_io, 4, iov[56].iov_base, iov[56].iov_len);
	ut_expected_io_set_iov(expected_io, 5, iov[57].iov_base, 4960);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	/* The 6th child IO must be from the remaining 7328 bytes of iov[57]
	 * to the first 3936 bytes of iov[59], split by the capacity of child iovs.
	 */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 512, 30, 3);
	expected_io->md_buf = md_buf + 512 * 8;
	ut_expected_io_set_iov(expected_io, 0, (void *)((uintptr_t)iov[57].iov_base + 4960),
			       iov[57].iov_len - 4960);
	ut_expected_io_set_iov(expected_io, 1, iov[58].iov_base, iov[58].iov_len);
	ut_expected_io_set_iov(expected_io, 2, iov[59].iov_base, 3936);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	/* The 7th child IO is from the remaining 160 bytes of iov[59] and iov[60]. */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 542, 1, 2);
	expected_io->md_buf = md_buf + 542 * 8;
	ut_expected_io_set_iov(expected_io, 0, (void *)((uintptr_t)iov[59].iov_base + 3936),
			       iov[59].iov_len - 3936);
	ut_expected_io_set_iov(expected_io, 1, iov[60].iov_base, iov[60].iov_len);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	rc = spdk_bdev_readv_blocks_with_md(desc, io_ch, iov, 61, md_buf,
					    0, 543, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 5);
	stub_complete_io(5);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
	CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS);

	/* Test a WRITE_ZEROES that would span an I/O boundary.  WRITE_ZEROES should not be
	 * split, so test that.
	 */
	bdev->optimal_io_boundary = 15;
	g_io_done = false;
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, 9, 36, 0);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	rc = spdk_bdev_write_zeroes_blocks(desc, io_ch, 9, 36, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == true);

	/* Test an UNMAP.  This should also not be split. */
	bdev->optimal_io_boundary = 16;
	g_io_done = false;
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_UNMAP, 15, 2, 0);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	rc = spdk_bdev_unmap_blocks(desc, io_ch, 15, 2, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == true);

	/* Test a FLUSH.  This should also not be split. */
	bdev->optimal_io_boundary = 16;
	g_io_done = false;
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_FLUSH, 15, 2, 0);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	rc = spdk_bdev_flush_blocks(desc, io_ch, 15, 2, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == true);

	CU_ASSERT(TAILQ_EMPTY(&g_bdev_ut_channel->expected_io));

	/* Child I/O requests return an error status */
	bdev->optimal_io_boundary = 16;
	iov[0].iov_base = (void *)0x10000;
	iov[0].iov_len = 512 * 64;
	g_io_exp_status = SPDK_BDEV_IO_STATUS_FAILED;
	g_io_done = false;
	g_io_status = SPDK_BDEV_IO_STATUS_SUCCESS;

	rc = spdk_bdev_readv_blocks(desc, io_ch, iov, 1, 1, 64, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 5);
	stub_complete_io(4);
	CU_ASSERT(g_io_done == false);
	CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED);

	/* Test that a multi-vector command is terminated with failure, without
	 * continuing the splitting process, when one of its child I/Os fails.
	 * The multi-vector command is the same as the one above that needs to be
	 * split on the I/O boundary and then split further due to the capacity
	 * of child iovs.
	 */
	for (i = 0; i < BDEV_IO_NUM_CHILD_IOV - 1; i++) {
		iov[i].iov_base = (void *)((i + 1) * 0x10000);
		iov[i].iov_len = 512;
	}
	iov[BDEV_IO_NUM_CHILD_IOV - 1].iov_base = (void *)(BDEV_IO_NUM_CHILD_IOV * 0x10000);
	iov[BDEV_IO_NUM_CHILD_IOV - 1].iov_len = 256;

	iov[BDEV_IO_NUM_CHILD_IOV].iov_base = (void *)((BDEV_IO_NUM_CHILD_IOV + 1) * 0x10000);
	iov[BDEV_IO_NUM_CHILD_IOV].iov_len = 256;

	iov[BDEV_IO_NUM_CHILD_IOV + 1].iov_base = (void *)((BDEV_IO_NUM_CHILD_IOV + 2) * 0x10000);
	iov[BDEV_IO_NUM_CHILD_IOV + 1].iov_len = 512;

	bdev->optimal_io_boundary = BDEV_IO_NUM_CHILD_IOV;

	g_io_exp_status = SPDK_BDEV_IO_STATUS_FAILED;
	g_io_done = false;
	g_io_status = SPDK_BDEV_IO_STATUS_SUCCESS;

	rc = spdk_bdev_readv_blocks(desc, io_ch, iov, BDEV_IO_NUM_CHILD_IOV * 2, 0,
				    BDEV_IO_NUM_CHILD_IOV + 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED);

	g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS;

	/* For this test we create the following conditions to hit the code path where
	 * we try to send an I/O following a split that has no iovs because we had to
	 * trim them for alignment reasons:
	 *
	 * - 16K boundary; our I/O will start at offset 0 with a length of 0x4200.
	 * - Our IOVs are 0x212 in size so that we run into the 16K boundary at child IOV
	 *   position 30 and overshoot by 0x2e.
	 * - That means we'll send the I/O and loop back to pick up the remaining bytes at
	 *   child IOV index 31.  When we do, we find that we have to shorten index 31 by 0x2e,
	 *   which eliminates that vector, so we just send the first split I/O with 30 vectors
	 *   and let the completion pick up the last 2 vectors.
	 */
	 *
	 * - 16K boundary, our IO will start at offset 0 with a length of 0x4200
	 * - Our IOVs are 0x212 in size so that we run into the 16K boundary at child IOV
	 *   position 30 and overshoot by 0x2e.
	 * - That means we'll send the IO and loop back to pick up the remaining bytes at
	 *   child IOV index 31. When we do, we find that we have to shorten index 31 by 0x2e,
	 *   which eliminates that vector, so we just send the first split IO with 30 vectors
	 *   and let the completion pick up the last 2 vectors.
	 */
	bdev->optimal_io_boundary = 32;
	bdev->split_on_optimal_io_boundary = true;
	g_io_done = false;

	/* Init all parent IOVs to 0x212 */
	for (i = 0; i < BDEV_IO_NUM_CHILD_IOV + 2; i++) {
		iov[i].iov_base = (void *)((i + 1) * 0x10000);
		iov[i].iov_len = 0x212;
	}

	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0, BDEV_IO_NUM_CHILD_IOV,
					   BDEV_IO_NUM_CHILD_IOV - 1);
	/* expect 0-29 to be 1:1 with the parent iov */
	for (i = 0; i < BDEV_IO_NUM_CHILD_IOV - 2; i++) {
		ut_expected_io_set_iov(expected_io, i, iov[i].iov_base, iov[i].iov_len);
	}

	/* expect index 30 to be shortened to 0x1e4 (0x212 - 0x2e) because of the alignment,
	 * where 0x2e is the amount by which we overshot the 16K boundary
	 */
	ut_expected_io_set_iov(expected_io, BDEV_IO_NUM_CHILD_IOV - 2,
			       (void *)(iov[BDEV_IO_NUM_CHILD_IOV - 2].iov_base), 0x1e4);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	/* 2nd child IO will have 2 remaining vectors: one that picks up where the shortened
	 * vector left off, taking us to the next boundary, and then a final one to get us to
	 * 0x4200 bytes for the IO.
	 */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, BDEV_IO_NUM_CHILD_IOV,
					   1, 2);
	/* position 30 picked up the remaining bytes to the next boundary */
	ut_expected_io_set_iov(expected_io, 0,
			       (void *)(iov[BDEV_IO_NUM_CHILD_IOV - 2].iov_base + 0x1e4), 0x2e);

	/* position 31 picked up the rest of the transfer to get us to 0x4200 */
	ut_expected_io_set_iov(expected_io, 1,
			       (void *)(iov[BDEV_IO_NUM_CHILD_IOV - 1].iov_base), 0x1d2);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	rc = spdk_bdev_readv_blocks(desc, io_ch, iov, BDEV_IO_NUM_CHILD_IOV + 1, 0,
				    BDEV_IO_NUM_CHILD_IOV + 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);

	spdk_put_io_channel(io_ch);
	spdk_bdev_close(desc);
	free_bdev(bdev);
	spdk_bdev_finish(bdev_fini_cb, NULL);
	poll_threads();
}

static void
bdev_io_max_size_and_segment_split_test(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc = NULL;
	struct spdk_io_channel *io_ch;
	struct spdk_bdev_opts bdev_opts = {};
	struct iovec iov[BDEV_IO_NUM_CHILD_IOV * 2];
	struct ut_expected_io *expected_io;
	uint64_t i;
	int rc;

	spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts));
	bdev_opts.bdev_io_pool_size = 512;
	bdev_opts.bdev_io_cache_size = 64;

	bdev_opts.opts_size = sizeof(bdev_opts);
	rc = spdk_bdev_set_opts(&bdev_opts);
	CU_ASSERT(rc == 0);
	spdk_bdev_initialize(bdev_init_cb, NULL);

	bdev = allocate_bdev("bdev0");

	rc = spdk_bdev_open_ext(bdev->name, true, bdev_ut_event_cb, NULL, &desc);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(desc != NULL);
	io_ch = spdk_bdev_get_io_channel(desc);
	CU_ASSERT(io_ch != NULL);

	bdev->split_on_optimal_io_boundary = false;
	bdev->optimal_io_boundary = 0;

	/* Case 0: max_num_segments == 0,
	 * but the IO size (2 * 512) exceeds max_segment_size (512).
	 */
	bdev->max_segment_size = 512;
	bdev->max_num_segments = 0;
	g_io_done = false;

	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 14, 2, 2);
	ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 512);
	ut_expected_io_set_iov(expected_io, 1, (void *)(0xF000 + 512), 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 2, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);

	/* Case 1: max_segment_size == 0,
	 * but the iov count (2) exceeds max_num_segments (1).
	 */
	bdev->max_segment_size = 0;
	bdev->max_num_segments = 1;
	g_io_done = false;

	iov[0].iov_base = (void *)0x10000;
	iov[0].iov_len = 512;
	iov[1].iov_base = (void *)0x20000;
	iov[1].iov_len = 8 * 512;

	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 14, 1, 1);
	ut_expected_io_set_iov(expected_io, 0, iov[0].iov_base, iov[0].iov_len);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 15, 8, 1);
	ut_expected_io_set_iov(expected_io, 0, iov[1].iov_base, iov[1].iov_len);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	rc = spdk_bdev_readv_blocks(desc, io_ch, iov, 2, 14, 9, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2);
	stub_complete_io(2);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);

	/* Test that a non-vector command is split correctly.
	 * Set up the expected values before calling spdk_bdev_read_blocks
	 */
	bdev->max_segment_size = 512;
	bdev->max_num_segments = 1;
	g_io_done = false;

	/* Child IO 0 */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 14, 1, 1);
	ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	/* Child IO 1 */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 15, 1, 1);
	ut_expected_io_set_iov(expected_io, 0, (void *)(0xF000 + 1 * 512), 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	/* spdk_bdev_read_blocks will submit the first child immediately.
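	 * The second child also fits within the child iov limits, so it is submitted
	 * right behind the first; the assertions below see both outstanding at once.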
	 */
	rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 2, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2);
	stub_complete_io(2);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);

	/* Now set up a more complex, multi-vector command that needs to be split,
	 * including splitting iovecs.
	 */
	bdev->max_segment_size = 2 * 512;
	bdev->max_num_segments = 1;
	g_io_done = false;

	iov[0].iov_base = (void *)0x10000;
	iov[0].iov_len = 2 * 512;
	iov[1].iov_base = (void *)0x20000;
	iov[1].iov_len = 4 * 512;
	iov[2].iov_base = (void *)0x30000;
	iov[2].iov_len = 6 * 512;

	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 14, 2, 1);
	ut_expected_io_set_iov(expected_io, 0, iov[0].iov_base, 512 * 2);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	/* iov[1] is split into 2 iov entries, then the segments are split */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 16, 2, 1);
	ut_expected_io_set_iov(expected_io, 0, iov[1].iov_base, 512 * 2);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 18, 2, 1);
	ut_expected_io_set_iov(expected_io, 0, iov[1].iov_base + 512 * 2, 512 * 2);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	/* iov[2] is split into 3 iov entries, then the segments are split */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 20, 2, 1);
	ut_expected_io_set_iov(expected_io, 0, iov[2].iov_base, 512 * 2);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 22, 2, 1);
	ut_expected_io_set_iov(expected_io, 0, iov[2].iov_base + 512 * 2, 512 * 2);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 24, 2, 1);
	ut_expected_io_set_iov(expected_io, 0, iov[2].iov_base + 512 * 4, 512 * 2);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	rc = spdk_bdev_writev_blocks(desc, io_ch, iov, 3, 14, 12, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 6);
	stub_complete_io(6);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);

	/* Test multi vector command that needs to be split by strip and then needs to be
	 * split further due to the capacity of parent IO child iovs.
	 */
	bdev->max_segment_size = 512;
	bdev->max_num_segments = 1;
	g_io_done = false;

	for (i = 0; i < BDEV_IO_NUM_CHILD_IOV; i++) {
		iov[i].iov_base = (void *)((i + 1) * 0x10000);
		iov[i].iov_len = 512 * 2;
	}

	/* Each input iov entry is split into 2 child iovs;
	 * half of the input iovs fill all the child iov entries of a single child IO.
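	 * (BDEV_IO_NUM_CHILD_IOV parent iovs of 1024 bytes produce
	 * 2 * BDEV_IO_NUM_CHILD_IOV single-block children; at one child iov entry each,
	 * they are issued in two rounds of BDEV_IO_NUM_CHILD_IOV IOs.)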
	 */
	for (i = 0; i < BDEV_IO_NUM_CHILD_IOV / 2; i++) {
		expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 2 * i, 1, 1);
		ut_expected_io_set_iov(expected_io, 0, iov[i].iov_base, 512);
		TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

		expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 2 * i + 1, 1, 1);
		ut_expected_io_set_iov(expected_io, 0, iov[i].iov_base + 512, 512);
		TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
	}

	/* The remaining iovs are split in the second round */
	for (i = BDEV_IO_NUM_CHILD_IOV / 2; i < BDEV_IO_NUM_CHILD_IOV; i++) {
		expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, i * 2, 1, 1);
		ut_expected_io_set_iov(expected_io, 0, iov[i].iov_base, 512);
		TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

		expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, i * 2 + 1, 1, 1);
		ut_expected_io_set_iov(expected_io, 0, iov[i].iov_base + 512, 512);
		TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
	}

	rc = spdk_bdev_readv_blocks(desc, io_ch, iov, BDEV_IO_NUM_CHILD_IOV, 0,
				    BDEV_IO_NUM_CHILD_IOV * 2, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == BDEV_IO_NUM_CHILD_IOV);
	stub_complete_io(BDEV_IO_NUM_CHILD_IOV);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == BDEV_IO_NUM_CHILD_IOV);
	stub_complete_io(BDEV_IO_NUM_CHILD_IOV);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);

	/* A negative case: a divided child IO is not a multiple of the block size,
	 * so the splitting exits with an error.
	 */
	bdev->max_segment_size = 512;
	bdev->max_num_segments = 1;
	g_io_done = false;

	iov[0].iov_base = (void *)0x10000;
	iov[0].iov_len = 512 + 256;
	iov[1].iov_base = (void *)0x20000;
	iov[1].iov_len = 256;

	/* iov[0] is split into 512 and 256.
	 * 256 is less than a block size, and in the next round of splitting it is
	 * found that the first child IO is smaller than the block size, so we exit
	 * with an error.
	 */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0, 1, 1);
	ut_expected_io_set_iov(expected_io, 0, iov[0].iov_base, 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	rc = spdk_bdev_readv_blocks(desc, io_ch, iov, 2, 0, 2, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	/* First child IO is OK */
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);

	/* error exit */
	stub_complete_io(1);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);

	/* Test multi vector command that needs to be split by strip and then needs to be
	 * split further due to the capacity of child iovs.
	 *
	 * In this case, the last two iovs need to be split, but doing so would exceed
	 * the capacity of the child iovs, so the remainder has to wait until the first
	 * batch is completed.
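	 * ((BDEV_IO_NUM_CHILD_IOV - 2) single-block iovs plus two 1024-byte iovs that
	 * each split in two yields BDEV_IO_NUM_CHILD_IOV + 2 child entries, two more
	 * than one child IO can carry.)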
	 */
	bdev->max_segment_size = 512;
	bdev->max_num_segments = BDEV_IO_NUM_CHILD_IOV;
	g_io_done = false;

	for (i = 0; i < BDEV_IO_NUM_CHILD_IOV - 2; i++) {
		iov[i].iov_base = (void *)((i + 1) * 0x10000);
		iov[i].iov_len = 512;
	}
	for (i = BDEV_IO_NUM_CHILD_IOV - 2; i < BDEV_IO_NUM_CHILD_IOV; i++) {
		iov[i].iov_base = (void *)((i + 1) * 0x10000);
		iov[i].iov_len = 512 * 2;
	}

	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0,
					   BDEV_IO_NUM_CHILD_IOV, BDEV_IO_NUM_CHILD_IOV);
	/* 0 ~ (BDEV_IO_NUM_CHILD_IOV - 2) will not be split */
	for (i = 0; i < BDEV_IO_NUM_CHILD_IOV - 2; i++) {
		ut_expected_io_set_iov(expected_io, i, iov[i].iov_base, iov[i].iov_len);
	}
	/* (BDEV_IO_NUM_CHILD_IOV - 2) is split */
	ut_expected_io_set_iov(expected_io, i, iov[i].iov_base, 512);
	ut_expected_io_set_iov(expected_io, i + 1, iov[i].iov_base + 512, 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	/* The child iov entries exceed the parent IO's maximum, so the last iov is
	 * split in the next round.
	 */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, BDEV_IO_NUM_CHILD_IOV, 2, 2);
	ut_expected_io_set_iov(expected_io, 0, iov[i + 1].iov_base, 512);
	ut_expected_io_set_iov(expected_io, 1, iov[i + 1].iov_base + 512, 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	rc = spdk_bdev_readv_blocks(desc, io_ch, iov, BDEV_IO_NUM_CHILD_IOV, 0,
				    BDEV_IO_NUM_CHILD_IOV + 2, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == false);

	/* Next round */
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);

	/* This case is similar to the previous one, but the IO composed of the last few
	 * entries of the child iov does not add up to a full blocklen, so those entries
	 * cannot be put into this IO and have to wait for the next one.
	 */
	bdev->max_segment_size = 512;
	bdev->max_num_segments = BDEV_IO_NUM_CHILD_IOV;
	g_io_done = false;

	for (i = 0; i < BDEV_IO_NUM_CHILD_IOV - 2; i++) {
		iov[i].iov_base = (void *)((i + 1) * 0x10000);
		iov[i].iov_len = 512;
	}

	for (i = BDEV_IO_NUM_CHILD_IOV - 2; i < BDEV_IO_NUM_CHILD_IOV + 2; i++) {
		iov[i].iov_base = (void *)((i + 1) * 0x10000);
		iov[i].iov_len = 128;
	}

	/* The first child iovcnt isn't BDEV_IO_NUM_CHILD_IOV but BDEV_IO_NUM_CHILD_IOV - 2,
	 * because the remaining 2 iovs are not enough for a blocklen.
	 */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0,
					   BDEV_IO_NUM_CHILD_IOV - 2, BDEV_IO_NUM_CHILD_IOV - 2);
	for (i = 0; i < BDEV_IO_NUM_CHILD_IOV - 2; i++) {
		ut_expected_io_set_iov(expected_io, i, iov[i].iov_base, iov[i].iov_len);
	}
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	/* The second child IO waits until the first child IO completes before executing,
	 * because the combined iovcnt of the two IOs exceeds the child iovcnt of the
	 * parent IO.
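	 * The remaining four 128-byte entries together make exactly one 512-byte block
	 * (4 * 128 = 512),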
	 * spanning parent iovs BDEV_IO_NUM_CHILD_IOV - 2 to BDEV_IO_NUM_CHILD_IOV + 2.
	 */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, BDEV_IO_NUM_CHILD_IOV - 2,
					   1, 4);
	ut_expected_io_set_iov(expected_io, 0, iov[i].iov_base, iov[i].iov_len);
	ut_expected_io_set_iov(expected_io, 1, iov[i + 1].iov_base, iov[i + 1].iov_len);
	ut_expected_io_set_iov(expected_io, 2, iov[i + 2].iov_base, iov[i + 2].iov_len);
	ut_expected_io_set_iov(expected_io, 3, iov[i + 3].iov_base, iov[i + 3].iov_len);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	rc = spdk_bdev_readv_blocks(desc, io_ch, iov, BDEV_IO_NUM_CHILD_IOV + 2, 0,
				    BDEV_IO_NUM_CHILD_IOV - 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);

	/* A very complicated case. Each sg entry exceeds max_segment_size and
	 * needs to be split. At the same time, each child IO must be a multiple of
	 * blocklen, and the child iovcnt exceeds the parent iovcnt.
	 */
	bdev->max_segment_size = 512 + 128;
	bdev->max_num_segments = 3;
	g_io_done = false;

	for (i = 0; i < BDEV_IO_NUM_CHILD_IOV - 2; i++) {
		iov[i].iov_base = (void *)((i + 1) * 0x10000);
		iov[i].iov_len = 512 + 256;
	}

	for (i = BDEV_IO_NUM_CHILD_IOV - 2; i < BDEV_IO_NUM_CHILD_IOV + 2; i++) {
		iov[i].iov_base = (void *)((i + 1) * 0x10000);
		iov[i].iov_len = 512 + 128;
	}

	/* Each for() round uses 9 child iov entries (3 * 9 = 27 in total), consumes
	 * 4 parent iov entries and 6 blocks, and generates 3 child IOs (9 in total).
	 */
	for (i = 0; i < 3; i++) {
		uint32_t j = i * 4;

		expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, i * 6, 2, 3);
		ut_expected_io_set_iov(expected_io, 0, iov[j].iov_base, 640);
		ut_expected_io_set_iov(expected_io, 1, iov[j].iov_base + 640, 128);
		ut_expected_io_set_iov(expected_io, 2, iov[j + 1].iov_base, 256);
		TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

		/* Each child IO must be a multiple of blocklen, so iov[j + 2] must be
		 * split. If the third entry were also added, the child would no longer
		 * be a multiple of blocklen. The dropped entry still occupies one of
		 * the parent's child iov entries.
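		 * (512 + 512 = 1024 bytes = 2 blocks; appending iov[j + 2]'s 256-byte
		 * tail would make 1280 bytes, which is not a multiple of the 512-byte
		 * blocklen.)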
		 */
		expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, i * 6 + 2, 2, 2);
		ut_expected_io_set_iov(expected_io, 0, iov[j + 1].iov_base + 256, 512);
		ut_expected_io_set_iov(expected_io, 1, iov[j + 2].iov_base, 512);
		TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

		expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, i * 6 + 4, 2, 3);
		ut_expected_io_set_iov(expected_io, 0, iov[j + 2].iov_base + 512, 256);
		ut_expected_io_set_iov(expected_io, 1, iov[j + 3].iov_base, 640);
		ut_expected_io_set_iov(expected_io, 2, iov[j + 3].iov_base + 640, 128);
		TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
	}

	/* The child iov position is at 27 for the 10th child IO:
	 * the parent iov entry index is 3 * 4 and the block offset is 3 * 6.
	 */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 18, 2, 3);
	ut_expected_io_set_iov(expected_io, 0, iov[12].iov_base, 640);
	ut_expected_io_set_iov(expected_io, 1, iov[12].iov_base + 640, 128);
	ut_expected_io_set_iov(expected_io, 2, iov[13].iov_base, 256);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	/* The child iov position is at 30 for the 11th child IO */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 20, 2, 2);
	ut_expected_io_set_iov(expected_io, 0, iov[13].iov_base + 256, 512);
	ut_expected_io_set_iov(expected_io, 1, iov[14].iov_base, 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	/* The 2nd split round starts here with iovpos 0: the 12th child IO */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 22, 2, 3);
	ut_expected_io_set_iov(expected_io, 0, iov[14].iov_base + 512, 256);
	ut_expected_io_set_iov(expected_io, 1, iov[15].iov_base, 640);
	ut_expected_io_set_iov(expected_io, 2, iov[15].iov_base + 640, 128);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	/* Consume 9 more child IOs and 27 child iov entries:
	 * 4 parent iov entries and 6 blocks per for() round,
	 * with the parent iov index starting at 16 and the block offset at 24.
	 */
	for (i = 0; i < 3; i++) {
		uint32_t j = i * 4 + 16;
		uint32_t offset = i * 6 + 24;

		expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, offset, 2, 3);
		ut_expected_io_set_iov(expected_io, 0, iov[j].iov_base, 640);
		ut_expected_io_set_iov(expected_io, 1, iov[j].iov_base + 640, 128);
		ut_expected_io_set_iov(expected_io, 2, iov[j + 1].iov_base, 256);
		TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

		/* Each child IO must be a multiple of blocklen, so iov[j + 2] must be
		 * split. If the third entry were also added, the child would no longer
		 * be a multiple of blocklen. The dropped entry still occupies one of
		 * the parent's child iov entries.
		 */
		expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, offset + 2, 2, 2);
		ut_expected_io_set_iov(expected_io, 0, iov[j + 1].iov_base + 256, 512);
		ut_expected_io_set_iov(expected_io, 1, iov[j + 2].iov_base, 512);
		TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

		expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, offset + 4, 2, 3);
		ut_expected_io_set_iov(expected_io, 0, iov[j + 2].iov_base + 512, 256);
		ut_expected_io_set_iov(expected_io, 1, iov[j + 3].iov_base, 640);
		ut_expected_io_set_iov(expected_io, 2, iov[j + 3].iov_base + 640, 128);
		TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
	}

	/* The 22nd child IO, child iov position at 30 */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 42, 1, 1);
	ut_expected_io_set_iov(expected_io, 0, iov[28].iov_base, 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	/* The third round */
	/* Here is the 23rd child IO and the child iovpos is 0 */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 43, 2, 3);
	ut_expected_io_set_iov(expected_io, 0, iov[28].iov_base + 512, 256);
	ut_expected_io_set_iov(expected_io, 1, iov[29].iov_base, 640);
	ut_expected_io_set_iov(expected_io, 2, iov[29].iov_base + 640, 128);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	/* The 24th child IO */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 45, 3, 3);
	ut_expected_io_set_iov(expected_io, 0, iov[30].iov_base, 640);
	ut_expected_io_set_iov(expected_io, 1, iov[31].iov_base, 640);
	ut_expected_io_set_iov(expected_io, 2, iov[32].iov_base, 256);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	/* The 25th child IO */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 48, 2, 2);
	ut_expected_io_set_iov(expected_io, 0, iov[32].iov_base + 256, 384);
	ut_expected_io_set_iov(expected_io, 1, iov[33].iov_base, 640);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	rc = spdk_bdev_readv_blocks(desc, io_ch, iov, BDEV_IO_NUM_CHILD_IOV + 2, 0,
				    50, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	/* The parent IO supports up to 32 child iovs, so at most 11 child IOs can be
	 * split off at a time; splitting continues after the first batch completes.
	 */
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 11);
	stub_complete_io(11);
	CU_ASSERT(g_io_done == false);

	/* The 2nd round */
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 11);
	stub_complete_io(11);
	CU_ASSERT(g_io_done == false);

	/* The last round */
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 3);
	stub_complete_io(3);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);

	/* Test a WRITE_ZEROES. This should also not be split.
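	 * (WRITE_ZEROES carries no data buffer, so max_segment_size and
	 * max_num_segments place no constraint on it.)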
	 */
	bdev->max_segment_size = 512;
	bdev->max_num_segments = 1;
	g_io_done = false;

	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, 9, 36, 0);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	rc = spdk_bdev_write_zeroes_blocks(desc, io_ch, 9, 36, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == true);

	/* Test an UNMAP. This should also not be split. */
	g_io_done = false;

	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_UNMAP, 15, 4, 0);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	rc = spdk_bdev_unmap_blocks(desc, io_ch, 15, 4, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == true);

	/* Test a FLUSH. This should also not be split. */
	g_io_done = false;

	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_FLUSH, 15, 2, 0);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	rc = spdk_bdev_flush_blocks(desc, io_ch, 15, 2, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == true);

	spdk_put_io_channel(io_ch);
	spdk_bdev_close(desc);
	free_bdev(bdev);
	spdk_bdev_finish(bdev_fini_cb, NULL);
	poll_threads();
}

static void
bdev_io_mix_split_test(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc = NULL;
	struct spdk_io_channel *io_ch;
	struct spdk_bdev_opts bdev_opts = {};
	struct iovec iov[BDEV_IO_NUM_CHILD_IOV * 2];
	struct ut_expected_io *expected_io;
	uint64_t i;
	int rc;

	spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts));
	bdev_opts.bdev_io_pool_size = 512;
	bdev_opts.bdev_io_cache_size = 64;

	rc = spdk_bdev_set_opts(&bdev_opts);
	CU_ASSERT(rc == 0);
	spdk_bdev_initialize(bdev_init_cb, NULL);

	bdev = allocate_bdev("bdev0");

	rc = spdk_bdev_open_ext(bdev->name, true, bdev_ut_event_cb, NULL, &desc);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(desc != NULL);
	io_ch = spdk_bdev_get_io_channel(desc);
	CU_ASSERT(io_ch != NULL);

	/* First case: optimal_io_boundary == max_segment_size * max_num_segments */
	bdev->split_on_optimal_io_boundary = true;
	bdev->optimal_io_boundary = 16;

	bdev->max_segment_size = 512;
	bdev->max_num_segments = 16;
	g_io_done = false;

	/* An IO crossing the IO boundary requires splitting.
	 * Total: 2 child IOs.
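	 * (Offset 14, length 4 crosses the optimal_io_boundary at block 16; with
	 * max_segment_size == 512, each 2-block child carries two 512-byte iov entries.)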
	 */

	/* The 1st child IO splits the buffer into multiple segment entries */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 14, 2, 2);
	ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 512);
	ut_expected_io_set_iov(expected_io, 1, (void *)(0xF000 + 512), 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	/* The 2nd child IO splits the buffer into multiple segment entries */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 16, 2, 2);
	ut_expected_io_set_iov(expected_io, 0, (void *)(0xF000 + 2 * 512), 512);
	ut_expected_io_set_iov(expected_io, 1, (void *)(0xF000 + 3 * 512), 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 4, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2);
	stub_complete_io(2);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);

	/* Second case: optimal_io_boundary > max_segment_size * max_num_segments */
	bdev->max_segment_size = 15 * 512;
	bdev->max_num_segments = 1;
	g_io_done = false;

	/* An IO crossing the IO boundary requires splitting.
	 * The 1st child IO's segment size exceeds max_segment_size,
	 * so it is split into multiple segment entries,
	 * and then into 2 child IOs because of max_num_segments.
	 * Total: 3 child IOs.
	 */

	/* The first 2 child IOs cover one IO boundary's worth of data:
	 * because optimal_io_boundary > max_segment_size * max_num_segments,
	 * the first boundary's data splits into these 2 IOs.
	 */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0, 15, 1);
	ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 512 * 15);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 15, 1, 1);
	ut_expected_io_set_iov(expected_io, 0, (void *)(0xF000 + 512 * 15), 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	/* The 3rd child IO exists because of the IO boundary */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 16, 2, 1);
	ut_expected_io_set_iov(expected_io, 0, (void *)(0xF000 + 512 * 16), 512 * 2);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 0, 18, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 3);
	stub_complete_io(3);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);

	/* Third case: optimal_io_boundary < max_segment_size * max_num_segments */
	bdev->max_segment_size = 17 * 512;
	bdev->max_num_segments = 1;
	g_io_done = false;

	/* An IO crossing the IO boundary requires splitting,
	 * but the child IOs do not split any further.
	 * Total: 2 child IOs.
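	 * (16 blocks = 8192 bytes fits in a single 17 * 512 = 8704-byte segment,
	 * so each child keeps one iov entry.)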
	 */

	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0, 16, 1);
	ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 512 * 16);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 16, 2, 1);
	ut_expected_io_set_iov(expected_io, 0, (void *)(0xF000 + 512 * 16), 512 * 2);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 0, 18, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2);
	stub_complete_io(2);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);

	/* Now set up a more complex, multi-vector command that needs to be split,
	 * including splitting iovecs.
	 * optimal_io_boundary < max_segment_size * max_num_segments
	 */
	bdev->max_segment_size = 3 * 512;
	bdev->max_num_segments = 6;
	g_io_done = false;

	iov[0].iov_base = (void *)0x10000;
	iov[0].iov_len = 4 * 512;
	iov[1].iov_base = (void *)0x20000;
	iov[1].iov_len = 4 * 512;
	iov[2].iov_base = (void *)0x30000;
	iov[2].iov_len = 10 * 512;

	/* An IO crossing the IO boundary requires splitting.
	 * The 1st child IO's segment size exceeds max_segment_size and, after the
	 * segments are split, the segment count exceeds max_num_segments,
	 * so the 1st child IO splits into 2 child IOs.
	 * Total: 3 child IOs.
	 */

	/* The first 2 child IOs cover one IO boundary's worth of data.
	 * After the segment size is split, the segment count is exceeded,
	 * so the data splits into 2 child IOs.
	 */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 0, 14, 6);
	ut_expected_io_set_iov(expected_io, 0, iov[0].iov_base, 512 * 3);
	ut_expected_io_set_iov(expected_io, 1, iov[0].iov_base + 512 * 3, 512);
	ut_expected_io_set_iov(expected_io, 2, iov[1].iov_base, 512 * 3);
	ut_expected_io_set_iov(expected_io, 3, iov[1].iov_base + 512 * 3, 512);
	ut_expected_io_set_iov(expected_io, 4, iov[2].iov_base, 512 * 3);
	ut_expected_io_set_iov(expected_io, 5, iov[2].iov_base + 512 * 3, 512 * 3);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	/* The 2nd child IO has the remaining segment entries */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 14, 2, 1);
	ut_expected_io_set_iov(expected_io, 0, iov[2].iov_base + 512 * 6, 512 * 2);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 16, 2, 1);
	ut_expected_io_set_iov(expected_io, 0, iov[2].iov_base + 512 * 8, 512 * 2);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	rc = spdk_bdev_writev_blocks(desc, io_ch, iov, 3, 0, 18, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 3);
	stub_complete_io(3);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);

	/* A very complicated case. Each sg entry exceeds max_segment_size
	 * and is split on the IO boundary.
	 * optimal_io_boundary < max_segment_size * max_num_segments
	 */
	bdev->max_segment_size = 3 * 512;
	bdev->max_num_segments = BDEV_IO_NUM_CHILD_IOV;
	g_io_done = false;

	for (i = 0; i < 20; i++) {
		iov[i].iov_base = (void *)((i + 1) * 0x10000);
		iov[i].iov_len = 512 * 4;
	}

	/* An IO crossing the IO boundary requires splitting.
	 * The 80-block length splits into 5 child IOs based on the offset and the
	 * IO boundary, and each iov entry splits into 2 entries because of
	 * max_segment_size.
	 * Total: 5 child IOs.
	 */

	/* 4 iov entries fall within one IO boundary, and each iov entry splits into 2,
	 * so each child IO occupies 8 child iov entries.
	 */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 0, 16, 8);
	for (i = 0; i < 4; i++) {
		int iovcnt = i * 2;

		ut_expected_io_set_iov(expected_io, iovcnt, iov[i].iov_base, 512 * 3);
		ut_expected_io_set_iov(expected_io, iovcnt + 1, iov[i].iov_base + 512 * 3, 512);
	}
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	/* 2nd child IO, for a running total of 16 child iov entries of the parent IO */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 16, 16, 8);
	for (i = 4; i < 8; i++) {
		int iovcnt = (i - 4) * 2;

		ut_expected_io_set_iov(expected_io, iovcnt, iov[i].iov_base, 512 * 3);
		ut_expected_io_set_iov(expected_io, iovcnt + 1, iov[i].iov_base + 512 * 3, 512);
	}
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	/* 3rd child IO, for a running total of 24 child iov entries of the parent IO */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 32, 16, 8);
	for (i = 8; i < 12; i++) {
		int iovcnt = (i - 8) * 2;

		ut_expected_io_set_iov(expected_io, iovcnt, iov[i].iov_base, 512 * 3);
		ut_expected_io_set_iov(expected_io, iovcnt + 1, iov[i].iov_base + 512 * 3, 512);
	}
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	/* 4th child IO, for a running total of 32 child iov entries of the parent IO */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 48, 16, 8);
	for (i = 12; i < 16; i++) {
		int iovcnt = (i - 12) * 2;

		ut_expected_io_set_iov(expected_io, iovcnt, iov[i].iov_base, 512 * 3);
		ut_expected_io_set_iov(expected_io, iovcnt + 1, iov[i].iov_base + 512 * 3, 512);
	}
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	/* The 5th child IO exhausts the child iov entries, so it has to be split off
	 * in the next round.
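	 * (Children 1-4 consume 4 * 8 = 32 child iov entries, the parent's full budget,
	 * so the 5th child has to wait for the second split round.)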
	 */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 64, 16, 8);
	for (i = 16; i < 20; i++) {
		int iovcnt = (i - 16) * 2;

		ut_expected_io_set_iov(expected_io, iovcnt, iov[i].iov_base, 512 * 3);
		ut_expected_io_set_iov(expected_io, iovcnt + 1, iov[i].iov_base + 512 * 3, 512);
	}
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	rc = spdk_bdev_writev_blocks(desc, io_ch, iov, 20, 0, 80, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	/* First split round */
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 4);
	stub_complete_io(4);
	CU_ASSERT(g_io_done == false);

	/* Second split round */
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);

	spdk_put_io_channel(io_ch);
	spdk_bdev_close(desc);
	free_bdev(bdev);
	spdk_bdev_finish(bdev_fini_cb, NULL);
	poll_threads();
}

static void
bdev_io_split_with_io_wait(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc = NULL;
	struct spdk_io_channel *io_ch;
	struct spdk_bdev_channel *channel;
	struct spdk_bdev_mgmt_channel *mgmt_ch;
	struct spdk_bdev_opts bdev_opts = {};
	struct iovec iov[3];
	struct ut_expected_io *expected_io;
	int rc;

	spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts));
	bdev_opts.bdev_io_pool_size = 2;
	bdev_opts.bdev_io_cache_size = 1;

	rc = spdk_bdev_set_opts(&bdev_opts);
	CU_ASSERT(rc == 0);
	spdk_bdev_initialize(bdev_init_cb, NULL);

	bdev = allocate_bdev("bdev0");

	rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc);
	CU_ASSERT(rc == 0);
	CU_ASSERT(desc != NULL);
	CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc));
	io_ch = spdk_bdev_get_io_channel(desc);
	CU_ASSERT(io_ch != NULL);
	channel = spdk_io_channel_get_ctx(io_ch);
	mgmt_ch = channel->shared_resource->mgmt_ch;

	bdev->optimal_io_boundary = 16;
	bdev->split_on_optimal_io_boundary = true;

	rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);

	/* Now test that a single-vector command is split correctly.
	 * Offset 14, length 8, payload 0xF000
	 *  Child - Offset 14, length 2, payload 0xF000
	 *  Child - Offset 16, length 6, payload 0xF000 + 2 * 512
	 *
	 * Set up the expected values before calling spdk_bdev_read_blocks
	 */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 14, 2, 1);
	ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 2 * 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 16, 6, 1);
	ut_expected_io_set_iov(expected_io, 0, (void *)(0xF000 + 2 * 512), 6 * 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	/* The following children will be submitted sequentially due to the capacity of
	 * spdk_bdev_io.
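	 * (bdev_io_pool_size is 2: one spdk_bdev_io is held by the still-outstanding
	 * read above and one by the split parent, so each child has to wait in the
	 * io_wait_queue until a bdev_io is freed.)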
	 */

	/* The first child I/O will be queued to wait until an spdk_bdev_io becomes available */
	rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 8, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(!TAILQ_EMPTY(&mgmt_ch->io_wait_queue));
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);

	/* Completing the first read I/O will submit the first child */
	stub_complete_io(1);
	CU_ASSERT(TAILQ_EMPTY(&mgmt_ch->io_wait_queue));
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);

	/* Completing the first child will submit the second child */
	stub_complete_io(1);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);

	/* Complete the second child I/O. This should result in our callback getting
	 * invoked since the parent I/O is now complete.
	 */
	stub_complete_io(1);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);

	/* Now set up a more complex, multi-vector command that needs to be split,
	 * including splitting iovecs.
	 */
	iov[0].iov_base = (void *)0x10000;
	iov[0].iov_len = 512;
	iov[1].iov_base = (void *)0x20000;
	iov[1].iov_len = 20 * 512;
	iov[2].iov_base = (void *)0x30000;
	iov[2].iov_len = 11 * 512;

	g_io_done = false;
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 14, 2, 2);
	ut_expected_io_set_iov(expected_io, 0, (void *)0x10000, 512);
	ut_expected_io_set_iov(expected_io, 1, (void *)0x20000, 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 16, 16, 1);
	ut_expected_io_set_iov(expected_io, 0, (void *)(0x20000 + 512), 16 * 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 32, 14, 2);
	ut_expected_io_set_iov(expected_io, 0, (void *)(0x20000 + 17 * 512), 3 * 512);
	ut_expected_io_set_iov(expected_io, 1, (void *)0x30000, 11 * 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	rc = spdk_bdev_writev_blocks(desc, io_ch, iov, 3, 14, 32, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	/* The following children will be submitted sequentially due to the capacity of
	 * spdk_bdev_io.
	 */

	/* Completing the first child will submit the second child */
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == false);

	/* Completing the second child will submit the third child */
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == false);

	/* Completing the third child will result in our callback getting invoked
	 * since the parent I/O is now complete.
	 */
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == true);

	CU_ASSERT(TAILQ_EMPTY(&g_bdev_ut_channel->expected_io));

	spdk_put_io_channel(io_ch);
	spdk_bdev_close(desc);
	free_bdev(bdev);
	spdk_bdev_finish(bdev_fini_cb, NULL);
	poll_threads();
}

static void
bdev_io_alignment(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc = NULL;
	struct spdk_io_channel *io_ch;
	struct spdk_bdev_opts bdev_opts = {};
	int rc;
	void *buf = NULL;
	struct iovec iovs[2];
	int iovcnt;
	uint64_t alignment;

	spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts));
	bdev_opts.bdev_io_pool_size = 20;
	bdev_opts.bdev_io_cache_size = 2;

	rc = spdk_bdev_set_opts(&bdev_opts);
	CU_ASSERT(rc == 0);
	spdk_bdev_initialize(bdev_init_cb, NULL);

	fn_table.submit_request = stub_submit_request_get_buf;
	bdev = allocate_bdev("bdev0");

	rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc);
	CU_ASSERT(rc == 0);
	CU_ASSERT(desc != NULL);
	CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc));
	io_ch = spdk_bdev_get_io_channel(desc);
	CU_ASSERT(io_ch != NULL);

	/* Create aligned buffer */
	rc = posix_memalign(&buf, 4096, 8192);
	SPDK_CU_ASSERT_FATAL(rc == 0);

	/* Pass aligned single buffer with no alignment required */
	alignment = 1;
	bdev->required_alignment = spdk_u32log2(alignment);

	rc = spdk_bdev_write_blocks(desc, io_ch, buf, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	stub_complete_io(1);
	CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt,
				    alignment));

	rc = spdk_bdev_read_blocks(desc, io_ch, buf, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	stub_complete_io(1);
	CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt,
				    alignment));

	/* Pass unaligned single buffer with no alignment required */
	alignment = 1;
	bdev->required_alignment = spdk_u32log2(alignment);

	rc = spdk_bdev_write_blocks(desc, io_ch, buf + 4, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);
	CU_ASSERT(g_bdev_io->u.bdev.iovs[0].iov_base == buf + 4);
	stub_complete_io(1);

	rc = spdk_bdev_read_blocks(desc, io_ch, buf + 4, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);
	CU_ASSERT(g_bdev_io->u.bdev.iovs[0].iov_base == buf + 4);
	stub_complete_io(1);

	/* Pass unaligned single buffer with 512 alignment required */
	alignment = 512;
	bdev->required_alignment = spdk_u32log2(alignment);

	rc = spdk_bdev_write_blocks(desc, io_ch, buf + 4, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 1);
	CU_ASSERT(g_bdev_io->u.bdev.iovs == &g_bdev_io->internal.bounce_iov);
	CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt,
				    alignment));
	stub_complete_io(1);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);

	rc = spdk_bdev_read_blocks(desc, io_ch, buf + 4, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 1);
	CU_ASSERT(g_bdev_io->u.bdev.iovs == &g_bdev_io->internal.bounce_iov);
	CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt,
				    alignment));
	stub_complete_io(1);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);

	/* Pass unaligned single buffer with 4096 alignment required */
	alignment = 4096;
	bdev->required_alignment = spdk_u32log2(alignment);

	rc = spdk_bdev_write_blocks(desc, io_ch, buf + 8, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 1);
	CU_ASSERT(g_bdev_io->u.bdev.iovs == &g_bdev_io->internal.bounce_iov);
	CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt,
				    alignment));
	stub_complete_io(1);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);

	rc = spdk_bdev_read_blocks(desc, io_ch, buf + 8, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 1);
	CU_ASSERT(g_bdev_io->u.bdev.iovs == &g_bdev_io->internal.bounce_iov);
	CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt,
				    alignment));
	stub_complete_io(1);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);

	/* Pass aligned iovs with no alignment required */
	alignment = 1;
	bdev->required_alignment = spdk_u32log2(alignment);

	iovcnt = 1;
	iovs[0].iov_base = buf;
	iovs[0].iov_len = 512;

	rc = spdk_bdev_writev(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);
	stub_complete_io(1);
	CU_ASSERT(g_bdev_io->u.bdev.iovs[0].iov_base == iovs[0].iov_base);

	rc = spdk_bdev_readv(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);
	stub_complete_io(1);
	CU_ASSERT(g_bdev_io->u.bdev.iovs[0].iov_base == iovs[0].iov_base);

	/* Pass unaligned iovs with no alignment required */
	alignment = 1;
	bdev->required_alignment = spdk_u32log2(alignment);

	iovcnt = 2;
	iovs[0].iov_base = buf + 16;
	iovs[0].iov_len = 256;
	iovs[1].iov_base = buf + 16 + 256 + 32;
	iovs[1].iov_len = 256;

	rc = spdk_bdev_writev(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);
	stub_complete_io(1);
	CU_ASSERT(g_bdev_io->u.bdev.iovs[0].iov_base == iovs[0].iov_base);

	rc = spdk_bdev_readv(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);
	stub_complete_io(1);
	CU_ASSERT(g_bdev_io->u.bdev.iovs[0].iov_base == iovs[0].iov_base);

	/* Pass unaligned iov with 2048 alignment required */
	alignment = 2048;
	bdev->required_alignment = spdk_u32log2(alignment);

	iovcnt = 2;
	iovs[0].iov_base = buf + 16;
	iovs[0].iov_len = 256;
	iovs[1].iov_base = buf + 16 + 256 + 32;
	iovs[1].iov_len = 256;

	rc = spdk_bdev_writev(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == iovcnt);
	CU_ASSERT(g_bdev_io->u.bdev.iovs == &g_bdev_io->internal.bounce_iov);
	CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt,
				    alignment));
	stub_complete_io(1);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);

	rc = spdk_bdev_readv(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == iovcnt);
	CU_ASSERT(g_bdev_io->u.bdev.iovs == &g_bdev_io->internal.bounce_iov);
	CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt,
				    alignment));
	stub_complete_io(1);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);

	/* Pass iov without allocated buffer without alignment required */
	alignment = 1;
	bdev->required_alignment = spdk_u32log2(alignment);

	iovcnt = 1;
	iovs[0].iov_base = NULL;
	iovs[0].iov_len = 0;

	rc = spdk_bdev_readv(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);
	CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt,
				    alignment));
	stub_complete_io(1);

	/* Pass iov without allocated buffer with 1024 alignment required */
	alignment = 1024;
	bdev->required_alignment = spdk_u32log2(alignment);

	iovcnt = 1;
	iovs[0].iov_base = NULL;
	iovs[0].iov_len = 0;

	rc = spdk_bdev_readv(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);
	CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt,
				    alignment));
	stub_complete_io(1);

	spdk_put_io_channel(io_ch);
	spdk_bdev_close(desc);
	free_bdev(bdev);
	fn_table.submit_request = stub_submit_request;
	spdk_bdev_finish(bdev_fini_cb, NULL);
	poll_threads();

	free(buf);
}

static void
bdev_io_alignment_with_boundary(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc = NULL;
	struct spdk_io_channel *io_ch;
	struct spdk_bdev_opts bdev_opts = {};
	int rc;
	void *buf = NULL;
	struct iovec iovs[2];
	int iovcnt;
	uint64_t alignment;

	spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts));
	bdev_opts.bdev_io_pool_size = 20;
	bdev_opts.bdev_io_cache_size = 2;

	bdev_opts.opts_size = sizeof(bdev_opts);
	rc = spdk_bdev_set_opts(&bdev_opts);
	CU_ASSERT(rc == 0);
	spdk_bdev_initialize(bdev_init_cb, NULL);

	fn_table.submit_request = stub_submit_request_get_buf;
	bdev = allocate_bdev("bdev0");

	rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc);
	CU_ASSERT(rc == 0);
	CU_ASSERT(desc != NULL);
	CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc));
	io_ch = spdk_bdev_get_io_channel(desc);
	CU_ASSERT(io_ch != NULL);

	/* Create aligned buffer */
	rc = posix_memalign(&buf, 4096, 131072);
	SPDK_CU_ASSERT_FATAL(rc == 0);
	g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS;

	/* 512 * 3 with 2 IO boundary, allocate small data buffer from bdev layer */
	alignment = 512;
	bdev->required_alignment = spdk_u32log2(alignment);
	bdev->optimal_io_boundary = 2;
	bdev->split_on_optimal_io_boundary = true;

	iovcnt = 1;
	iovs[0].iov_base = NULL;
	iovs[0].iov_len = 512 * 3;

	rc = spdk_bdev_readv_blocks(desc, io_ch, iovs, iovcnt, 1, 3, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2);
	stub_complete_io(2);

	/* 8KiB with 16 IO boundary, allocate large data buffer from bdev layer */
	alignment = 512;
	bdev->required_alignment = spdk_u32log2(alignment);
	bdev->optimal_io_boundary = 16;
	bdev->split_on_optimal_io_boundary = true;

	iovcnt = 1;
	iovs[0].iov_base = NULL;
	iovs[0].iov_len = 512 * 16;

	rc = spdk_bdev_readv_blocks(desc, io_ch, iovs, iovcnt, 1, 16, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2);
	stub_complete_io(2);

	/* 512 * 160 with 128 IO boundary, 63.5KiB + 16.5KiB for the two child requests */
	alignment = 512;
	bdev->required_alignment = spdk_u32log2(alignment);
	bdev->optimal_io_boundary = 128;
	bdev->split_on_optimal_io_boundary = true;

	iovcnt = 1;
	iovs[0].iov_base = buf + 16;
	iovs[0].iov_len = 512 * 160;
	rc = spdk_bdev_readv_blocks(desc, io_ch, iovs, iovcnt, 1, 160, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2);
	stub_complete_io(2);

	/* 512 * 3 with 2 IO boundary */
	alignment = 512;
	bdev->required_alignment = spdk_u32log2(alignment);
	bdev->optimal_io_boundary = 2;
	bdev->split_on_optimal_io_boundary = true;

	iovcnt = 2;
	iovs[0].iov_base = buf + 16;
	iovs[0].iov_len = 512;
	iovs[1].iov_base = buf + 16 + 512 + 32;
	iovs[1].iov_len = 1024;

	rc = spdk_bdev_writev_blocks(desc, io_ch, iovs, iovcnt, 1, 3, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2);
	stub_complete_io(2);

	rc = spdk_bdev_readv_blocks(desc, io_ch, iovs, iovcnt, 1, 3, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2);
	stub_complete_io(2);

	/* 512 * 64 with 32 IO boundary */
	bdev->optimal_io_boundary = 32;
	iovcnt = 2;
	iovs[0].iov_base = buf + 16;
	iovs[0].iov_len = 16384;
	iovs[1].iov_base = buf + 16 + 16384 + 32;
	iovs[1].iov_len = 16384;

	rc = spdk_bdev_writev_blocks(desc, io_ch, iovs, iovcnt, 1, 64, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 3);
	stub_complete_io(3);

	rc = spdk_bdev_readv_blocks(desc, io_ch, iovs, iovcnt, 1, 64, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 3);
	stub_complete_io(3);

	/* 512 * 160 with 32 IO boundary */
	iovcnt = 1;
	iovs[0].iov_base = buf + 16;
	iovs[0].iov_len = 16384 + 65536;

	rc = spdk_bdev_writev_blocks(desc, io_ch, iovs, iovcnt, 1, 160, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 6);
	stub_complete_io(6);

	spdk_put_io_channel(io_ch);
	spdk_bdev_close(desc);
	free_bdev(bdev);
	fn_table.submit_request = stub_submit_request;
	spdk_bdev_finish(bdev_fini_cb, NULL);
	poll_threads();

	free(buf);
}

static void
histogram_status_cb(void *cb_arg, int status)
{
	g_status = status;
}

static void
histogram_data_cb(void *cb_arg, int status, struct spdk_histogram_data *histogram)
{
	g_status = status;
	g_histogram = histogram;
}

static void
histogram_io_count(void *ctx, uint64_t start, uint64_t end, uint64_t count,
		   uint64_t total, uint64_t so_far)
{
	g_count += count;
}

static void
bdev_histograms(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc = NULL;
	struct spdk_io_channel *ch;
	struct spdk_histogram_data *histogram;
	uint8_t buf[4096];
	int rc;

	spdk_bdev_initialize(bdev_init_cb, NULL);

	bdev = allocate_bdev("bdev");

spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc); 3069 CU_ASSERT(rc == 0); 3070 CU_ASSERT(desc != NULL); 3071 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 3072 3073 ch = spdk_bdev_get_io_channel(desc); 3074 CU_ASSERT(ch != NULL); 3075 3076 /* Enable histogram */ 3077 g_status = -1; 3078 spdk_bdev_histogram_enable(bdev, histogram_status_cb, NULL, true); 3079 poll_threads(); 3080 CU_ASSERT(g_status == 0); 3081 CU_ASSERT(bdev->internal.histogram_enabled == true); 3082 3083 /* Allocate histogram */ 3084 histogram = spdk_histogram_data_alloc(); 3085 SPDK_CU_ASSERT_FATAL(histogram != NULL); 3086 3087 /* Check if histogram is zeroed */ 3088 spdk_bdev_histogram_get(bdev, histogram, histogram_data_cb, NULL); 3089 poll_threads(); 3090 CU_ASSERT(g_status == 0); 3091 SPDK_CU_ASSERT_FATAL(g_histogram != NULL); 3092 3093 g_count = 0; 3094 spdk_histogram_data_iterate(g_histogram, histogram_io_count, NULL); 3095 3096 CU_ASSERT(g_count == 0); 3097 3098 rc = spdk_bdev_write_blocks(desc, ch, buf, 0, 1, io_done, NULL); 3099 CU_ASSERT(rc == 0); 3100 3101 spdk_delay_us(10); 3102 stub_complete_io(1); 3103 poll_threads(); 3104 3105 rc = spdk_bdev_read_blocks(desc, ch, buf, 0, 1, io_done, NULL); 3106 CU_ASSERT(rc == 0); 3107 3108 spdk_delay_us(10); 3109 stub_complete_io(1); 3110 poll_threads(); 3111 3112 /* Check if histogram gathered data from all I/O channels */ 3113 g_histogram = NULL; 3114 spdk_bdev_histogram_get(bdev, histogram, histogram_data_cb, NULL); 3115 poll_threads(); 3116 CU_ASSERT(g_status == 0); 3117 CU_ASSERT(bdev->internal.histogram_enabled == true); 3118 SPDK_CU_ASSERT_FATAL(g_histogram != NULL); 3119 3120 g_count = 0; 3121 spdk_histogram_data_iterate(g_histogram, histogram_io_count, NULL); 3122 CU_ASSERT(g_count == 2); 3123 3124 /* Disable histogram */ 3125 spdk_bdev_histogram_enable(bdev, histogram_status_cb, NULL, false); 3126 poll_threads(); 3127 CU_ASSERT(g_status == 0); 3128 CU_ASSERT(bdev->internal.histogram_enabled == false); 3129 3130 /* Try to run histogram commands on disabled bdev */ 3131 spdk_bdev_histogram_get(bdev, histogram, histogram_data_cb, NULL); 3132 poll_threads(); 3133 CU_ASSERT(g_status == -EFAULT); 3134 3135 spdk_histogram_data_free(histogram); 3136 spdk_put_io_channel(ch); 3137 spdk_bdev_close(desc); 3138 free_bdev(bdev); 3139 spdk_bdev_finish(bdev_fini_cb, NULL); 3140 poll_threads(); 3141 } 3142 3143 static void 3144 _bdev_compare(bool emulated) 3145 { 3146 struct spdk_bdev *bdev; 3147 struct spdk_bdev_desc *desc = NULL; 3148 struct spdk_io_channel *ioch; 3149 struct ut_expected_io *expected_io; 3150 uint64_t offset, num_blocks; 3151 uint32_t num_completed; 3152 char aa_buf[512]; 3153 char bb_buf[512]; 3154 struct iovec compare_iov; 3155 uint8_t io_type; 3156 int rc; 3157 3158 if (emulated) { 3159 io_type = SPDK_BDEV_IO_TYPE_READ; 3160 } else { 3161 io_type = SPDK_BDEV_IO_TYPE_COMPARE; 3162 } 3163 3164 memset(aa_buf, 0xaa, sizeof(aa_buf)); 3165 memset(bb_buf, 0xbb, sizeof(bb_buf)); 3166 3167 g_io_types_supported[SPDK_BDEV_IO_TYPE_COMPARE] = !emulated; 3168 3169 spdk_bdev_initialize(bdev_init_cb, NULL); 3170 fn_table.submit_request = stub_submit_request_get_buf; 3171 bdev = allocate_bdev("bdev"); 3172 3173 rc = spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc); 3174 CU_ASSERT_EQUAL(rc, 0); 3175 SPDK_CU_ASSERT_FATAL(desc != NULL); 3176 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 3177 ioch = spdk_bdev_get_io_channel(desc); 3178 SPDK_CU_ASSERT_FATAL(ioch != NULL); 3179 3180 fn_table.submit_request = stub_submit_request_get_buf; 3181 
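/* A note on the "emulated" flavor of this test: when a bdev module does not
 * advertise support for SPDK_BDEV_IO_TYPE_COMPARE, the bdev layer is expected
 * to emulate the compare by reading the target blocks and memcmp()ing them
 * against the caller's buffer, which is why io_type above becomes READ in the
 * emulated case. A minimal sketch of that check (hypothetical helper, not
 * part of this file):
 *
 *	static bool
 *	compare_matches(const void *read_buf, const struct iovec *iov)
 *	{
 *		return memcmp(read_buf, iov->iov_base, iov->iov_len) == 0;
 *	}
 */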
g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 3182 3183 offset = 50; 3184 num_blocks = 1; 3185 compare_iov.iov_base = aa_buf; 3186 compare_iov.iov_len = sizeof(aa_buf); 3187 3188 expected_io = ut_alloc_expected_io(io_type, offset, num_blocks, 0); 3189 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 3190 3191 g_io_done = false; 3192 g_compare_read_buf = aa_buf; 3193 g_compare_read_buf_len = sizeof(aa_buf); 3194 rc = spdk_bdev_comparev_blocks(desc, ioch, &compare_iov, 1, offset, num_blocks, io_done, NULL); 3195 CU_ASSERT_EQUAL(rc, 0); 3196 num_completed = stub_complete_io(1); 3197 CU_ASSERT_EQUAL(num_completed, 1); 3198 CU_ASSERT(g_io_done == true); 3199 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS); 3200 3201 expected_io = ut_alloc_expected_io(io_type, offset, num_blocks, 0); 3202 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 3203 3204 g_io_done = false; 3205 g_compare_read_buf = bb_buf; 3206 g_compare_read_buf_len = sizeof(bb_buf); 3207 rc = spdk_bdev_comparev_blocks(desc, ioch, &compare_iov, 1, offset, num_blocks, io_done, NULL); 3208 CU_ASSERT_EQUAL(rc, 0); 3209 num_completed = stub_complete_io(1); 3210 CU_ASSERT_EQUAL(num_completed, 1); 3211 CU_ASSERT(g_io_done == true); 3212 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_MISCOMPARE); 3213 3214 spdk_put_io_channel(ioch); 3215 spdk_bdev_close(desc); 3216 free_bdev(bdev); 3217 fn_table.submit_request = stub_submit_request; 3218 spdk_bdev_finish(bdev_fini_cb, NULL); 3219 poll_threads(); 3220 3221 g_io_types_supported[SPDK_BDEV_IO_TYPE_COMPARE] = true; 3222 3223 g_compare_read_buf = NULL; 3224 } 3225 3226 static void 3227 bdev_compare(void) 3228 { 3229 _bdev_compare(true); 3230 _bdev_compare(false); 3231 } 3232 3233 static void 3234 bdev_compare_and_write(void) 3235 { 3236 struct spdk_bdev *bdev; 3237 struct spdk_bdev_desc *desc = NULL; 3238 struct spdk_io_channel *ioch; 3239 struct ut_expected_io *expected_io; 3240 uint64_t offset, num_blocks; 3241 uint32_t num_completed; 3242 char aa_buf[512]; 3243 char bb_buf[512]; 3244 char cc_buf[512]; 3245 char write_buf[512]; 3246 struct iovec compare_iov; 3247 struct iovec write_iov; 3248 int rc; 3249 3250 memset(aa_buf, 0xaa, sizeof(aa_buf)); 3251 memset(bb_buf, 0xbb, sizeof(bb_buf)); 3252 memset(cc_buf, 0xcc, sizeof(cc_buf)); 3253 3254 g_io_types_supported[SPDK_BDEV_IO_TYPE_COMPARE] = false; 3255 3256 spdk_bdev_initialize(bdev_init_cb, NULL); 3257 fn_table.submit_request = stub_submit_request_get_buf; 3258 bdev = allocate_bdev("bdev"); 3259 3260 rc = spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc); 3261 CU_ASSERT_EQUAL(rc, 0); 3262 SPDK_CU_ASSERT_FATAL(desc != NULL); 3263 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 3264 ioch = spdk_bdev_get_io_channel(desc); 3265 SPDK_CU_ASSERT_FATAL(ioch != NULL); 3266 3267 fn_table.submit_request = stub_submit_request_get_buf; 3268 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 3269 3270 offset = 50; 3271 num_blocks = 1; 3272 compare_iov.iov_base = aa_buf; 3273 compare_iov.iov_len = sizeof(aa_buf); 3274 write_iov.iov_base = bb_buf; 3275 write_iov.iov_len = sizeof(bb_buf); 3276 3277 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, offset, num_blocks, 0); 3278 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 3279 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, offset, num_blocks, 0); 3280 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 3281 3282 g_io_done = false; 3283 g_compare_read_buf = aa_buf; 3284 
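/* What spdk_bdev_comparev_and_writev_blocks() does under the hood: it takes
 * an LBA-range lock on the target blocks, performs the compare (emulated here
 * as a READ, matching the first expected_io above), and only on a match
 * issues the WRITE before releasing the lock. That is why the sequence below
 * interleaves poll_threads() (to drive the lock/unlock messages) with two
 * separate stub_complete_io(1) calls, and why g_io_done is still false after
 * the first completion. The call being exercised looks like this:
 *
 *	rc = spdk_bdev_comparev_and_writev_blocks(desc, ioch,
 *			&compare_iov, 1,	// expected old contents
 *			&write_iov, 1,		// new contents, written on match
 *			offset, num_blocks, io_done, NULL);
 */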
g_compare_read_buf_len = sizeof(aa_buf); 3285 memset(write_buf, 0, sizeof(write_buf)); 3286 g_compare_write_buf = write_buf; 3287 g_compare_write_buf_len = sizeof(write_buf); 3288 rc = spdk_bdev_comparev_and_writev_blocks(desc, ioch, &compare_iov, 1, &write_iov, 1, 3289 offset, num_blocks, io_done, NULL); 3290 /* Trigger range locking */ 3291 poll_threads(); 3292 CU_ASSERT_EQUAL(rc, 0); 3293 num_completed = stub_complete_io(1); 3294 CU_ASSERT_EQUAL(num_completed, 1); 3295 CU_ASSERT(g_io_done == false); 3296 num_completed = stub_complete_io(1); 3297 /* Trigger range unlocking */ 3298 poll_threads(); 3299 CU_ASSERT_EQUAL(num_completed, 1); 3300 CU_ASSERT(g_io_done == true); 3301 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS); 3302 CU_ASSERT(memcmp(write_buf, bb_buf, sizeof(write_buf)) == 0); 3303 3304 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, offset, num_blocks, 0); 3305 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 3306 3307 g_io_done = false; 3308 g_compare_read_buf = cc_buf; 3309 g_compare_read_buf_len = sizeof(cc_buf); 3310 memset(write_buf, 0, sizeof(write_buf)); 3311 g_compare_write_buf = write_buf; 3312 g_compare_write_buf_len = sizeof(write_buf); 3313 rc = spdk_bdev_comparev_and_writev_blocks(desc, ioch, &compare_iov, 1, &write_iov, 1, 3314 offset, num_blocks, io_done, NULL); 3315 /* Trigger range locking */ 3316 poll_threads(); 3317 CU_ASSERT_EQUAL(rc, 0); 3318 num_completed = stub_complete_io(1); 3319 /* Trigger range unlocking earlier because we expect error here */ 3320 poll_threads(); 3321 CU_ASSERT_EQUAL(num_completed, 1); 3322 CU_ASSERT(g_io_done == true); 3323 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_MISCOMPARE); 3324 num_completed = stub_complete_io(1); 3325 CU_ASSERT_EQUAL(num_completed, 0); 3326 3327 spdk_put_io_channel(ioch); 3328 spdk_bdev_close(desc); 3329 free_bdev(bdev); 3330 fn_table.submit_request = stub_submit_request; 3331 spdk_bdev_finish(bdev_fini_cb, NULL); 3332 poll_threads(); 3333 3334 g_io_types_supported[SPDK_BDEV_IO_TYPE_COMPARE] = true; 3335 3336 g_compare_read_buf = NULL; 3337 g_compare_write_buf = NULL; 3338 } 3339 3340 static void 3341 bdev_write_zeroes(void) 3342 { 3343 struct spdk_bdev *bdev; 3344 struct spdk_bdev_desc *desc = NULL; 3345 struct spdk_io_channel *ioch; 3346 struct ut_expected_io *expected_io; 3347 uint64_t offset, num_io_blocks, num_blocks; 3348 uint32_t num_completed, num_requests; 3349 int rc; 3350 3351 spdk_bdev_initialize(bdev_init_cb, NULL); 3352 bdev = allocate_bdev("bdev"); 3353 3354 rc = spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc); 3355 CU_ASSERT_EQUAL(rc, 0); 3356 SPDK_CU_ASSERT_FATAL(desc != NULL); 3357 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 3358 ioch = spdk_bdev_get_io_channel(desc); 3359 SPDK_CU_ASSERT_FATAL(ioch != NULL); 3360 3361 fn_table.submit_request = stub_submit_request; 3362 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 3363 3364 /* First test that if the bdev supports write_zeroes, the request won't be split */ 3365 bdev->md_len = 0; 3366 bdev->blocklen = 4096; 3367 num_blocks = (ZERO_BUFFER_SIZE / bdev->blocklen) * 2; 3368 3369 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, 0, num_blocks, 0); 3370 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 3371 rc = spdk_bdev_write_zeroes_blocks(desc, ioch, 0, num_blocks, io_done, NULL); 3372 CU_ASSERT_EQUAL(rc, 0); 3373 num_completed = stub_complete_io(1); 3374 CU_ASSERT_EQUAL(num_completed, 1); 3375 3376 /* Check that if write zeroes is not 
supported it'll be replaced by regular writes */ 3377 ut_enable_io_type(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, false); 3378 num_io_blocks = ZERO_BUFFER_SIZE / bdev->blocklen; 3379 num_requests = 2; 3380 num_blocks = (ZERO_BUFFER_SIZE / bdev->blocklen) * num_requests; 3381 3382 for (offset = 0; offset < num_requests; ++offset) { 3383 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 3384 offset * num_io_blocks, num_io_blocks, 0); 3385 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 3386 } 3387 3388 rc = spdk_bdev_write_zeroes_blocks(desc, ioch, 0, num_blocks, io_done, NULL); 3389 CU_ASSERT_EQUAL(rc, 0); 3390 num_completed = stub_complete_io(num_requests); 3391 CU_ASSERT_EQUAL(num_completed, num_requests); 3392 3393 /* Check that the splitting is correct if bdev has interleaved metadata */ 3394 bdev->md_interleave = true; 3395 bdev->md_len = 64; 3396 bdev->blocklen = 4096 + 64; 3397 num_blocks = (ZERO_BUFFER_SIZE / bdev->blocklen) * 2; 3398 3399 num_requests = offset = 0; 3400 while (offset < num_blocks) { 3401 num_io_blocks = spdk_min(ZERO_BUFFER_SIZE / bdev->blocklen, num_blocks - offset); 3402 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 3403 offset, num_io_blocks, 0); 3404 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 3405 offset += num_io_blocks; 3406 num_requests++; 3407 } 3408 3409 rc = spdk_bdev_write_zeroes_blocks(desc, ioch, 0, num_blocks, io_done, NULL); 3410 CU_ASSERT_EQUAL(rc, 0); 3411 num_completed = stub_complete_io(num_requests); 3412 CU_ASSERT_EQUAL(num_completed, num_requests); 3413 num_completed = stub_complete_io(num_requests); 3414 CU_ASSERT_EQUAL(num_completed, 0); 3415 3416 /* Check that the same splitting is correct for a separate metadata buffer */ 3417 bdev->md_interleave = false; 3418 bdev->md_len = 64; 3419 bdev->blocklen = 4096; 3420 3421 num_requests = offset = 0; 3422 while (offset < num_blocks) { 3423 num_io_blocks = spdk_min(ZERO_BUFFER_SIZE / (bdev->blocklen + bdev->md_len), num_blocks - offset); 3424 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 3425 offset, num_io_blocks, 0); 3426 expected_io->md_buf = (char *)g_bdev_mgr.zero_buffer + num_io_blocks * bdev->blocklen; 3427 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 3428 offset += num_io_blocks; 3429 num_requests++; 3430 } 3431 3432 rc = spdk_bdev_write_zeroes_blocks(desc, ioch, 0, num_blocks, io_done, NULL); 3433 CU_ASSERT_EQUAL(rc, 0); 3434 num_completed = stub_complete_io(num_requests); 3435 CU_ASSERT_EQUAL(num_completed, num_requests); 3436 3437 ut_enable_io_type(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, true); 3438 spdk_put_io_channel(ioch); 3439 spdk_bdev_close(desc); 3440 free_bdev(bdev); 3441 spdk_bdev_finish(bdev_fini_cb, NULL); 3442 poll_threads(); 3443 } 3444 3445 static void 3446 bdev_open_while_hotremove(void) 3447 { 3448 struct spdk_bdev *bdev; 3449 struct spdk_bdev_desc *desc[2] = {}; 3450 int rc; 3451 3452 bdev = allocate_bdev("bdev"); 3453 3454 rc = spdk_bdev_open_ext("bdev", false, bdev_ut_event_cb, NULL, &desc[0]); 3455 CU_ASSERT(rc == 0); 3456 SPDK_CU_ASSERT_FATAL(desc[0] != NULL); 3457 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc[0])); 3458 3459 spdk_bdev_unregister(bdev, NULL, NULL); 3460 3461 rc = spdk_bdev_open_ext("bdev", false, bdev_ut_event_cb, NULL, &desc[1]); 3462 CU_ASSERT(rc == -ENODEV); 3463 SPDK_CU_ASSERT_FATAL(desc[1] == NULL); 3464 3465 spdk_bdev_close(desc[0]); 3466 free_bdev(bdev); 3467 } 3468 3469 static void 3470 bdev_close_while_hotremove(void) 3471 { 3472 struct spdk_bdev *bdev; 3473 struct spdk_bdev_desc
*desc = NULL; 3474 int rc = 0; 3475 3476 bdev = allocate_bdev("bdev"); 3477 3478 rc = spdk_bdev_open_ext("bdev", true, bdev_open_cb1, &desc, &desc); 3479 CU_ASSERT_EQUAL(rc, 0); 3480 SPDK_CU_ASSERT_FATAL(desc != NULL); 3481 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 3482 3483 /* Simulate hot-unplug by unregistering bdev */ 3484 g_event_type1 = 0xFF; 3485 g_unregister_arg = NULL; 3486 g_unregister_rc = -1; 3487 spdk_bdev_unregister(bdev, bdev_unregister_cb, (void *)0x12345678); 3488 /* Close the device while the remove event is in flight */ 3489 spdk_bdev_close(desc); 3490 3491 /* Ensure that the unregister callback is delayed */ 3492 CU_ASSERT_EQUAL(g_unregister_arg, NULL); 3493 CU_ASSERT_EQUAL(g_unregister_rc, -1); 3494 3495 poll_threads(); 3496 3497 /* The event callback shall not be issued because the device was closed */ 3498 CU_ASSERT_EQUAL(g_event_type1, 0xFF); 3499 /* The unregister callback is issued */ 3500 CU_ASSERT_EQUAL(g_unregister_arg, (void *)0x12345678); 3501 CU_ASSERT_EQUAL(g_unregister_rc, 0); 3502 3503 free_bdev(bdev); 3504 } 3505 3506 static void 3507 bdev_open_ext(void) 3508 { 3509 struct spdk_bdev *bdev; 3510 struct spdk_bdev_desc *desc1 = NULL; 3511 struct spdk_bdev_desc *desc2 = NULL; 3512 int rc = 0; 3513 3514 bdev = allocate_bdev("bdev"); 3515 3516 rc = spdk_bdev_open_ext("bdev", true, NULL, NULL, &desc1); 3517 CU_ASSERT_EQUAL(rc, -EINVAL); 3518 3519 rc = spdk_bdev_open_ext("bdev", true, bdev_open_cb1, &desc1, &desc1); 3520 CU_ASSERT_EQUAL(rc, 0); 3521 3522 rc = spdk_bdev_open_ext("bdev", true, bdev_open_cb2, &desc2, &desc2); 3523 CU_ASSERT_EQUAL(rc, 0); 3524 3525 g_event_type1 = 0xFF; 3526 g_event_type2 = 0xFF; 3527 3528 /* Simulate hot-unplug by unregistering bdev */ 3529 spdk_bdev_unregister(bdev, NULL, NULL); 3530 poll_threads(); 3531 3532 /* Check if the correct events have been triggered in the event callback fn */ 3533 CU_ASSERT_EQUAL(g_event_type1, SPDK_BDEV_EVENT_REMOVE); 3534 CU_ASSERT_EQUAL(g_event_type2, SPDK_BDEV_EVENT_REMOVE); 3535 3536 free_bdev(bdev); 3537 poll_threads(); 3538 } 3539 3540 struct timeout_io_cb_arg { 3541 struct iovec iov; 3542 uint8_t type; 3543 }; 3544 3545 static int 3546 bdev_channel_count_submitted_io(struct spdk_bdev_channel *ch) 3547 { 3548 struct spdk_bdev_io *bdev_io; 3549 int n = 0; 3550 3551 if (!ch) { 3552 return -1; 3553 } 3554 3555 TAILQ_FOREACH(bdev_io, &ch->io_submitted, internal.ch_link) { 3556 n++; 3557 } 3558 3559 return n; 3560 } 3561 3562 static void 3563 bdev_channel_io_timeout_cb(void *cb_arg, struct spdk_bdev_io *bdev_io) 3564 { 3565 struct timeout_io_cb_arg *ctx = cb_arg; 3566 3567 ctx->type = bdev_io->type; 3568 ctx->iov.iov_base = bdev_io->iov.iov_base; 3569 ctx->iov.iov_len = bdev_io->iov.iov_len; 3570 } 3571 3572 static void 3573 bdev_set_io_timeout(void) 3574 { 3575 struct spdk_bdev *bdev; 3576 struct spdk_bdev_desc *desc = NULL; 3577 struct spdk_io_channel *io_ch = NULL; 3578 struct spdk_bdev_channel *bdev_ch = NULL; 3579 struct timeout_io_cb_arg cb_arg; 3580 3581 spdk_bdev_initialize(bdev_init_cb, NULL); 3582 3583 bdev = allocate_bdev("bdev"); 3584 3585 CU_ASSERT(spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc) == 0); 3586 SPDK_CU_ASSERT_FATAL(desc != NULL); 3587 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 3588 3589 io_ch = spdk_bdev_get_io_channel(desc); 3590 CU_ASSERT(io_ch != NULL); 3591 3592 bdev_ch = spdk_io_channel_get_ctx(io_ch); 3593 CU_ASSERT(TAILQ_EMPTY(&bdev_ch->io_submitted)); 3594 3595 /* This is part 1.
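 * (The test is laid out in three parts: part 1 checks the io_submitted list,
 * part 2 exercises registering, changing and disabling the per-descriptor
 * timeout poller, and part 3 verifies that a timed-out IO is reported through
 * the registered callback.)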
3596 * We will check the bdev_ch->io_submitted list 3597 * to make sure that it links only the user-submitted IOs 3598 */ 3599 CU_ASSERT(spdk_bdev_read(desc, io_ch, (void *)0x1000, 0, 4096, io_done, NULL) == 0); 3600 CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 1); 3601 CU_ASSERT(spdk_bdev_write(desc, io_ch, (void *)0x2000, 0, 4096, io_done, NULL) == 0); 3602 CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 2); 3603 stub_complete_io(1); 3604 CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 1); 3605 stub_complete_io(1); 3606 CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 0); 3607 3608 /* Split IO */ 3609 bdev->optimal_io_boundary = 16; 3610 bdev->split_on_optimal_io_boundary = true; 3611 3612 /* Now test that a single-vector command is split correctly. 3613 * Offset 14, length 8, payload 0xF000 3614 * Child - Offset 14, length 2, payload 0xF000 3615 * Child - Offset 16, length 6, payload 0xF000 + 2 * 512 3616 * 3617 * Set up the expected values before calling spdk_bdev_read_blocks 3618 */ 3619 CU_ASSERT(spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 8, io_done, NULL) == 0); 3620 /* We count all submitted IOs, including the IOs generated by splitting. */ 3621 CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 3); 3622 stub_complete_io(1); 3623 CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 2); 3624 stub_complete_io(1); 3625 CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 0); 3626 3627 /* Also include the reset IO */ 3628 CU_ASSERT(spdk_bdev_reset(desc, io_ch, io_done, NULL) == 0); 3629 CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 1); 3630 poll_threads(); 3631 stub_complete_io(1); 3632 poll_threads(); 3633 CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 0); 3634 3635 /* This is part 2. 3636 * Test registration of the desc timeout poller. 3637 */ 3638 3639 /* Successfully set the timeout */ 3640 CU_ASSERT(spdk_bdev_set_timeout(desc, 30, bdev_channel_io_timeout_cb, &cb_arg) == 0); 3641 CU_ASSERT(desc->io_timeout_poller != NULL); 3642 CU_ASSERT(desc->timeout_in_sec == 30); 3643 CU_ASSERT(desc->cb_fn == bdev_channel_io_timeout_cb); 3644 CU_ASSERT(desc->cb_arg == &cb_arg); 3645 3646 /* Change the timeout limit */ 3647 CU_ASSERT(spdk_bdev_set_timeout(desc, 20, bdev_channel_io_timeout_cb, &cb_arg) == 0); 3648 CU_ASSERT(desc->io_timeout_poller != NULL); 3649 CU_ASSERT(desc->timeout_in_sec == 20); 3650 CU_ASSERT(desc->cb_fn == bdev_channel_io_timeout_cb); 3651 CU_ASSERT(desc->cb_arg == &cb_arg); 3652 3653 /* Disable the timeout */ 3654 CU_ASSERT(spdk_bdev_set_timeout(desc, 0, NULL, NULL) == 0); 3655 CU_ASSERT(desc->io_timeout_poller == NULL); 3656 3657 /* This is part 3. 3658 * We will test catching a timed-out IO and check whether the IO is 3659 * the submitted one.
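 * How the delays below line up: spdk_bdev_set_timeout(desc, 30, ...) arms a
 * poller that walks io_submitted and invokes the callback for every IO whose
 * submit tick is more than 30 seconds old. Assuming the UT framework's usual
 * 1 MHz mock tick rate, each spdk_delay_us(15 * spdk_get_ticks_hz()) call
 * advances the clock by 15 seconds, so one delay stays under the limit and
 * two delays (15 + 15 = 30) cross it.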
3660 */ 3661 memset(&cb_arg, 0, sizeof(cb_arg)); 3662 CU_ASSERT(spdk_bdev_set_timeout(desc, 30, bdev_channel_io_timeout_cb, &cb_arg) == 0); 3663 CU_ASSERT(spdk_bdev_write_blocks(desc, io_ch, (void *)0x1000, 0, 1, io_done, NULL) == 0); 3664 3665 /* Don't reach the limit */ 3666 spdk_delay_us(15 * spdk_get_ticks_hz()); 3667 poll_threads(); 3668 CU_ASSERT(cb_arg.type == 0); 3669 CU_ASSERT(cb_arg.iov.iov_base == (void *)0x0); 3670 CU_ASSERT(cb_arg.iov.iov_len == 0); 3671 3672 /* 15 + 15 = 30 reaches the limit */ 3673 spdk_delay_us(15 * spdk_get_ticks_hz()); 3674 poll_threads(); 3675 CU_ASSERT(cb_arg.type == SPDK_BDEV_IO_TYPE_WRITE); 3676 CU_ASSERT(cb_arg.iov.iov_base == (void *)0x1000); 3677 CU_ASSERT(cb_arg.iov.iov_len == 1 * bdev->blocklen); 3678 stub_complete_io(1); 3679 3680 /* Use the same split IO as above and check the IO */ 3681 memset(&cb_arg, 0, sizeof(cb_arg)); 3682 CU_ASSERT(spdk_bdev_write_blocks(desc, io_ch, (void *)0xF000, 14, 8, io_done, NULL) == 0); 3683 3684 /* The first child completes in time */ 3685 spdk_delay_us(15 * spdk_get_ticks_hz()); 3686 poll_threads(); 3687 stub_complete_io(1); 3688 CU_ASSERT(cb_arg.type == 0); 3689 CU_ASSERT(cb_arg.iov.iov_base == (void *)0x0); 3690 CU_ASSERT(cb_arg.iov.iov_len == 0); 3691 3692 /* The second child reaches the limit */ 3693 spdk_delay_us(15 * spdk_get_ticks_hz()); 3694 poll_threads(); 3695 CU_ASSERT(cb_arg.type == SPDK_BDEV_IO_TYPE_WRITE); 3696 CU_ASSERT(cb_arg.iov.iov_base == (void *)0xF000); 3697 CU_ASSERT(cb_arg.iov.iov_len == 8 * bdev->blocklen); 3698 stub_complete_io(1); 3699 3700 /* Also include the reset IO */ 3701 memset(&cb_arg, 0, sizeof(cb_arg)); 3702 CU_ASSERT(spdk_bdev_reset(desc, io_ch, io_done, NULL) == 0); 3703 spdk_delay_us(30 * spdk_get_ticks_hz()); 3704 poll_threads(); 3705 CU_ASSERT(cb_arg.type == SPDK_BDEV_IO_TYPE_RESET); 3706 stub_complete_io(1); 3707 poll_threads(); 3708 3709 spdk_put_io_channel(io_ch); 3710 spdk_bdev_close(desc); 3711 free_bdev(bdev); 3712 spdk_bdev_finish(bdev_fini_cb, NULL); 3713 poll_threads(); 3714 } 3715 3716 static void 3717 lba_range_overlap(void) 3718 { 3719 struct lba_range r1, r2; 3720 3721 r1.offset = 100; 3722 r1.length = 50; 3723 3724 r2.offset = 0; 3725 r2.length = 1; 3726 CU_ASSERT(!bdev_lba_range_overlapped(&r1, &r2)); 3727 3728 r2.offset = 0; 3729 r2.length = 100; 3730 CU_ASSERT(!bdev_lba_range_overlapped(&r1, &r2)); 3731 3732 r2.offset = 0; 3733 r2.length = 110; 3734 CU_ASSERT(bdev_lba_range_overlapped(&r1, &r2)); 3735 3736 r2.offset = 100; 3737 r2.length = 10; 3738 CU_ASSERT(bdev_lba_range_overlapped(&r1, &r2)); 3739 3740 r2.offset = 110; 3741 r2.length = 20; 3742 CU_ASSERT(bdev_lba_range_overlapped(&r1, &r2)); 3743 3744 r2.offset = 140; 3745 r2.length = 150; 3746 CU_ASSERT(bdev_lba_range_overlapped(&r1, &r2)); 3747 3748 r2.offset = 130; 3749 r2.length = 200; 3750 CU_ASSERT(bdev_lba_range_overlapped(&r1, &r2)); 3751 3752 r2.offset = 150; 3753 r2.length = 100; 3754 CU_ASSERT(!bdev_lba_range_overlapped(&r1, &r2)); 3755 3756 r2.offset = 110; 3757 r2.length = 0; 3758 CU_ASSERT(!bdev_lba_range_overlapped(&r1, &r2)); 3759 } 3760 3761 static bool g_lock_lba_range_done; 3762 static bool g_unlock_lba_range_done; 3763 3764 static void 3765 lock_lba_range_done(void *ctx, int status) 3766 { 3767 g_lock_lba_range_done = true; 3768 } 3769 3770 static void 3771 unlock_lba_range_done(void *ctx, int status) 3772 { 3773 g_unlock_lba_range_done = true; 3774 } 3775 3776 static void 3777 lock_lba_range_check_ranges(void) 3778 { 3779 struct spdk_bdev *bdev; 3780 struct spdk_bdev_desc *desc = NULL; 3781
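/* Background for the range-locking tests that follow: bdev_lock_lba_range()
 * puts the range on the channel's locked_ranges list right away, so no new
 * write IO can start on those blocks, but the lock callback only fires once
 * every write IO already outstanding on the range has completed. The pairing
 * used throughout these tests is:
 *
 *	rc = bdev_lock_lba_range(desc, io_ch, offset, length, cb_fn, cb_arg);
 *	...
 *	rc = bdev_unlock_lba_range(desc, io_ch, offset, length, cb_fn, cb_arg);
 *
 * where the unlock must match an existing lock exactly; partial unlocks are
 * rejected with -EINVAL, as asserted below. */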
struct spdk_io_channel *io_ch; 3782 struct spdk_bdev_channel *channel; 3783 struct lba_range *range; 3784 int ctx1; 3785 int rc; 3786 3787 spdk_bdev_initialize(bdev_init_cb, NULL); 3788 3789 bdev = allocate_bdev("bdev0"); 3790 3791 rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc); 3792 CU_ASSERT(rc == 0); 3793 CU_ASSERT(desc != NULL); 3794 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 3795 io_ch = spdk_bdev_get_io_channel(desc); 3796 CU_ASSERT(io_ch != NULL); 3797 channel = spdk_io_channel_get_ctx(io_ch); 3798 3799 g_lock_lba_range_done = false; 3800 rc = bdev_lock_lba_range(desc, io_ch, 20, 10, lock_lba_range_done, &ctx1); 3801 CU_ASSERT(rc == 0); 3802 poll_threads(); 3803 3804 CU_ASSERT(g_lock_lba_range_done == true); 3805 range = TAILQ_FIRST(&channel->locked_ranges); 3806 SPDK_CU_ASSERT_FATAL(range != NULL); 3807 CU_ASSERT(range->offset == 20); 3808 CU_ASSERT(range->length == 10); 3809 CU_ASSERT(range->owner_ch == channel); 3810 3811 /* Unlocks must exactly match a lock. */ 3812 g_unlock_lba_range_done = false; 3813 rc = bdev_unlock_lba_range(desc, io_ch, 20, 1, unlock_lba_range_done, &ctx1); 3814 CU_ASSERT(rc == -EINVAL); 3815 CU_ASSERT(g_unlock_lba_range_done == false); 3816 3817 rc = bdev_unlock_lba_range(desc, io_ch, 20, 10, unlock_lba_range_done, &ctx1); 3818 CU_ASSERT(rc == 0); 3819 spdk_delay_us(100); 3820 poll_threads(); 3821 3822 CU_ASSERT(g_unlock_lba_range_done == true); 3823 CU_ASSERT(TAILQ_EMPTY(&channel->locked_ranges)); 3824 3825 spdk_put_io_channel(io_ch); 3826 spdk_bdev_close(desc); 3827 free_bdev(bdev); 3828 spdk_bdev_finish(bdev_fini_cb, NULL); 3829 poll_threads(); 3830 } 3831 3832 static void 3833 lock_lba_range_with_io_outstanding(void) 3834 { 3835 struct spdk_bdev *bdev; 3836 struct spdk_bdev_desc *desc = NULL; 3837 struct spdk_io_channel *io_ch; 3838 struct spdk_bdev_channel *channel; 3839 struct lba_range *range; 3840 char buf[4096]; 3841 int ctx1; 3842 int rc; 3843 3844 spdk_bdev_initialize(bdev_init_cb, NULL); 3845 3846 bdev = allocate_bdev("bdev0"); 3847 3848 rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc); 3849 CU_ASSERT(rc == 0); 3850 CU_ASSERT(desc != NULL); 3851 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 3852 io_ch = spdk_bdev_get_io_channel(desc); 3853 CU_ASSERT(io_ch != NULL); 3854 channel = spdk_io_channel_get_ctx(io_ch); 3855 3856 g_io_done = false; 3857 rc = spdk_bdev_read_blocks(desc, io_ch, buf, 20, 1, io_done, &ctx1); 3858 CU_ASSERT(rc == 0); 3859 3860 g_lock_lba_range_done = false; 3861 rc = bdev_lock_lba_range(desc, io_ch, 20, 10, lock_lba_range_done, &ctx1); 3862 CU_ASSERT(rc == 0); 3863 poll_threads(); 3864 3865 /* The lock should immediately become valid, since there are no outstanding 3866 * write I/O. 3867 */ 3868 CU_ASSERT(g_io_done == false); 3869 CU_ASSERT(g_lock_lba_range_done == true); 3870 range = TAILQ_FIRST(&channel->locked_ranges); 3871 SPDK_CU_ASSERT_FATAL(range != NULL); 3872 CU_ASSERT(range->offset == 20); 3873 CU_ASSERT(range->length == 10); 3874 CU_ASSERT(range->owner_ch == channel); 3875 CU_ASSERT(range->locked_ctx == &ctx1); 3876 3877 rc = bdev_unlock_lba_range(desc, io_ch, 20, 10, lock_lba_range_done, &ctx1); 3878 CU_ASSERT(rc == 0); 3879 stub_complete_io(1); 3880 spdk_delay_us(100); 3881 poll_threads(); 3882 3883 CU_ASSERT(TAILQ_EMPTY(&channel->locked_ranges)); 3884 3885 /* Now try again, but with a write I/O. 
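 * The difference from the read case above: an outstanding read neither delays
 * the lock nor is blocked by it, so the lock became valid immediately. An
 * outstanding write on the range must drain first, so this time the lock
 * callback must not fire until stub_complete_io() finishes the write.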
*/ 3886 g_io_done = false; 3887 rc = spdk_bdev_write_blocks(desc, io_ch, buf, 20, 1, io_done, &ctx1); 3888 CU_ASSERT(rc == 0); 3889 3890 g_lock_lba_range_done = false; 3891 rc = bdev_lock_lba_range(desc, io_ch, 20, 10, lock_lba_range_done, &ctx1); 3892 CU_ASSERT(rc == 0); 3893 poll_threads(); 3894 3895 /* The lock should not be fully valid yet, since a write I/O is outstanding. 3896 * But note that the range should be on the channel's locked_list, to make sure no 3897 * new write I/O are started. 3898 */ 3899 CU_ASSERT(g_io_done == false); 3900 CU_ASSERT(g_lock_lba_range_done == false); 3901 range = TAILQ_FIRST(&channel->locked_ranges); 3902 SPDK_CU_ASSERT_FATAL(range != NULL); 3903 CU_ASSERT(range->offset == 20); 3904 CU_ASSERT(range->length == 10); 3905 3906 /* Complete the write I/O. This should make the lock valid (checked by confirming 3907 * our callback was invoked). 3908 */ 3909 stub_complete_io(1); 3910 spdk_delay_us(100); 3911 poll_threads(); 3912 CU_ASSERT(g_io_done == true); 3913 CU_ASSERT(g_lock_lba_range_done == true); 3914 3915 rc = bdev_unlock_lba_range(desc, io_ch, 20, 10, unlock_lba_range_done, &ctx1); 3916 CU_ASSERT(rc == 0); 3917 poll_threads(); 3918 3919 CU_ASSERT(TAILQ_EMPTY(&channel->locked_ranges)); 3920 3921 spdk_put_io_channel(io_ch); 3922 spdk_bdev_close(desc); 3923 free_bdev(bdev); 3924 spdk_bdev_finish(bdev_fini_cb, NULL); 3925 poll_threads(); 3926 } 3927 3928 static void 3929 lock_lba_range_overlapped(void) 3930 { 3931 struct spdk_bdev *bdev; 3932 struct spdk_bdev_desc *desc = NULL; 3933 struct spdk_io_channel *io_ch; 3934 struct spdk_bdev_channel *channel; 3935 struct lba_range *range; 3936 int ctx1; 3937 int rc; 3938 3939 spdk_bdev_initialize(bdev_init_cb, NULL); 3940 3941 bdev = allocate_bdev("bdev0"); 3942 3943 rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc); 3944 CU_ASSERT(rc == 0); 3945 CU_ASSERT(desc != NULL); 3946 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 3947 io_ch = spdk_bdev_get_io_channel(desc); 3948 CU_ASSERT(io_ch != NULL); 3949 channel = spdk_io_channel_get_ctx(io_ch); 3950 3951 /* Lock range 20-29. */ 3952 g_lock_lba_range_done = false; 3953 rc = bdev_lock_lba_range(desc, io_ch, 20, 10, lock_lba_range_done, &ctx1); 3954 CU_ASSERT(rc == 0); 3955 poll_threads(); 3956 3957 CU_ASSERT(g_lock_lba_range_done == true); 3958 range = TAILQ_FIRST(&channel->locked_ranges); 3959 SPDK_CU_ASSERT_FATAL(range != NULL); 3960 CU_ASSERT(range->offset == 20); 3961 CU_ASSERT(range->length == 10); 3962 3963 /* Try to lock range 25-39. It should not lock immediately, since it overlaps with 3964 * 20-29. 3965 */ 3966 g_lock_lba_range_done = false; 3967 rc = bdev_lock_lba_range(desc, io_ch, 25, 15, lock_lba_range_done, &ctx1); 3968 CU_ASSERT(rc == 0); 3969 poll_threads(); 3970 3971 CU_ASSERT(g_lock_lba_range_done == false); 3972 range = TAILQ_FIRST(&bdev->internal.pending_locked_ranges); 3973 SPDK_CU_ASSERT_FATAL(range != NULL); 3974 CU_ASSERT(range->offset == 25); 3975 CU_ASSERT(range->length == 15); 3976 3977 /* Unlock 20-29. This should result in range 25-39 now getting locked since it 3978 * no longer overlaps with an active lock. 
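 * The mechanism, in one picture: a lock request that overlaps an active lock
 * is parked on bdev->internal.pending_locked_ranges, and every unlock rescans
 * that list and promotes any pending range that has become conflict-free:
 *
 *	active:  [20,29]      unlock [20,29]      active:  [25,39]
 *	pending: [25,39]     --------------->     pending: (empty)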
3979 */ 3980 g_unlock_lba_range_done = false; 3981 rc = bdev_unlock_lba_range(desc, io_ch, 20, 10, unlock_lba_range_done, &ctx1); 3982 CU_ASSERT(rc == 0); 3983 poll_threads(); 3984 3985 CU_ASSERT(g_unlock_lba_range_done == true); 3986 CU_ASSERT(TAILQ_EMPTY(&bdev->internal.pending_locked_ranges)); 3987 range = TAILQ_FIRST(&channel->locked_ranges); 3988 SPDK_CU_ASSERT_FATAL(range != NULL); 3989 CU_ASSERT(range->offset == 25); 3990 CU_ASSERT(range->length == 15); 3991 3992 /* Lock 40-59. This should immediately lock since it does not overlap with the 3993 * currently active 25-39 lock. 3994 */ 3995 g_lock_lba_range_done = false; 3996 rc = bdev_lock_lba_range(desc, io_ch, 40, 20, lock_lba_range_done, &ctx1); 3997 CU_ASSERT(rc == 0); 3998 poll_threads(); 3999 4000 CU_ASSERT(g_lock_lba_range_done == true); 4001 range = TAILQ_FIRST(&bdev->internal.locked_ranges); 4002 SPDK_CU_ASSERT_FATAL(range != NULL); 4003 range = TAILQ_NEXT(range, tailq); 4004 SPDK_CU_ASSERT_FATAL(range != NULL); 4005 CU_ASSERT(range->offset == 40); 4006 CU_ASSERT(range->length == 20); 4007 4008 /* Try to lock 35-44. Note that this overlaps with both 25-39 and 40-59. */ 4009 g_lock_lba_range_done = false; 4010 rc = bdev_lock_lba_range(desc, io_ch, 35, 10, lock_lba_range_done, &ctx1); 4011 CU_ASSERT(rc == 0); 4012 poll_threads(); 4013 4014 CU_ASSERT(g_lock_lba_range_done == false); 4015 range = TAILQ_FIRST(&bdev->internal.pending_locked_ranges); 4016 SPDK_CU_ASSERT_FATAL(range != NULL); 4017 CU_ASSERT(range->offset == 35); 4018 CU_ASSERT(range->length == 10); 4019 4020 /* Unlock 25-39. Make sure that 35-44 is still in the pending list, since 4021 * the 40-59 lock is still active. 4022 */ 4023 g_unlock_lba_range_done = false; 4024 rc = bdev_unlock_lba_range(desc, io_ch, 25, 15, unlock_lba_range_done, &ctx1); 4025 CU_ASSERT(rc == 0); 4026 poll_threads(); 4027 4028 CU_ASSERT(g_unlock_lba_range_done == true); 4029 CU_ASSERT(g_lock_lba_range_done == false); 4030 range = TAILQ_FIRST(&bdev->internal.pending_locked_ranges); 4031 SPDK_CU_ASSERT_FATAL(range != NULL); 4032 CU_ASSERT(range->offset == 35); 4033 CU_ASSERT(range->length == 10); 4034 4035 /* Unlock 40-59. This should result in 35-44 now getting locked, since there are 4036 * no longer any active overlapping locks. 4037 */ 4038 g_unlock_lba_range_done = false; 4039 rc = bdev_unlock_lba_range(desc, io_ch, 40, 20, unlock_lba_range_done, &ctx1); 4040 CU_ASSERT(rc == 0); 4041 poll_threads(); 4042 4043 CU_ASSERT(g_unlock_lba_range_done == true); 4044 CU_ASSERT(g_lock_lba_range_done == true); 4045 CU_ASSERT(TAILQ_EMPTY(&bdev->internal.pending_locked_ranges)); 4046 range = TAILQ_FIRST(&bdev->internal.locked_ranges); 4047 SPDK_CU_ASSERT_FATAL(range != NULL); 4048 CU_ASSERT(range->offset == 35); 4049 CU_ASSERT(range->length == 10); 4050 4051 /* Finally, unlock 35-44. 
*/ 4052 g_unlock_lba_range_done = false; 4053 rc = bdev_unlock_lba_range(desc, io_ch, 35, 10, unlock_lba_range_done, &ctx1); 4054 CU_ASSERT(rc == 0); 4055 poll_threads(); 4056 4057 CU_ASSERT(g_unlock_lba_range_done == true); 4058 CU_ASSERT(TAILQ_EMPTY(&bdev->internal.locked_ranges)); 4059 4060 spdk_put_io_channel(io_ch); 4061 spdk_bdev_close(desc); 4062 free_bdev(bdev); 4063 spdk_bdev_finish(bdev_fini_cb, NULL); 4064 poll_threads(); 4065 } 4066 4067 static void 4068 abort_done(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg) 4069 { 4070 g_abort_done = true; 4071 g_abort_status = bdev_io->internal.status; 4072 spdk_bdev_free_io(bdev_io); 4073 } 4074 4075 static void 4076 bdev_io_abort(void) 4077 { 4078 struct spdk_bdev *bdev; 4079 struct spdk_bdev_desc *desc = NULL; 4080 struct spdk_io_channel *io_ch; 4081 struct spdk_bdev_channel *channel; 4082 struct spdk_bdev_mgmt_channel *mgmt_ch; 4083 struct spdk_bdev_opts bdev_opts = {}; 4084 struct iovec iov[BDEV_IO_NUM_CHILD_IOV * 2]; 4085 uint64_t io_ctx1 = 0, io_ctx2 = 0, i; 4086 int rc; 4087 4088 spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts)); 4089 bdev_opts.bdev_io_pool_size = 7; 4090 bdev_opts.bdev_io_cache_size = 2; 4091 4092 rc = spdk_bdev_set_opts(&bdev_opts); 4093 CU_ASSERT(rc == 0); 4094 spdk_bdev_initialize(bdev_init_cb, NULL); 4095 4096 bdev = allocate_bdev("bdev0"); 4097 4098 rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc); 4099 CU_ASSERT(rc == 0); 4100 CU_ASSERT(desc != NULL); 4101 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 4102 io_ch = spdk_bdev_get_io_channel(desc); 4103 CU_ASSERT(io_ch != NULL); 4104 channel = spdk_io_channel_get_ctx(io_ch); 4105 mgmt_ch = channel->shared_resource->mgmt_ch; 4106 4107 g_abort_done = false; 4108 4109 ut_enable_io_type(SPDK_BDEV_IO_TYPE_ABORT, false); 4110 4111 rc = spdk_bdev_abort(desc, io_ch, &io_ctx1, abort_done, NULL); 4112 CU_ASSERT(rc == -ENOTSUP); 4113 4114 ut_enable_io_type(SPDK_BDEV_IO_TYPE_ABORT, true); 4115 4116 rc = spdk_bdev_abort(desc, io_ch, &io_ctx2, abort_done, NULL); 4117 CU_ASSERT(rc == 0); 4118 CU_ASSERT(g_abort_done == true); 4119 CU_ASSERT(g_abort_status == SPDK_BDEV_IO_STATUS_FAILED); 4120 4121 /* Test the case that the target I/O was successfully aborted. */ 4122 g_io_done = false; 4123 4124 rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, &io_ctx1); 4125 CU_ASSERT(rc == 0); 4126 CU_ASSERT(g_io_done == false); 4127 4128 g_abort_done = false; 4129 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 4130 4131 rc = spdk_bdev_abort(desc, io_ch, &io_ctx1, abort_done, NULL); 4132 CU_ASSERT(rc == 0); 4133 CU_ASSERT(g_io_done == true); 4134 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED); 4135 stub_complete_io(1); 4136 CU_ASSERT(g_abort_done == true); 4137 CU_ASSERT(g_abort_status == SPDK_BDEV_IO_STATUS_SUCCESS); 4138 4139 /* Test the case that the target I/O was not aborted because it completed 4140 * in the middle of execution of the abort. 
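 * The race, step by step: the READ is submitted and an abort is issued for
 * it, but the READ completes (with SUCCESS) before the driver acts on the
 * abort, so the driver then fails the abort child. The test still expects
 * the user's abort callback to observe SUCCESS, since the target IO is gone
 * either way; the g_io_exp_status assignments below stage exactly that
 * ordering in the stub.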
4141 */ 4142 g_io_done = false; 4143 4144 rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, &io_ctx1); 4145 CU_ASSERT(rc == 0); 4146 CU_ASSERT(g_io_done == false); 4147 4148 g_abort_done = false; 4149 g_io_exp_status = SPDK_BDEV_IO_STATUS_FAILED; 4150 4151 rc = spdk_bdev_abort(desc, io_ch, &io_ctx1, abort_done, NULL); 4152 CU_ASSERT(rc == 0); 4153 CU_ASSERT(g_io_done == false); 4154 4155 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 4156 stub_complete_io(1); 4157 CU_ASSERT(g_io_done == true); 4158 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS); 4159 4160 g_io_exp_status = SPDK_BDEV_IO_STATUS_FAILED; 4161 stub_complete_io(1); 4162 CU_ASSERT(g_abort_done == true); 4163 CU_ASSERT(g_abort_status == SPDK_BDEV_IO_STATUS_SUCCESS); 4164 4165 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 4166 4167 bdev->optimal_io_boundary = 16; 4168 bdev->split_on_optimal_io_boundary = true; 4169 4170 /* Test that a single-vector command which is split is aborted correctly. 4171 * Offset 14, length 8, payload 0xF000 4172 * Child - Offset 14, length 2, payload 0xF000 4173 * Child - Offset 16, length 6, payload 0xF000 + 2 * 512 4174 */ 4175 g_io_done = false; 4176 4177 rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 8, io_done, &io_ctx1); 4178 CU_ASSERT(rc == 0); 4179 CU_ASSERT(g_io_done == false); 4180 4181 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 4182 4183 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 4184 4185 rc = spdk_bdev_abort(desc, io_ch, &io_ctx1, abort_done, NULL); 4186 CU_ASSERT(rc == 0); 4187 CU_ASSERT(g_io_done == true); 4188 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED); 4189 stub_complete_io(2); 4190 CU_ASSERT(g_abort_done == true); 4191 CU_ASSERT(g_abort_status == SPDK_BDEV_IO_STATUS_SUCCESS); 4192 4193 /* Test that a multi-vector command that needs to be split by strip and then 4194 * needs to be split further is aborted correctly. The abort is requested before 4195 * the second child I/O is submitted. The parent I/O should complete with failure 4196 * without submitting the second child I/O. 4197 */ 4198 for (i = 0; i < BDEV_IO_NUM_CHILD_IOV * 2; i++) { 4199 iov[i].iov_base = (void *)((i + 1) * 0x10000); 4200 iov[i].iov_len = 512; 4201 } 4202 4203 bdev->optimal_io_boundary = BDEV_IO_NUM_CHILD_IOV; 4204 g_io_done = false; 4205 rc = spdk_bdev_readv_blocks(desc, io_ch, iov, BDEV_IO_NUM_CHILD_IOV * 2, 0, 4206 BDEV_IO_NUM_CHILD_IOV * 2, io_done, &io_ctx1); 4207 CU_ASSERT(rc == 0); 4208 CU_ASSERT(g_io_done == false); 4209 4210 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 4211 4212 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 4213 4214 rc = spdk_bdev_abort(desc, io_ch, &io_ctx1, abort_done, NULL); 4215 CU_ASSERT(rc == 0); 4216 CU_ASSERT(g_io_done == true); 4217 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED); 4218 stub_complete_io(1); 4219 CU_ASSERT(g_abort_done == true); 4220 CU_ASSERT(g_abort_status == SPDK_BDEV_IO_STATUS_SUCCESS); 4221 4222 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 4223 4224 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 4225 4226 bdev->optimal_io_boundary = 16; 4227 g_io_done = false; 4228 4229 /* Test that a single-vector command which is split is aborted correctly. 4230 * Unlike the above, the child abort requests will be submitted 4231 * sequentially due to the capacity of spdk_bdev_io.
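 * The arithmetic behind it: bdev_io_pool_size was set to 7 at the top of this
 * test. A 50-block read at offset 14 with a 16-block boundary splits into 4
 * children (blocks 14-15, 16-31, 32-47, 48-63), so parent plus children
 * consume 5 of the 7 spdk_bdev_io. The abort then needs one parent abort plus
 * one abort per child, but only 2 spdk_bdev_io remain, so some child aborts
 * must wait on mgmt_ch->io_wait_queue until earlier ones return to the pool,
 * which is exactly what the !TAILQ_EMPTY(&mgmt_ch->io_wait_queue) assertion
 * checks.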
4232 */ 4233 rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 50, io_done, &io_ctx1); 4234 CU_ASSERT(rc == 0); 4235 CU_ASSERT(g_io_done == false); 4236 4237 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 4); 4238 4239 g_abort_done = false; 4240 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 4241 4242 rc = spdk_bdev_abort(desc, io_ch, &io_ctx1, abort_done, NULL); 4243 CU_ASSERT(rc == 0); 4244 CU_ASSERT(!TAILQ_EMPTY(&mgmt_ch->io_wait_queue)); 4245 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 4); 4246 4247 stub_complete_io(1); 4248 CU_ASSERT(g_io_done == true); 4249 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED); 4250 stub_complete_io(3); 4251 CU_ASSERT(g_abort_done == true); 4252 CU_ASSERT(g_abort_status == SPDK_BDEV_IO_STATUS_SUCCESS); 4253 4254 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 4255 4256 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 4257 4258 spdk_put_io_channel(io_ch); 4259 spdk_bdev_close(desc); 4260 free_bdev(bdev); 4261 spdk_bdev_finish(bdev_fini_cb, NULL); 4262 poll_threads(); 4263 } 4264 4265 static void 4266 bdev_unmap(void) 4267 { 4268 struct spdk_bdev *bdev; 4269 struct spdk_bdev_desc *desc = NULL; 4270 struct spdk_io_channel *ioch; 4271 struct spdk_bdev_channel *bdev_ch; 4272 struct ut_expected_io *expected_io; 4273 struct spdk_bdev_opts bdev_opts = {}; 4274 uint32_t i, num_outstanding; 4275 uint64_t offset, num_blocks, max_unmap_blocks, num_children; 4276 int rc; 4277 4278 spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts)); 4279 bdev_opts.bdev_io_pool_size = 512; 4280 bdev_opts.bdev_io_cache_size = 64; 4281 rc = spdk_bdev_set_opts(&bdev_opts); 4282 CU_ASSERT(rc == 0); 4283 4284 spdk_bdev_initialize(bdev_init_cb, NULL); 4285 bdev = allocate_bdev("bdev"); 4286 4287 rc = spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc); 4288 CU_ASSERT_EQUAL(rc, 0); 4289 SPDK_CU_ASSERT_FATAL(desc != NULL); 4290 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 4291 ioch = spdk_bdev_get_io_channel(desc); 4292 SPDK_CU_ASSERT_FATAL(ioch != NULL); 4293 bdev_ch = spdk_io_channel_get_ctx(ioch); 4294 CU_ASSERT(TAILQ_EMPTY(&bdev_ch->io_submitted)); 4295 4296 fn_table.submit_request = stub_submit_request; 4297 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 4298 4299 /* Case 1: First test the request won't be split */ 4300 num_blocks = 32; 4301 4302 g_io_done = false; 4303 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_UNMAP, 0, num_blocks, 0); 4304 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 4305 rc = spdk_bdev_unmap_blocks(desc, ioch, 0, num_blocks, io_done, NULL); 4306 CU_ASSERT_EQUAL(rc, 0); 4307 CU_ASSERT(g_io_done == false); 4308 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 4309 stub_complete_io(1); 4310 CU_ASSERT(g_io_done == true); 4311 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 4312 4313 /* Case 2: Test the split with 2 children requests */ 4314 bdev->max_unmap = 8; 4315 bdev->max_unmap_segments = 2; 4316 max_unmap_blocks = bdev->max_unmap * bdev->max_unmap_segments; 4317 num_blocks = max_unmap_blocks * 2; 4318 offset = 0; 4319 4320 g_io_done = false; 4321 for (i = 0; i < 2; i++) { 4322 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_UNMAP, offset, max_unmap_blocks, 0); 4323 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 4324 offset += max_unmap_blocks; 4325 } 4326 4327 rc = spdk_bdev_unmap_blocks(desc, ioch, 0, num_blocks, io_done, NULL); 4328 CU_ASSERT_EQUAL(rc, 0); 4329 CU_ASSERT(g_io_done == false); 4330 
CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 4331 stub_complete_io(2); 4332 CU_ASSERT(g_io_done == true); 4333 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 4334 4335 /* Case 3: Test the split with 15 child requests; the first 8 will finish first */ 4336 num_children = 15; 4337 num_blocks = max_unmap_blocks * num_children; 4338 g_io_done = false; 4339 offset = 0; 4340 for (i = 0; i < num_children; i++) { 4341 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_UNMAP, offset, max_unmap_blocks, 0); 4342 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 4343 offset += max_unmap_blocks; 4344 } 4345 4346 rc = spdk_bdev_unmap_blocks(desc, ioch, 0, num_blocks, io_done, NULL); 4347 CU_ASSERT_EQUAL(rc, 0); 4348 CU_ASSERT(g_io_done == false); 4349 4350 while (num_children > 0) { 4351 num_outstanding = spdk_min(num_children, SPDK_BDEV_MAX_CHILDREN_UNMAP_REQS); 4352 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == num_outstanding); 4353 stub_complete_io(num_outstanding); 4354 num_children -= num_outstanding; 4355 } 4356 CU_ASSERT(g_io_done == true); 4357 4358 spdk_put_io_channel(ioch); 4359 spdk_bdev_close(desc); 4360 free_bdev(bdev); 4361 spdk_bdev_finish(bdev_fini_cb, NULL); 4362 poll_threads(); 4363 } 4364 4365 static void 4366 bdev_set_options_test(void) 4367 { 4368 struct spdk_bdev_opts bdev_opts = {}; 4369 int rc; 4370 4371 /* Case 1: Do not set opts_size */ 4372 rc = spdk_bdev_set_opts(&bdev_opts); 4373 CU_ASSERT(rc == -1); 4374 4375 spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts)); 4376 bdev_opts.bdev_io_pool_size = 4; 4377 bdev_opts.bdev_io_cache_size = 2; 4378 bdev_opts.small_buf_pool_size = 4; 4379 4380 /* Case 2: Set an invalid small_buf_pool_size */ 4381 rc = spdk_bdev_set_opts(&bdev_opts); 4382 CU_ASSERT(rc == -1); 4383 4384 /* Case 3: Set an invalid large_buf_pool_size */ 4385 bdev_opts.small_buf_pool_size = BUF_SMALL_POOL_SIZE; 4386 bdev_opts.large_buf_pool_size = BUF_LARGE_POOL_SIZE - 1; 4387 rc = spdk_bdev_set_opts(&bdev_opts); 4388 CU_ASSERT(rc == -1); 4389 4390 /* Case 4: Set a valid large_buf_pool_size */ 4391 bdev_opts.large_buf_pool_size = BUF_LARGE_POOL_SIZE; 4392 rc = spdk_bdev_set_opts(&bdev_opts); 4393 CU_ASSERT(rc == 0); 4394 4395 /* Case 5: Set different valid values for the small and large buf pools */ 4396 bdev_opts.small_buf_pool_size = BUF_SMALL_POOL_SIZE + 3; 4397 bdev_opts.large_buf_pool_size = BUF_LARGE_POOL_SIZE + 3; 4398 rc = spdk_bdev_set_opts(&bdev_opts); 4399 CU_ASSERT(rc == 0); 4400 } 4401 4402 static uint64_t 4403 get_ns_time(void) 4404 { 4405 int rc; 4406 struct timespec ts; 4407 4408 rc = clock_gettime(CLOCK_MONOTONIC, &ts); 4409 CU_ASSERT(rc == 0); 4410 return ts.tv_sec * 1000 * 1000 * 1000 + ts.tv_nsec; 4411 } 4412 4413 static int 4414 rb_tree_get_height(struct spdk_bdev_name *bdev_name) 4415 { 4416 int h1, h2; 4417 4418 if (bdev_name == NULL) { 4419 return -1; 4420 } else { 4421 h1 = rb_tree_get_height(RB_LEFT(bdev_name, node)); 4422 h2 = rb_tree_get_height(RB_RIGHT(bdev_name, node)); 4423 4424 return spdk_max(h1, h2) + 1; 4425 } 4426 } 4427 4428 static void 4429 bdev_multi_allocation(void) 4430 { 4431 const int max_bdev_num = 1024 * 16; 4432 char name[max_bdev_num][10]; 4433 char noexist_name[] = "invalid_bdev"; 4434 struct spdk_bdev *bdev[max_bdev_num]; 4435 int i, j; 4436 uint64_t last_time; 4437 int bdev_num; 4438 int height; 4439 4440 for (j = 0; j < max_bdev_num; j++) { 4441 snprintf(name[j], sizeof(name[j]), "bdev%d", j); 4442 } 4443 4444 for (i = 0; i < 16; i++) { 4445 last_time
= get_ns_time(); 4446 bdev_num = 1024 * (i + 1); 4447 for (j = 0; j < bdev_num; j++) { 4448 bdev[j] = allocate_bdev(name[j]); 4449 height = rb_tree_get_height(&bdev[j]->internal.bdev_name); 4450 CU_ASSERT(height <= (int)(spdk_u32log2(j + 1))); 4451 } 4452 SPDK_NOTICELOG("alloc bdev num %d takes %lu ms\n", bdev_num, 4453 (get_ns_time() - last_time) / 1000 / 1000); 4454 for (j = 0; j < bdev_num; j++) { 4455 CU_ASSERT(spdk_bdev_get_by_name(name[j]) != NULL); 4456 } 4457 CU_ASSERT(spdk_bdev_get_by_name(noexist_name) == NULL); 4458 4459 for (j = 0; j < bdev_num; j++) { 4460 free_bdev(bdev[j]); 4461 } 4462 for (j = 0; j < bdev_num; j++) { 4463 CU_ASSERT(spdk_bdev_get_by_name(name[j]) == NULL); 4464 } 4465 } 4466 } 4467 4468 int 4469 main(int argc, char **argv) 4470 { 4471 CU_pSuite suite = NULL; 4472 unsigned int num_failures; 4473 4474 CU_set_error_action(CUEA_ABORT); 4475 CU_initialize_registry(); 4476 4477 suite = CU_add_suite("bdev", null_init, null_clean); 4478 4479 CU_ADD_TEST(suite, bytes_to_blocks_test); 4480 CU_ADD_TEST(suite, num_blocks_test); 4481 CU_ADD_TEST(suite, io_valid_test); 4482 CU_ADD_TEST(suite, open_write_test); 4483 CU_ADD_TEST(suite, alias_add_del_test); 4484 CU_ADD_TEST(suite, get_device_stat_test); 4485 CU_ADD_TEST(suite, bdev_io_types_test); 4486 CU_ADD_TEST(suite, bdev_io_wait_test); 4487 CU_ADD_TEST(suite, bdev_io_spans_split_test); 4488 CU_ADD_TEST(suite, bdev_io_boundary_split_test); 4489 CU_ADD_TEST(suite, bdev_io_max_size_and_segment_split_test); 4490 CU_ADD_TEST(suite, bdev_io_mix_split_test); 4491 CU_ADD_TEST(suite, bdev_io_split_with_io_wait); 4492 CU_ADD_TEST(suite, bdev_io_alignment_with_boundary); 4493 CU_ADD_TEST(suite, bdev_io_alignment); 4494 CU_ADD_TEST(suite, bdev_histograms); 4495 CU_ADD_TEST(suite, bdev_write_zeroes); 4496 CU_ADD_TEST(suite, bdev_compare_and_write); 4497 CU_ADD_TEST(suite, bdev_compare); 4498 CU_ADD_TEST(suite, bdev_open_while_hotremove); 4499 CU_ADD_TEST(suite, bdev_close_while_hotremove); 4500 CU_ADD_TEST(suite, bdev_open_ext); 4501 CU_ADD_TEST(suite, bdev_set_io_timeout); 4502 CU_ADD_TEST(suite, lba_range_overlap); 4503 CU_ADD_TEST(suite, lock_lba_range_check_ranges); 4504 CU_ADD_TEST(suite, lock_lba_range_with_io_outstanding); 4505 CU_ADD_TEST(suite, lock_lba_range_overlapped); 4506 CU_ADD_TEST(suite, bdev_io_abort); 4507 CU_ADD_TEST(suite, bdev_unmap); 4508 CU_ADD_TEST(suite, bdev_set_options_test); 4509 CU_ADD_TEST(suite, bdev_multi_allocation); 4510 4511 allocate_cores(1); 4512 allocate_threads(1); 4513 set_thread(0); 4514 4515 CU_basic_set_mode(CU_BRM_VERBOSE); 4516 CU_basic_run_tests(); 4517 num_failures = CU_get_number_of_failures(); 4518 CU_cleanup_registry(); 4519 4520 free_threads(); 4521 free_cores(); 4522 4523 return num_failures; 4524 } 4525