/*-
 *   BSD LICENSE
 *
 *   Copyright (c) Intel Corporation. All rights reserved.
 *   Copyright (c) 2019 Mellanox Technologies LTD. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "spdk_cunit.h"

#include "common/lib/ut_multithread.c"
#include "unit/lib/json_mock.c"

#include "spdk/config.h"
/* HACK: disable VTune integration so the unit test doesn't need VTune headers and libs to build */
#undef SPDK_CONFIG_VTUNE

#include "bdev/bdev.c"

struct spdk_trace_histories *g_trace_histories;
DEFINE_STUB_V(spdk_trace_add_register_fn, (struct spdk_trace_register_fn *reg_fn));
DEFINE_STUB_V(spdk_trace_register_owner, (uint8_t type, char id_prefix));
DEFINE_STUB_V(spdk_trace_register_object, (uint8_t type, char id_prefix));
DEFINE_STUB_V(spdk_trace_register_description, (const char *name,
		uint16_t tpoint_id, uint8_t owner_type,
		uint8_t object_type, uint8_t new_object,
		uint8_t arg1_type, const char *arg1_name));
DEFINE_STUB_V(_spdk_trace_record, (uint64_t tsc, uint16_t tpoint_id, uint16_t poller_id,
				   uint32_t size, uint64_t object_id, uint64_t arg1));
DEFINE_STUB(spdk_notify_send, uint64_t, (const char *type, const char *ctx), 0);
DEFINE_STUB(spdk_notify_type_register, struct spdk_notify_type *, (const char *type), NULL);

int g_status;
int g_count;
enum spdk_bdev_event_type g_event_type1;
enum spdk_bdev_event_type g_event_type2;
struct spdk_histogram_data *g_histogram;
void *g_unregister_arg;
int g_unregister_rc;

void
spdk_scsi_nvme_translate(const struct spdk_bdev_io *bdev_io,
			 int *sc, int *sk, int *asc, int *ascq)
{
}

static int
null_init(void)
{
	return 0;
}

static int
null_clean(void)
{
	return 0;
}

static int
stub_destruct(void *ctx)
{
	return 0;
}

struct ut_expected_io {
	uint8_t				type;
	uint64_t			offset;
	uint64_t			length;
	int				iovcnt;
	struct iovec			iov[BDEV_IO_NUM_CHILD_IOV];
	void				*md_buf;
	TAILQ_ENTRY(ut_expected_io)	link;
};

struct bdev_ut_channel {
	TAILQ_HEAD(, spdk_bdev_io)	outstanding_io;
	uint32_t			outstanding_io_count;
	TAILQ_HEAD(, ut_expected_io)	expected_io;
};

static bool g_io_done;
static struct spdk_bdev_io *g_bdev_io;
static enum spdk_bdev_io_status g_io_status;
static enum spdk_bdev_io_status g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS;
static uint32_t g_bdev_ut_io_device;
static struct bdev_ut_channel *g_bdev_ut_channel;
static void *g_compare_read_buf;
static uint32_t g_compare_read_buf_len;
static void *g_compare_write_buf;
static uint32_t g_compare_write_buf_len;
static bool g_abort_done;
static enum spdk_bdev_io_status g_abort_status;

static struct ut_expected_io *
ut_alloc_expected_io(uint8_t type, uint64_t offset, uint64_t length, int iovcnt)
{
	struct ut_expected_io *expected_io;

	expected_io = calloc(1, sizeof(*expected_io));
	SPDK_CU_ASSERT_FATAL(expected_io != NULL);

	expected_io->type = type;
	expected_io->offset = offset;
	expected_io->length = length;
	expected_io->iovcnt = iovcnt;

	return expected_io;
}

static void
ut_expected_io_set_iov(struct ut_expected_io *expected_io, int pos, void *base, size_t len)
{
	expected_io->iov[pos].iov_base = base;
	expected_io->iov[pos].iov_len = len;
}

static void
stub_submit_request(struct spdk_io_channel *_ch, struct spdk_bdev_io *bdev_io)
{
	struct bdev_ut_channel *ch = spdk_io_channel_get_ctx(_ch);
	struct ut_expected_io *expected_io;
	struct iovec *iov, *expected_iov;
	struct spdk_bdev_io *bio_to_abort;
	int i;

	g_bdev_io = bdev_io;

	if (g_compare_read_buf && bdev_io->type == SPDK_BDEV_IO_TYPE_READ) {
		uint32_t len = bdev_io->u.bdev.iovs[0].iov_len;

		CU_ASSERT(bdev_io->u.bdev.iovcnt == 1);
		CU_ASSERT(g_compare_read_buf_len == len);
		memcpy(bdev_io->u.bdev.iovs[0].iov_base, g_compare_read_buf, len);
	}

	if (g_compare_write_buf && bdev_io->type == SPDK_BDEV_IO_TYPE_WRITE) {
		uint32_t len = bdev_io->u.bdev.iovs[0].iov_len;

		CU_ASSERT(bdev_io->u.bdev.iovcnt == 1);
		CU_ASSERT(g_compare_write_buf_len == len);
		memcpy(g_compare_write_buf, bdev_io->u.bdev.iovs[0].iov_base, len);
	}

	if (g_compare_read_buf && bdev_io->type == SPDK_BDEV_IO_TYPE_COMPARE) {
		uint32_t len = bdev_io->u.bdev.iovs[0].iov_len;

		CU_ASSERT(bdev_io->u.bdev.iovcnt == 1);
		CU_ASSERT(g_compare_read_buf_len == len);
		if (memcmp(bdev_io->u.bdev.iovs[0].iov_base, g_compare_read_buf, len)) {
			g_io_exp_status = SPDK_BDEV_IO_STATUS_MISCOMPARE;
		}
	}

	if (bdev_io->type == SPDK_BDEV_IO_TYPE_ABORT) {
		if (g_io_exp_status == SPDK_BDEV_IO_STATUS_SUCCESS) {
			TAILQ_FOREACH(bio_to_abort, &ch->outstanding_io, module_link) {
				if (bio_to_abort == bdev_io->u.abort.bio_to_abort) {
					TAILQ_REMOVE(&ch->outstanding_io, bio_to_abort, module_link);
					ch->outstanding_io_count--;
					spdk_bdev_io_complete(bio_to_abort, SPDK_BDEV_IO_STATUS_FAILED);
					break;
				}
			}
		}
	}

	TAILQ_INSERT_TAIL(&ch->outstanding_io, bdev_io, module_link);
	ch->outstanding_io_count++;

	expected_io = TAILQ_FIRST(&ch->expected_io);
	if (expected_io == NULL) {
		return;
	}
	TAILQ_REMOVE(&ch->expected_io, expected_io, link);

	if (expected_io->type != SPDK_BDEV_IO_TYPE_INVALID) {
		CU_ASSERT(bdev_io->type == expected_io->type);
	}

	if (expected_io->md_buf != NULL) {
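		/* Each child of a split I/O is expected to carry an md_buf advanced by
		 * (blocks already consumed) * md_len from the parent's buffer; the split
		 * tests below set md_len = 8 and precompute these offsets.
		 */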
		CU_ASSERT(expected_io->md_buf == bdev_io->u.bdev.md_buf);
	}

	if (expected_io->length == 0) {
		free(expected_io);
		return;
	}

	CU_ASSERT(expected_io->offset == bdev_io->u.bdev.offset_blocks);
	CU_ASSERT(expected_io->length == bdev_io->u.bdev.num_blocks);

	if (expected_io->iovcnt == 0) {
		free(expected_io);
		/* UNMAP, WRITE_ZEROES and FLUSH don't have iovs, so we can just return now. */
		return;
	}

	CU_ASSERT(expected_io->iovcnt == bdev_io->u.bdev.iovcnt);
	for (i = 0; i < expected_io->iovcnt; i++) {
		iov = &bdev_io->u.bdev.iovs[i];
		expected_iov = &expected_io->iov[i];
		CU_ASSERT(iov->iov_len == expected_iov->iov_len);
		CU_ASSERT(iov->iov_base == expected_iov->iov_base);
	}

	free(expected_io);
}

static void
stub_submit_request_get_buf_cb(struct spdk_io_channel *_ch,
			       struct spdk_bdev_io *bdev_io, bool success)
{
	CU_ASSERT(success == true);

	stub_submit_request(_ch, bdev_io);
}

static void
stub_submit_request_get_buf(struct spdk_io_channel *_ch, struct spdk_bdev_io *bdev_io)
{
	spdk_bdev_io_get_buf(bdev_io, stub_submit_request_get_buf_cb,
			     bdev_io->u.bdev.num_blocks * bdev_io->bdev->blocklen);
}

static uint32_t
stub_complete_io(uint32_t num_to_complete)
{
	struct bdev_ut_channel *ch = g_bdev_ut_channel;
	struct spdk_bdev_io *bdev_io;
	static enum spdk_bdev_io_status io_status;
	uint32_t num_completed = 0;

	while (num_completed < num_to_complete) {
		if (TAILQ_EMPTY(&ch->outstanding_io)) {
			break;
		}
		bdev_io = TAILQ_FIRST(&ch->outstanding_io);
		TAILQ_REMOVE(&ch->outstanding_io, bdev_io, module_link);
		ch->outstanding_io_count--;
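		/* Complete the I/O with the globally expected status; tests set
		 * g_io_exp_status to force child I/O failures.
		 */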
		io_status = g_io_exp_status == SPDK_BDEV_IO_STATUS_SUCCESS ? SPDK_BDEV_IO_STATUS_SUCCESS :
			    g_io_exp_status;
		spdk_bdev_io_complete(bdev_io, io_status);
		num_completed++;
	}

	return num_completed;
}

static struct spdk_io_channel *
bdev_ut_get_io_channel(void *ctx)
{
	return spdk_get_io_channel(&g_bdev_ut_io_device);
}

static bool g_io_types_supported[SPDK_BDEV_NUM_IO_TYPES] = {
	[SPDK_BDEV_IO_TYPE_READ]		= true,
	[SPDK_BDEV_IO_TYPE_WRITE]		= true,
	[SPDK_BDEV_IO_TYPE_COMPARE]		= true,
	[SPDK_BDEV_IO_TYPE_UNMAP]		= true,
	[SPDK_BDEV_IO_TYPE_FLUSH]		= true,
	[SPDK_BDEV_IO_TYPE_RESET]		= true,
	[SPDK_BDEV_IO_TYPE_NVME_ADMIN]		= true,
	[SPDK_BDEV_IO_TYPE_NVME_IO]		= true,
	[SPDK_BDEV_IO_TYPE_NVME_IO_MD]		= true,
	[SPDK_BDEV_IO_TYPE_WRITE_ZEROES]	= true,
	[SPDK_BDEV_IO_TYPE_ZCOPY]		= true,
	[SPDK_BDEV_IO_TYPE_ABORT]		= true,
};

static void
ut_enable_io_type(enum spdk_bdev_io_type io_type, bool enable)
{
	g_io_types_supported[io_type] = enable;
}

static bool
stub_io_type_supported(void *_bdev, enum spdk_bdev_io_type io_type)
{
	return g_io_types_supported[io_type];
}

static struct spdk_bdev_fn_table fn_table = {
	.destruct = stub_destruct,
	.submit_request = stub_submit_request,
	.get_io_channel = bdev_ut_get_io_channel,
	.io_type_supported = stub_io_type_supported,
};

static int
bdev_ut_create_ch(void *io_device, void *ctx_buf)
{
	struct bdev_ut_channel *ch = ctx_buf;

	CU_ASSERT(g_bdev_ut_channel == NULL);
	g_bdev_ut_channel = ch;

	TAILQ_INIT(&ch->outstanding_io);
	ch->outstanding_io_count = 0;
	TAILQ_INIT(&ch->expected_io);
	return 0;
}

static void
bdev_ut_destroy_ch(void *io_device, void *ctx_buf)
{
	CU_ASSERT(g_bdev_ut_channel != NULL);
	g_bdev_ut_channel = NULL;
}

struct spdk_bdev_module bdev_ut_if;

static int
bdev_ut_module_init(void)
{
	spdk_io_device_register(&g_bdev_ut_io_device, bdev_ut_create_ch, bdev_ut_destroy_ch,
				sizeof(struct bdev_ut_channel), NULL);
	spdk_bdev_module_init_done(&bdev_ut_if);
	return 0;
}

static void
bdev_ut_module_fini(void)
{
	spdk_io_device_unregister(&g_bdev_ut_io_device, NULL);
}

struct spdk_bdev_module bdev_ut_if = {
	.name = "bdev_ut",
	.module_init = bdev_ut_module_init,
	.module_fini = bdev_ut_module_fini,
	.async_init = true,
};

static void vbdev_ut_examine(struct spdk_bdev *bdev);

static int
vbdev_ut_module_init(void)
{
	return 0;
}

static void
vbdev_ut_module_fini(void)
{
}

struct spdk_bdev_module vbdev_ut_if = {
	.name = "vbdev_ut",
	.module_init = vbdev_ut_module_init,
	.module_fini = vbdev_ut_module_fini,
	.examine_config = vbdev_ut_examine,
};

SPDK_BDEV_MODULE_REGISTER(bdev_ut, &bdev_ut_if)
SPDK_BDEV_MODULE_REGISTER(vbdev_ut, &vbdev_ut_if)

static void
vbdev_ut_examine(struct spdk_bdev *bdev)
{
	spdk_bdev_module_examine_done(&vbdev_ut_if);
}

static struct spdk_bdev *
allocate_bdev(char *name)
{
	struct spdk_bdev *bdev;
	int rc;

	bdev = calloc(1, sizeof(*bdev));
	SPDK_CU_ASSERT_FATAL(bdev != NULL);

	bdev->name = name;
	bdev->fn_table = &fn_table;
	bdev->module = &bdev_ut_if;
	bdev->blockcnt = 1024;
	bdev->blocklen = 512;

	rc = spdk_bdev_register(bdev);
	CU_ASSERT(rc == 0);

	return bdev;
}

static struct spdk_bdev *
allocate_vbdev(char *name)
{
	struct spdk_bdev *bdev;
	int rc;

	bdev = calloc(1, sizeof(*bdev));
	SPDK_CU_ASSERT_FATAL(bdev != NULL);

	bdev->name = name;
	bdev->fn_table = &fn_table;
	bdev->module = &vbdev_ut_if;

	rc = spdk_bdev_register(bdev);
	CU_ASSERT(rc == 0);

	return bdev;
}

static void
free_bdev(struct spdk_bdev *bdev)
{
	spdk_bdev_unregister(bdev, NULL, NULL);
	poll_threads();
	memset(bdev, 0xFF, sizeof(*bdev));
	free(bdev);
}

static void
free_vbdev(struct spdk_bdev *bdev)
{
	spdk_bdev_unregister(bdev, NULL, NULL);
	poll_threads();
	memset(bdev, 0xFF, sizeof(*bdev));
	free(bdev);
}

static void
get_device_stat_cb(struct spdk_bdev *bdev, struct spdk_bdev_io_stat *stat, void *cb_arg, int rc)
{
	const char *bdev_name;

	CU_ASSERT(bdev != NULL);
	CU_ASSERT(rc == 0);
	bdev_name = spdk_bdev_get_name(bdev);
	CU_ASSERT_STRING_EQUAL(bdev_name, "bdev0");

	free(stat);

	*(bool *)cb_arg = true;
}

static void
bdev_unregister_cb(void *cb_arg, int rc)
{
	g_unregister_arg = cb_arg;
	g_unregister_rc = rc;
}

static void
bdev_ut_event_cb(enum spdk_bdev_event_type type, struct spdk_bdev *bdev, void *event_ctx)
{
}

static void
bdev_open_cb1(enum spdk_bdev_event_type type, struct spdk_bdev *bdev, void *event_ctx)
{
	struct spdk_bdev_desc *desc = *(struct spdk_bdev_desc **)event_ctx;

	g_event_type1 = type;
	if (SPDK_BDEV_EVENT_REMOVE == type) {
		spdk_bdev_close(desc);
	}
}

static void
bdev_open_cb2(enum spdk_bdev_event_type type, struct spdk_bdev *bdev, void *event_ctx)
{
	struct spdk_bdev_desc *desc = *(struct spdk_bdev_desc **)event_ctx;

	g_event_type2 = type;
	if (SPDK_BDEV_EVENT_REMOVE == type) {
		spdk_bdev_close(desc);
	}
}

static void
get_device_stat_test(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_io_stat *stat;
	bool done;

	bdev = allocate_bdev("bdev0");
	stat = calloc(1, sizeof(struct spdk_bdev_io_stat));
	if (stat == NULL) {
		free_bdev(bdev);
		return;
	}

	done = false;
	spdk_bdev_get_device_stat(bdev, stat, get_device_stat_cb, &done);
	while (!done) { poll_threads(); }

	free_bdev(bdev);
}

static void
open_write_test(void)
{
	struct spdk_bdev *bdev[9];
	struct spdk_bdev_desc *desc[9] = {};
	int rc;

	/*
	 * Create a tree of bdevs to test various open w/ write cases.
	 *
	 * bdev0 through bdev3 are physical block devices, such as NVMe
	 * namespaces or Ceph block devices.
	 *
	 * bdev4 is a virtual bdev with multiple base bdevs.  This models
	 * caching or RAID use cases.
	 *
	 * bdev5 through bdev7 are all virtual bdevs with the same base
	 * bdev (except bdev7).  This models partitioning or logical volume
	 * use cases.
	 *
	 * bdev7 is a virtual bdev with multiple base bdevs.  One of the base
	 * bdevs (bdev2) is shared with other virtual bdevs: bdev5 and bdev6.  This
	 * models caching, RAID, partitioning or logical volumes use cases.
	 *
	 * bdev8 is a virtual bdev with multiple base bdevs, but these
	 * base bdevs are themselves virtual bdevs.
	 *
	 *                bdev8
	 *                  |
	 *            +----------+
	 *            |          |
	 *          bdev4      bdev5   bdev6   bdev7
	 *            |          |       |       |
	 *        +---+---+      +---+   +   +---+---+
	 *        |       |           \  |  /         \
	 *      bdev0   bdev1          bdev2          bdev3
	 */

	bdev[0] = allocate_bdev("bdev0");
	rc = spdk_bdev_module_claim_bdev(bdev[0], NULL, &bdev_ut_if);
	CU_ASSERT(rc == 0);

	bdev[1] = allocate_bdev("bdev1");
	rc = spdk_bdev_module_claim_bdev(bdev[1], NULL, &bdev_ut_if);
	CU_ASSERT(rc == 0);

	bdev[2] = allocate_bdev("bdev2");
	rc = spdk_bdev_module_claim_bdev(bdev[2], NULL, &bdev_ut_if);
	CU_ASSERT(rc == 0);

	bdev[3] = allocate_bdev("bdev3");
	rc = spdk_bdev_module_claim_bdev(bdev[3], NULL, &bdev_ut_if);
	CU_ASSERT(rc == 0);

	bdev[4] = allocate_vbdev("bdev4");
	rc = spdk_bdev_module_claim_bdev(bdev[4], NULL, &bdev_ut_if);
	CU_ASSERT(rc == 0);

	bdev[5] = allocate_vbdev("bdev5");
	rc = spdk_bdev_module_claim_bdev(bdev[5], NULL, &bdev_ut_if);
	CU_ASSERT(rc == 0);

	bdev[6] = allocate_vbdev("bdev6");

	bdev[7] = allocate_vbdev("bdev7");

	bdev[8] = allocate_vbdev("bdev8");

	/* Open bdev0 read-only.  This should succeed. */
	rc = spdk_bdev_open_ext("bdev0", false, bdev_ut_event_cb, NULL, &desc[0]);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(desc[0] != NULL);
	CU_ASSERT(bdev[0] == spdk_bdev_desc_get_bdev(desc[0]));
	spdk_bdev_close(desc[0]);

	/*
	 * Open bdev1 read/write.  This should fail since bdev1 has been claimed
	 * by a vbdev module.
	 */
	rc = spdk_bdev_open_ext("bdev1", true, bdev_ut_event_cb, NULL, &desc[1]);
	CU_ASSERT(rc == -EPERM);

	/*
	 * Open bdev4 read/write.  This should fail since bdev4 has been claimed
	 * by a vbdev module.
	 */
	rc = spdk_bdev_open_ext("bdev4", true, bdev_ut_event_cb, NULL, &desc[4]);
	CU_ASSERT(rc == -EPERM);

	/* Open bdev4 read-only.  This should succeed. */
	rc = spdk_bdev_open_ext("bdev4", false, bdev_ut_event_cb, NULL, &desc[4]);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(desc[4] != NULL);
	CU_ASSERT(bdev[4] == spdk_bdev_desc_get_bdev(desc[4]));
	spdk_bdev_close(desc[4]);

	/*
	 * Open bdev8 read/write.  This should succeed since it is unclaimed:
	 * no other vbdev has been stacked on top of it.
	 */
	rc = spdk_bdev_open_ext("bdev8", true, bdev_ut_event_cb, NULL, &desc[8]);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(desc[8] != NULL);
	CU_ASSERT(bdev[8] == spdk_bdev_desc_get_bdev(desc[8]));
	spdk_bdev_close(desc[8]);

	/*
	 * Open bdev5 read/write.  This should fail since bdev5 has been claimed
	 * by a vbdev module.
	 */
	rc = spdk_bdev_open_ext("bdev5", true, bdev_ut_event_cb, NULL, &desc[5]);
	CU_ASSERT(rc == -EPERM);

	/* Open bdev5 read-only.  This should succeed. */
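	/* Read-only opens never conflict with a module claim; only opens with
	 * the write flag set are rejected with -EPERM on a claimed bdev.
	 */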
	rc = spdk_bdev_open_ext("bdev5", false, bdev_ut_event_cb, NULL, &desc[5]);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(desc[5] != NULL);
	CU_ASSERT(bdev[5] == spdk_bdev_desc_get_bdev(desc[5]));
	spdk_bdev_close(desc[5]);

	free_vbdev(bdev[8]);

	free_vbdev(bdev[5]);
	free_vbdev(bdev[6]);
	free_vbdev(bdev[7]);

	free_vbdev(bdev[4]);

	free_bdev(bdev[0]);
	free_bdev(bdev[1]);
	free_bdev(bdev[2]);
	free_bdev(bdev[3]);
}

static void
bytes_to_blocks_test(void)
{
	struct spdk_bdev bdev;
	uint64_t offset_blocks, num_blocks;

	memset(&bdev, 0, sizeof(bdev));

	bdev.blocklen = 512;

	/* All parameters valid */
	offset_blocks = 0;
	num_blocks = 0;
	CU_ASSERT(bdev_bytes_to_blocks(&bdev, 512, &offset_blocks, 1024, &num_blocks) == 0);
	CU_ASSERT(offset_blocks == 1);
	CU_ASSERT(num_blocks == 2);

	/* Offset not a block multiple */
	CU_ASSERT(bdev_bytes_to_blocks(&bdev, 3, &offset_blocks, 512, &num_blocks) != 0);

	/* Length not a block multiple */
	CU_ASSERT(bdev_bytes_to_blocks(&bdev, 512, &offset_blocks, 3, &num_blocks) != 0);

	/* In case blocklen is not a power of two */
	bdev.blocklen = 100;
	CU_ASSERT(bdev_bytes_to_blocks(&bdev, 100, &offset_blocks, 200, &num_blocks) == 0);
	CU_ASSERT(offset_blocks == 1);
	CU_ASSERT(num_blocks == 2);

	/* Offset not a block multiple */
	CU_ASSERT(bdev_bytes_to_blocks(&bdev, 3, &offset_blocks, 100, &num_blocks) != 0);

	/* Length not a block multiple */
	CU_ASSERT(bdev_bytes_to_blocks(&bdev, 100, &offset_blocks, 3, &num_blocks) != 0);
}

static void
num_blocks_test(void)
{
	struct spdk_bdev bdev;
	struct spdk_bdev_desc *desc = NULL;
	struct spdk_bdev_desc *desc_ext = NULL;
	int rc;

	memset(&bdev, 0, sizeof(bdev));
	bdev.name = "num_blocks";
	bdev.fn_table = &fn_table;
	bdev.module = &bdev_ut_if;
	spdk_bdev_register(&bdev);
	spdk_bdev_notify_blockcnt_change(&bdev, 50);

	/* Growing block number */
	CU_ASSERT(spdk_bdev_notify_blockcnt_change(&bdev, 70) == 0);
	/* Shrinking block number */
	CU_ASSERT(spdk_bdev_notify_blockcnt_change(&bdev, 30) == 0);

	/* In case bdev opened */
	rc = spdk_bdev_open(&bdev, false, NULL, NULL, &desc);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(desc != NULL);

	/* Growing block number */
	CU_ASSERT(spdk_bdev_notify_blockcnt_change(&bdev, 80) == 0);
	/* Shrinking block number */
	CU_ASSERT(spdk_bdev_notify_blockcnt_change(&bdev, 20) != 0);

	/* In case bdev opened with ext API */
	rc = spdk_bdev_open_ext("num_blocks", false, bdev_open_cb1, &desc_ext, &desc_ext);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(desc_ext != NULL);
	CU_ASSERT(&bdev == spdk_bdev_desc_get_bdev(desc_ext));

	g_event_type1 = 0xFF;
	/* Growing block number */
	CU_ASSERT(spdk_bdev_notify_blockcnt_change(&bdev, 90) == 0);

	poll_threads();
	CU_ASSERT_EQUAL(g_event_type1, SPDK_BDEV_EVENT_RESIZE);

	g_event_type1 = 0xFF;
	/* Growing block number and closing */
	CU_ASSERT(spdk_bdev_notify_blockcnt_change(&bdev, 100) == 0);

	spdk_bdev_close(desc);
	spdk_bdev_close(desc_ext);
	spdk_bdev_unregister(&bdev, NULL, NULL);

	poll_threads();

	/* Callback is not called for closed device */
	CU_ASSERT_EQUAL(g_event_type1, 0xFF);
}

static void
io_valid_test(void)
{
	struct spdk_bdev bdev;

	memset(&bdev, 0, sizeof(bdev));
	bdev.blocklen = 512;
	spdk_bdev_notify_blockcnt_change(&bdev, 100);

	/* All parameters valid */
	CU_ASSERT(bdev_io_valid_blocks(&bdev, 1, 2) == true);

	/* Last valid block */
	CU_ASSERT(bdev_io_valid_blocks(&bdev, 99, 1) == true);

	/* Offset past end of bdev */
	CU_ASSERT(bdev_io_valid_blocks(&bdev, 100, 1) == false);

	/* Offset + length past end of bdev */
	CU_ASSERT(bdev_io_valid_blocks(&bdev, 99, 2) == false);

	/* Offset near end of uint64_t range (2^64 - 1) */
	CU_ASSERT(bdev_io_valid_blocks(&bdev, 18446744073709551615ULL, 1) == false);
}

static void
alias_add_del_test(void)
{
	struct spdk_bdev *bdev[3];
	int rc;

	/* Creating and registering bdevs */
	bdev[0] = allocate_bdev("bdev0");
	SPDK_CU_ASSERT_FATAL(bdev[0] != 0);

	bdev[1] = allocate_bdev("bdev1");
	SPDK_CU_ASSERT_FATAL(bdev[1] != 0);

	bdev[2] = allocate_bdev("bdev2");
	SPDK_CU_ASSERT_FATAL(bdev[2] != 0);

	poll_threads();

	/*
	 * Try adding an alias identical to the name.
	 * The alias is identical to the name, so it can not be added to the aliases list.
	 */
	rc = spdk_bdev_alias_add(bdev[0], bdev[0]->name);
	CU_ASSERT(rc == -EEXIST);

	/*
	 * Try to add an empty alias;
	 * this one should fail.
	 */
	rc = spdk_bdev_alias_add(bdev[0], NULL);
	CU_ASSERT(rc == -EINVAL);

	/* Try adding the same alias to two different registered bdevs */

	/* Alias is used for the first time, so this one should pass */
	rc = spdk_bdev_alias_add(bdev[0], "proper alias 0");
	CU_ASSERT(rc == 0);

	/* Alias was added to another bdev, so this one should fail */
	rc = spdk_bdev_alias_add(bdev[1], "proper alias 0");
	CU_ASSERT(rc == -EEXIST);

	/* Alias is used for the first time, so this one should pass */
	rc = spdk_bdev_alias_add(bdev[1], "proper alias 1");
	CU_ASSERT(rc == 0);

	/* Try removing an alias from registered bdevs */

	/* Alias is not on a bdev aliases list, so this one should fail */
	rc = spdk_bdev_alias_del(bdev[0], "not existing");
	CU_ASSERT(rc == -ENOENT);

	/* Alias is present on a bdev aliases list, so this one should pass */
	rc = spdk_bdev_alias_del(bdev[0], "proper alias 0");
	CU_ASSERT(rc == 0);

	/* Alias is present on a bdev aliases list, so this one should pass */
	rc = spdk_bdev_alias_del(bdev[1], "proper alias 1");
	CU_ASSERT(rc == 0);

	/* Try to remove the name instead of an alias: this one should fail, the name
	 * cannot be changed or removed */
	rc = spdk_bdev_alias_del(bdev[0], bdev[0]->name);
	CU_ASSERT(rc != 0);

	/* Try to delete all aliases from an empty alias list */
	spdk_bdev_alias_del_all(bdev[2]);
	SPDK_CU_ASSERT_FATAL(TAILQ_EMPTY(&bdev[2]->aliases));

	/* Try to delete all aliases from a non-empty alias list */
	rc = spdk_bdev_alias_add(bdev[2], "alias0");
	CU_ASSERT(rc == 0);
	rc = spdk_bdev_alias_add(bdev[2], "alias1");
	CU_ASSERT(rc == 0);
	spdk_bdev_alias_del_all(bdev[2]);
	CU_ASSERT(TAILQ_EMPTY(&bdev[2]->aliases));

	/* Unregister and free bdevs */
	spdk_bdev_unregister(bdev[0], NULL, NULL);
	spdk_bdev_unregister(bdev[1], NULL, NULL);
	spdk_bdev_unregister(bdev[2], NULL, NULL);

	poll_threads();

	free(bdev[0]);
	free(bdev[1]);
	free(bdev[2]);
}

static void
io_done(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
{
	g_io_done = true;
	g_io_status = bdev_io->internal.status;
	spdk_bdev_free_io(bdev_io);
}

static void
bdev_init_cb(void *arg, int rc)
{
	CU_ASSERT(rc == 0);
}

static void
bdev_fini_cb(void *arg)
{
}

struct bdev_ut_io_wait_entry {
	struct spdk_bdev_io_wait_entry	entry;
	struct spdk_io_channel		*io_ch;
	struct spdk_bdev_desc		*desc;
	bool				submitted;
};

static void
io_wait_cb(void *arg)
{
	struct bdev_ut_io_wait_entry *entry = arg;
	int rc;

	rc = spdk_bdev_read_blocks(entry->desc, entry->io_ch, NULL, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	entry->submitted = true;
}

static void
bdev_io_types_test(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc = NULL;
	struct spdk_io_channel *io_ch;
	struct spdk_bdev_opts bdev_opts = {};
	int rc;

	spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts));
	bdev_opts.bdev_io_pool_size = 4;
	bdev_opts.bdev_io_cache_size = 2;

	rc = spdk_bdev_set_opts(&bdev_opts);
	CU_ASSERT(rc == 0);
	spdk_bdev_initialize(bdev_init_cb, NULL);
	poll_threads();

	bdev = allocate_bdev("bdev0");

	rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc);
	CU_ASSERT(rc == 0);
	poll_threads();
	SPDK_CU_ASSERT_FATAL(desc != NULL);
	CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc));
	io_ch = spdk_bdev_get_io_channel(desc);
	CU_ASSERT(io_ch != NULL);

	/* WRITE and WRITE ZEROES are not supported */
	ut_enable_io_type(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, false);
	ut_enable_io_type(SPDK_BDEV_IO_TYPE_WRITE, false);
	rc = spdk_bdev_write_zeroes_blocks(desc, io_ch, 0, 128, io_done, NULL);
	CU_ASSERT(rc == -ENOTSUP);
	ut_enable_io_type(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, true);
	ut_enable_io_type(SPDK_BDEV_IO_TYPE_WRITE, true);

	spdk_put_io_channel(io_ch);
	spdk_bdev_close(desc);
	free_bdev(bdev);
	spdk_bdev_finish(bdev_fini_cb, NULL);
	poll_threads();
}

static void
bdev_io_wait_test(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc = NULL;
	struct spdk_io_channel *io_ch;
	struct spdk_bdev_opts bdev_opts = {};
	struct bdev_ut_io_wait_entry io_wait_entry;
	struct bdev_ut_io_wait_entry io_wait_entry2;
	int rc;

	spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts));
	bdev_opts.bdev_io_pool_size = 4;
	bdev_opts.bdev_io_cache_size = 2;

	rc = spdk_bdev_set_opts(&bdev_opts);
	CU_ASSERT(rc == 0);
	spdk_bdev_initialize(bdev_init_cb, NULL);
	poll_threads();

	bdev = allocate_bdev("bdev0");

	rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc);
	CU_ASSERT(rc == 0);
	poll_threads();
	SPDK_CU_ASSERT_FATAL(desc != NULL);
	CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc));
	io_ch = spdk_bdev_get_io_channel(desc);
	CU_ASSERT(io_ch != NULL);

	rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 4);

	rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, NULL);
	CU_ASSERT(rc == -ENOMEM);

	io_wait_entry.entry.bdev = bdev;
	io_wait_entry.entry.cb_fn = io_wait_cb;
	io_wait_entry.entry.cb_arg = &io_wait_entry;
	io_wait_entry.io_ch = io_ch;
	io_wait_entry.desc = desc;
	io_wait_entry.submitted = false;
	/* Cannot use the same io_wait_entry for two different calls. */
	memcpy(&io_wait_entry2, &io_wait_entry, sizeof(io_wait_entry));
	io_wait_entry2.entry.cb_arg = &io_wait_entry2;

	/* Queue two I/O waits. */
	rc = spdk_bdev_queue_io_wait(bdev, io_ch, &io_wait_entry.entry);
	CU_ASSERT(rc == 0);
	CU_ASSERT(io_wait_entry.submitted == false);
	rc = spdk_bdev_queue_io_wait(bdev, io_ch, &io_wait_entry2.entry);
	CU_ASSERT(rc == 0);
	CU_ASSERT(io_wait_entry2.submitted == false);

	stub_complete_io(1);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 4);
	CU_ASSERT(io_wait_entry.submitted == true);
	CU_ASSERT(io_wait_entry2.submitted == false);

	stub_complete_io(1);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 4);
	CU_ASSERT(io_wait_entry2.submitted == true);

	stub_complete_io(4);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);

	spdk_put_io_channel(io_ch);
	spdk_bdev_close(desc);
	free_bdev(bdev);
	spdk_bdev_finish(bdev_fini_cb, NULL);
	poll_threads();
}

static void
bdev_io_spans_split_test(void)
{
	struct spdk_bdev bdev;
	struct spdk_bdev_io bdev_io;
	struct iovec iov[BDEV_IO_NUM_CHILD_IOV];

	memset(&bdev, 0, sizeof(bdev));
	bdev_io.u.bdev.iovs = iov;

	bdev.optimal_io_boundary = 0;
	bdev.max_segment_size = 0;
	bdev.max_num_segments = 0;
	bdev_io.bdev = &bdev;

	/* bdev has no optimal_io_boundary and no max size set - so this should return false. */
	CU_ASSERT(bdev_io_should_split(&bdev_io) == false);

	bdev.split_on_optimal_io_boundary = true;
	bdev.optimal_io_boundary = 32;
	bdev_io.type = SPDK_BDEV_IO_TYPE_RESET;

	/* RESETs are not based on LBAs - so this should return false. */
	CU_ASSERT(bdev_io_should_split(&bdev_io) == false);

	bdev_io.type = SPDK_BDEV_IO_TYPE_READ;
	bdev_io.u.bdev.offset_blocks = 0;
	bdev_io.u.bdev.num_blocks = 32;

	/* This I/O runs right up to, but does not cross, the boundary - so this should return false. */
	CU_ASSERT(bdev_io_should_split(&bdev_io) == false);

	bdev_io.u.bdev.num_blocks = 33;

	/* This I/O spans a boundary. */
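	/* Offset 0 + 33 blocks ends at LBA 33 and therefore crosses the 32-block
	 * optimal boundary at LBA 32, so the I/O must be split.
	 */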
	CU_ASSERT(bdev_io_should_split(&bdev_io) == true);

	bdev_io.u.bdev.num_blocks = 32;
	bdev.max_segment_size = 512 * 32;
	bdev.max_num_segments = 1;
	bdev_io.u.bdev.iovcnt = 1;
	iov[0].iov_len = 512;

	/* Does not cross the boundary and does not exceed max_size or max_segs */
	CU_ASSERT(bdev_io_should_split(&bdev_io) == false);

	bdev.split_on_optimal_io_boundary = false;
	bdev.max_segment_size = 512;
	bdev.max_num_segments = 1;
	bdev_io.u.bdev.iovcnt = 2;

	/* Exceeds max_segs */
	CU_ASSERT(bdev_io_should_split(&bdev_io) == true);

	bdev.max_num_segments = 2;
	iov[0].iov_len = 513;
	iov[1].iov_len = 512;

	/* Exceeds max_sizes */
	CU_ASSERT(bdev_io_should_split(&bdev_io) == true);
}

static void
bdev_io_boundary_split_test(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc = NULL;
	struct spdk_io_channel *io_ch;
	struct spdk_bdev_opts bdev_opts = {};
	struct iovec iov[BDEV_IO_NUM_CHILD_IOV * 2];
	struct ut_expected_io *expected_io;
	void *md_buf = (void *)0xFF000000;
	uint64_t i;
	int rc;

	spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts));
	bdev_opts.bdev_io_pool_size = 512;
	bdev_opts.bdev_io_cache_size = 64;

	rc = spdk_bdev_set_opts(&bdev_opts);
	CU_ASSERT(rc == 0);
	spdk_bdev_initialize(bdev_init_cb, NULL);

	bdev = allocate_bdev("bdev0");

	rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(desc != NULL);
	CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc));
	io_ch = spdk_bdev_get_io_channel(desc);
	CU_ASSERT(io_ch != NULL);

	bdev->optimal_io_boundary = 16;
	bdev->split_on_optimal_io_boundary = false;

	g_io_done = false;

	/* First test that the I/O does not get split if split_on_optimal_io_boundary == false. */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 14, 8, 1);
	ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 8 * 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 8, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);

	bdev->split_on_optimal_io_boundary = true;
	bdev->md_interleave = false;
	bdev->md_len = 8;

	/* Now test that a single-vector command is split correctly.
	 * Offset 14, length 8, payload 0xF000
	 *  Child - Offset 14, length 2, payload 0xF000
	 *  Child - Offset 16, length 6, payload 0xF000 + 2 * 512
	 *
	 * Set up the expected values before calling spdk_bdev_read_blocks
	 */
	g_io_done = false;
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 14, 2, 1);
	expected_io->md_buf = md_buf;
	ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 2 * 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 16, 6, 1);
	expected_io->md_buf = md_buf + 2 * 8;
	ut_expected_io_set_iov(expected_io, 0, (void *)(0xF000 + 2 * 512), 6 * 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	/* spdk_bdev_read_blocks will submit the first child immediately. */
	rc = spdk_bdev_read_blocks_with_md(desc, io_ch, (void *)0xF000, md_buf,
					   14, 8, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2);
	stub_complete_io(2);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);

	/* Now set up a more complex, multi-vector command that needs to be split,
	 * including splitting iovecs.
	 */
	iov[0].iov_base = (void *)0x10000;
	iov[0].iov_len = 512;
	iov[1].iov_base = (void *)0x20000;
	iov[1].iov_len = 20 * 512;
	iov[2].iov_base = (void *)0x30000;
	iov[2].iov_len = 11 * 512;

	g_io_done = false;
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 14, 2, 2);
	expected_io->md_buf = md_buf;
	ut_expected_io_set_iov(expected_io, 0, (void *)0x10000, 512);
	ut_expected_io_set_iov(expected_io, 1, (void *)0x20000, 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 16, 16, 1);
	expected_io->md_buf = md_buf + 2 * 8;
	ut_expected_io_set_iov(expected_io, 0, (void *)(0x20000 + 512), 16 * 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 32, 14, 2);
	expected_io->md_buf = md_buf + 18 * 8;
	ut_expected_io_set_iov(expected_io, 0, (void *)(0x20000 + 17 * 512), 3 * 512);
	ut_expected_io_set_iov(expected_io, 1, (void *)0x30000, 11 * 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	rc = spdk_bdev_writev_blocks_with_md(desc, io_ch, iov, 3, md_buf,
					     14, 32, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 3);
	stub_complete_io(3);
	CU_ASSERT(g_io_done == true);

	/* Test multi vector command that needs to be split by strip and then needs to be
	 * split further due to the capacity of child iovs.
	 */
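	/* Assuming BDEV_IO_NUM_CHILD_IOV is 32 (its value in the SPDK tree), the
	 * 64-block read below splits at the 32-block boundary into two children,
	 * each carrying a full set of 32 single-block iovs.
	 */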
	for (i = 0; i < BDEV_IO_NUM_CHILD_IOV * 2; i++) {
		iov[i].iov_base = (void *)((i + 1) * 0x10000);
		iov[i].iov_len = 512;
	}

	bdev->optimal_io_boundary = BDEV_IO_NUM_CHILD_IOV;
	g_io_done = false;
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0, BDEV_IO_NUM_CHILD_IOV,
					   BDEV_IO_NUM_CHILD_IOV);
	expected_io->md_buf = md_buf;
	for (i = 0; i < BDEV_IO_NUM_CHILD_IOV; i++) {
		ut_expected_io_set_iov(expected_io, i, (void *)((i + 1) * 0x10000), 512);
	}
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, BDEV_IO_NUM_CHILD_IOV,
					   BDEV_IO_NUM_CHILD_IOV, BDEV_IO_NUM_CHILD_IOV);
	expected_io->md_buf = md_buf + BDEV_IO_NUM_CHILD_IOV * 8;
	for (i = 0; i < BDEV_IO_NUM_CHILD_IOV; i++) {
		ut_expected_io_set_iov(expected_io, i,
				       (void *)((i + 1 + BDEV_IO_NUM_CHILD_IOV) * 0x10000), 512);
	}
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	rc = spdk_bdev_readv_blocks_with_md(desc, io_ch, iov, BDEV_IO_NUM_CHILD_IOV * 2, md_buf,
					    0, BDEV_IO_NUM_CHILD_IOV * 2, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);

	/* Test multi vector command that needs to be split by strip and then needs to be
	 * split further due to the capacity of child iovs. In this case, the length of
	 * the rest of the iovec array with an I/O boundary is a multiple of the block size.
	 */

	/* Fill the iovec array for exactly one boundary. The iovec cnt for this boundary
	 * is BDEV_IO_NUM_CHILD_IOV + 1, which exceeds the capacity of the child iovs.
	 */
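	/* Byte accounting for the first child: 30 iovs of 512 B plus 2 iovs of 256 B
	 * fill all 32 child iov slots with 15872 B = 31 whole blocks, one short of
	 * the 32-block boundary. The remaining two 512-byte parent iovs then become
	 * two single-block children, the second starting at the boundary.
	 */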
	for (i = 0; i < BDEV_IO_NUM_CHILD_IOV - 2; i++) {
		iov[i].iov_base = (void *)((i + 1) * 0x10000);
		iov[i].iov_len = 512;
	}
	for (i = BDEV_IO_NUM_CHILD_IOV - 2; i < BDEV_IO_NUM_CHILD_IOV; i++) {
		iov[i].iov_base = (void *)((i + 1) * 0x10000);
		iov[i].iov_len = 256;
	}
	iov[BDEV_IO_NUM_CHILD_IOV].iov_base = (void *)((BDEV_IO_NUM_CHILD_IOV + 1) * 0x10000);
	iov[BDEV_IO_NUM_CHILD_IOV].iov_len = 512;

	/* Add an extra iovec to trigger split */
	iov[BDEV_IO_NUM_CHILD_IOV + 1].iov_base = (void *)((BDEV_IO_NUM_CHILD_IOV + 2) * 0x10000);
	iov[BDEV_IO_NUM_CHILD_IOV + 1].iov_len = 512;

	bdev->optimal_io_boundary = BDEV_IO_NUM_CHILD_IOV;
	g_io_done = false;
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0,
					   BDEV_IO_NUM_CHILD_IOV - 1, BDEV_IO_NUM_CHILD_IOV);
	expected_io->md_buf = md_buf;
	for (i = 0; i < BDEV_IO_NUM_CHILD_IOV - 2; i++) {
		ut_expected_io_set_iov(expected_io, i,
				       (void *)((i + 1) * 0x10000), 512);
	}
	for (i = BDEV_IO_NUM_CHILD_IOV - 2; i < BDEV_IO_NUM_CHILD_IOV; i++) {
		ut_expected_io_set_iov(expected_io, i,
				       (void *)((i + 1) * 0x10000), 256);
	}
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, BDEV_IO_NUM_CHILD_IOV - 1,
					   1, 1);
	expected_io->md_buf = md_buf + (BDEV_IO_NUM_CHILD_IOV - 1) * 8;
	ut_expected_io_set_iov(expected_io, 0,
			       (void *)((BDEV_IO_NUM_CHILD_IOV + 1) * 0x10000), 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, BDEV_IO_NUM_CHILD_IOV,
					   1, 1);
	expected_io->md_buf = md_buf + BDEV_IO_NUM_CHILD_IOV * 8;
	ut_expected_io_set_iov(expected_io, 0,
			       (void *)((BDEV_IO_NUM_CHILD_IOV + 2) * 0x10000), 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	rc = spdk_bdev_readv_blocks_with_md(desc, io_ch, iov, BDEV_IO_NUM_CHILD_IOV + 2, md_buf,
					    0, BDEV_IO_NUM_CHILD_IOV + 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2);
	stub_complete_io(2);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);

	/* Test multi vector command that needs to be split by strip and then needs to be
	 * split further due to the capacity of child iovs. In this case the child request
	 * offset should be rewound to the last aligned offset so that the I/O succeeds
	 * without error.
	 */
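	/* Filling all 32 child iov slots would take iov[0..30] (31 x 512 B) plus the
	 * 256-byte iov[31], i.e. 16128 B, which is not a whole number of blocks. The
	 * split rewinds to 31 whole blocks (31 iovs); the two 256-byte halves then
	 * pair up into the next single-block child.
	 */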
	for (i = 0; i < BDEV_IO_NUM_CHILD_IOV - 1; i++) {
		iov[i].iov_base = (void *)((i + 1) * 0x10000);
		iov[i].iov_len = 512;
	}
	iov[BDEV_IO_NUM_CHILD_IOV - 1].iov_base = (void *)(BDEV_IO_NUM_CHILD_IOV * 0x10000);
	iov[BDEV_IO_NUM_CHILD_IOV - 1].iov_len = 256;

	iov[BDEV_IO_NUM_CHILD_IOV].iov_base = (void *)((BDEV_IO_NUM_CHILD_IOV + 1) * 0x10000);
	iov[BDEV_IO_NUM_CHILD_IOV].iov_len = 256;

	iov[BDEV_IO_NUM_CHILD_IOV + 1].iov_base = (void *)((BDEV_IO_NUM_CHILD_IOV + 2) * 0x10000);
	iov[BDEV_IO_NUM_CHILD_IOV + 1].iov_len = 512;

	bdev->optimal_io_boundary = BDEV_IO_NUM_CHILD_IOV;
	g_io_done = false;
	g_io_status = 0;
	/* The first expected io should run from offset 0 to BDEV_IO_NUM_CHILD_IOV - 1 */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0,
					   BDEV_IO_NUM_CHILD_IOV - 1, BDEV_IO_NUM_CHILD_IOV - 1);
	expected_io->md_buf = md_buf;
	for (i = 0; i < BDEV_IO_NUM_CHILD_IOV - 1; i++) {
		ut_expected_io_set_iov(expected_io, i,
				       (void *)((i + 1) * 0x10000), 512);
	}
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
	/* The second expected io should run from offset BDEV_IO_NUM_CHILD_IOV - 1 to BDEV_IO_NUM_CHILD_IOV */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, BDEV_IO_NUM_CHILD_IOV - 1,
					   1, 2);
	expected_io->md_buf = md_buf + (BDEV_IO_NUM_CHILD_IOV - 1) * 8;
	ut_expected_io_set_iov(expected_io, 0,
			       (void *)(BDEV_IO_NUM_CHILD_IOV * 0x10000), 256);
	ut_expected_io_set_iov(expected_io, 1,
			       (void *)((BDEV_IO_NUM_CHILD_IOV + 1) * 0x10000), 256);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
	/* The third expected io should run from offset BDEV_IO_NUM_CHILD_IOV to BDEV_IO_NUM_CHILD_IOV + 1 */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, BDEV_IO_NUM_CHILD_IOV,
					   1, 1);
	expected_io->md_buf = md_buf + BDEV_IO_NUM_CHILD_IOV * 8;
	ut_expected_io_set_iov(expected_io, 0,
			       (void *)((BDEV_IO_NUM_CHILD_IOV + 2) * 0x10000), 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	rc = spdk_bdev_readv_blocks_with_md(desc, io_ch, iov, BDEV_IO_NUM_CHILD_IOV * 2, md_buf,
					    0, BDEV_IO_NUM_CHILD_IOV + 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2);
	stub_complete_io(2);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);

	/* Test multi vector command that needs to be split due to the IO boundary and
	 * the capacity of child iovs. Especially test the case when the command is
	 * split due to the capacity of child iovs and the tail address is not aligned
	 * with the block size, so it is rewound to the aligned address.
	 *
	 * The iovecs used in the read request are complex but are based on the data
	 * collected in a real issue. We change the base addresses but keep the lengths
	 * so as not to lose the credibility of the test.
	 */
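	/* With a 128-block (65536 B) boundary, the 543-block read below is expected
	 * to produce 7 children: blocks 0-125 (32-iov capacity), 126-127 (boundary),
	 * 128-255, 256-383, 384-511 (each a full boundary window), 512-541 (capacity
	 * again), and finally block 542.
	 */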
	bdev->optimal_io_boundary = 128;
	g_io_done = false;
	g_io_status = 0;

	for (i = 0; i < 31; i++) {
		iov[i].iov_base = (void *)(0xFEED0000000 + (i << 20));
		iov[i].iov_len = 1024;
	}
	iov[31].iov_base = (void *)0xFEED1F00000;
	iov[31].iov_len = 32768;
	iov[32].iov_base = (void *)0xFEED2000000;
	iov[32].iov_len = 160;
	iov[33].iov_base = (void *)0xFEED2100000;
	iov[33].iov_len = 4096;
	iov[34].iov_base = (void *)0xFEED2200000;
	iov[34].iov_len = 4096;
	iov[35].iov_base = (void *)0xFEED2300000;
	iov[35].iov_len = 4096;
	iov[36].iov_base = (void *)0xFEED2400000;
	iov[36].iov_len = 4096;
	iov[37].iov_base = (void *)0xFEED2500000;
	iov[37].iov_len = 4096;
	iov[38].iov_base = (void *)0xFEED2600000;
	iov[38].iov_len = 4096;
	iov[39].iov_base = (void *)0xFEED2700000;
	iov[39].iov_len = 4096;
	iov[40].iov_base = (void *)0xFEED2800000;
	iov[40].iov_len = 4096;
	iov[41].iov_base = (void *)0xFEED2900000;
	iov[41].iov_len = 4096;
	iov[42].iov_base = (void *)0xFEED2A00000;
	iov[42].iov_len = 4096;
	iov[43].iov_base = (void *)0xFEED2B00000;
	iov[43].iov_len = 12288;
	iov[44].iov_base = (void *)0xFEED2C00000;
	iov[44].iov_len = 8192;
	iov[45].iov_base = (void *)0xFEED2F00000;
	iov[45].iov_len = 4096;
	iov[46].iov_base = (void *)0xFEED3000000;
	iov[46].iov_len = 4096;
	iov[47].iov_base = (void *)0xFEED3100000;
	iov[47].iov_len = 4096;
	iov[48].iov_base = (void *)0xFEED3200000;
	iov[48].iov_len = 24576;
	iov[49].iov_base = (void *)0xFEED3300000;
	iov[49].iov_len = 16384;
	iov[50].iov_base = (void *)0xFEED3400000;
	iov[50].iov_len = 12288;
	iov[51].iov_base = (void *)0xFEED3500000;
	iov[51].iov_len = 4096;
	iov[52].iov_base = (void *)0xFEED3600000;
	iov[52].iov_len = 4096;
	iov[53].iov_base = (void *)0xFEED3700000;
	iov[53].iov_len = 4096;
	iov[54].iov_base = (void *)0xFEED3800000;
	iov[54].iov_len = 28672;
	iov[55].iov_base = (void *)0xFEED3900000;
	iov[55].iov_len = 20480;
	iov[56].iov_base = (void *)0xFEED3A00000;
	iov[56].iov_len = 4096;
	iov[57].iov_base = (void *)0xFEED3B00000;
	iov[57].iov_len = 12288;
	iov[58].iov_base = (void *)0xFEED3C00000;
	iov[58].iov_len = 4096;
	iov[59].iov_base = (void *)0xFEED3D00000;
	iov[59].iov_len = 4096;
	iov[60].iov_base = (void *)0xFEED3E00000;
	iov[60].iov_len = 352;

	/* The 1st child IO must run from iov[0] to iov[31], split by the capacity
	 * of the child iovs.
	 */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0, 126, 32);
	expected_io->md_buf = md_buf;
	for (i = 0; i < 32; i++) {
		ut_expected_io_set_iov(expected_io, i, iov[i].iov_base, iov[i].iov_len);
	}
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	/* The 2nd child IO must run from iov[32] to the first 864 bytes of iov[33],
	 * split by the IO boundary requirement.
	 */
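	/* Sanity of the first two children: iov[0..30] (31 x 1024 B) plus iov[31]
	 * (32768 B) is 64512 B = 126 blocks and consumes all 32 child iovs; the
	 * remaining 2 blocks up to the 128-block boundary are iov[32] (160 B) plus
	 * the first 864 B of iov[33] (160 + 864 = 1024 B).
	 */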
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 126, 2, 2);
	expected_io->md_buf = md_buf + 126 * 8;
	ut_expected_io_set_iov(expected_io, 0, iov[32].iov_base, iov[32].iov_len);
	ut_expected_io_set_iov(expected_io, 1, iov[33].iov_base, 864);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	/* The 3rd child IO must run from the remaining 3232 bytes of iov[33] to
	 * the first 864 bytes of iov[46], split by the IO boundary requirement.
	 */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 128, 128, 14);
	expected_io->md_buf = md_buf + 128 * 8;
	ut_expected_io_set_iov(expected_io, 0, (void *)((uintptr_t)iov[33].iov_base + 864),
			       iov[33].iov_len - 864);
	ut_expected_io_set_iov(expected_io, 1, iov[34].iov_base, iov[34].iov_len);
	ut_expected_io_set_iov(expected_io, 2, iov[35].iov_base, iov[35].iov_len);
	ut_expected_io_set_iov(expected_io, 3, iov[36].iov_base, iov[36].iov_len);
	ut_expected_io_set_iov(expected_io, 4, iov[37].iov_base, iov[37].iov_len);
	ut_expected_io_set_iov(expected_io, 5, iov[38].iov_base, iov[38].iov_len);
	ut_expected_io_set_iov(expected_io, 6, iov[39].iov_base, iov[39].iov_len);
	ut_expected_io_set_iov(expected_io, 7, iov[40].iov_base, iov[40].iov_len);
	ut_expected_io_set_iov(expected_io, 8, iov[41].iov_base, iov[41].iov_len);
	ut_expected_io_set_iov(expected_io, 9, iov[42].iov_base, iov[42].iov_len);
	ut_expected_io_set_iov(expected_io, 10, iov[43].iov_base, iov[43].iov_len);
	ut_expected_io_set_iov(expected_io, 11, iov[44].iov_base, iov[44].iov_len);
	ut_expected_io_set_iov(expected_io, 12, iov[45].iov_base, iov[45].iov_len);
	ut_expected_io_set_iov(expected_io, 13, iov[46].iov_base, 864);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	/* The 4th child IO must run from the remaining 3232 bytes of iov[46] to the
	 * first 864 bytes of iov[52], split by the IO boundary requirement.
	 */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 256, 128, 7);
	expected_io->md_buf = md_buf + 256 * 8;
	ut_expected_io_set_iov(expected_io, 0, (void *)((uintptr_t)iov[46].iov_base + 864),
			       iov[46].iov_len - 864);
	ut_expected_io_set_iov(expected_io, 1, iov[47].iov_base, iov[47].iov_len);
	ut_expected_io_set_iov(expected_io, 2, iov[48].iov_base, iov[48].iov_len);
	ut_expected_io_set_iov(expected_io, 3, iov[49].iov_base, iov[49].iov_len);
	ut_expected_io_set_iov(expected_io, 4, iov[50].iov_base, iov[50].iov_len);
	ut_expected_io_set_iov(expected_io, 5, iov[51].iov_base, iov[51].iov_len);
	ut_expected_io_set_iov(expected_io, 6, iov[52].iov_base, 864);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	/* The 5th child IO must run from the remaining 3232 bytes of iov[52] to
	 * the first 4960 bytes of iov[57], split by the IO boundary requirement.
	 */
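	/* Check: 3232 (rest of iov[52]) + 4096 + 28672 + 20480 + 4096 (iov[53..56])
	 * = 60576 B, so 65536 - 60576 = 4960 B of iov[57] completes this
	 * 128-block child.
	 */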
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 384, 128, 6);
	expected_io->md_buf = md_buf + 384 * 8;
	ut_expected_io_set_iov(expected_io, 0, (void *)((uintptr_t)iov[52].iov_base + 864),
			       iov[52].iov_len - 864);
	ut_expected_io_set_iov(expected_io, 1, iov[53].iov_base, iov[53].iov_len);
	ut_expected_io_set_iov(expected_io, 2, iov[54].iov_base, iov[54].iov_len);
	ut_expected_io_set_iov(expected_io, 3, iov[55].iov_base, iov[55].iov_len);
	ut_expected_io_set_iov(expected_io, 4, iov[56].iov_base, iov[56].iov_len);
	ut_expected_io_set_iov(expected_io, 5, iov[57].iov_base, 4960);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	/* The 6th child IO must run from the remaining 7328 bytes of iov[57]
	 * to the first 3936 bytes of iov[59], split by the capacity of the child iovs.
	 */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 512, 30, 3);
	expected_io->md_buf = md_buf + 512 * 8;
	ut_expected_io_set_iov(expected_io, 0, (void *)((uintptr_t)iov[57].iov_base + 4960),
			       iov[57].iov_len - 4960);
	ut_expected_io_set_iov(expected_io, 1, iov[58].iov_base, iov[58].iov_len);
	ut_expected_io_set_iov(expected_io, 2, iov[59].iov_base, 3936);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	/* The 7th child IO is made of the remaining 160 bytes of iov[59] and iov[60]. */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 542, 1, 2);
	expected_io->md_buf = md_buf + 542 * 8;
	ut_expected_io_set_iov(expected_io, 0, (void *)((uintptr_t)iov[59].iov_base + 3936),
			       iov[59].iov_len - 3936);
	ut_expected_io_set_iov(expected_io, 1, iov[60].iov_base, iov[60].iov_len);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	rc = spdk_bdev_readv_blocks_with_md(desc, io_ch, iov, 61, md_buf,
					    0, 543, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 5);
	stub_complete_io(5);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
	CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS);

	/* Test a WRITE_ZEROES that would span an I/O boundary. WRITE_ZEROES should not be
	 * split, so test that.
	 */
	bdev->optimal_io_boundary = 15;
	g_io_done = false;
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, 9, 36, 0);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	rc = spdk_bdev_write_zeroes_blocks(desc, io_ch, 9, 36, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == true);

	/* Test an UNMAP.  This should also not be split. */
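	/* The UNMAP below covers blocks 15-16 and would cross the 16-block optimal
	 * boundary, but like WRITE_ZEROES it is passed through as a single child.
	 */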
	bdev->optimal_io_boundary = 16;
	g_io_done = false;
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_UNMAP, 15, 2, 0);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	rc = spdk_bdev_unmap_blocks(desc, io_ch, 15, 2, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == true);

	/* Test a FLUSH.  This should also not be split. */
	bdev->optimal_io_boundary = 16;
	g_io_done = false;
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_FLUSH, 15, 2, 0);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	rc = spdk_bdev_flush_blocks(desc, io_ch, 15, 2, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == true);

	CU_ASSERT(TAILQ_EMPTY(&g_bdev_ut_channel->expected_io));

	/* Child requests return an error status */
	bdev->optimal_io_boundary = 16;
	iov[0].iov_base = (void *)0x10000;
	iov[0].iov_len = 512 * 64;
	g_io_exp_status = SPDK_BDEV_IO_STATUS_FAILED;
	g_io_done = false;
	g_io_status = SPDK_BDEV_IO_STATUS_SUCCESS;

	rc = spdk_bdev_readv_blocks(desc, io_ch, iov, 1, 1, 64, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 5);
	stub_complete_io(4);
	CU_ASSERT(g_io_done == false);
	CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED);

	/* Test that a multi vector command is terminated with failure, before continuing
	 * the splitting process, when one of its child I/Os fails.
	 * The multi vector command is the same as above: it needs to be split by strip
	 * and then needs to be split further due to the capacity of child iovs.
	 */
	for (i = 0; i < BDEV_IO_NUM_CHILD_IOV - 1; i++) {
		iov[i].iov_base = (void *)((i + 1) * 0x10000);
		iov[i].iov_len = 512;
	}
	iov[BDEV_IO_NUM_CHILD_IOV - 1].iov_base = (void *)(BDEV_IO_NUM_CHILD_IOV * 0x10000);
	iov[BDEV_IO_NUM_CHILD_IOV - 1].iov_len = 256;

	iov[BDEV_IO_NUM_CHILD_IOV].iov_base = (void *)((BDEV_IO_NUM_CHILD_IOV + 1) * 0x10000);
	iov[BDEV_IO_NUM_CHILD_IOV].iov_len = 256;

	iov[BDEV_IO_NUM_CHILD_IOV + 1].iov_base = (void *)((BDEV_IO_NUM_CHILD_IOV + 2) * 0x10000);
	iov[BDEV_IO_NUM_CHILD_IOV + 1].iov_len = 512;

	bdev->optimal_io_boundary = BDEV_IO_NUM_CHILD_IOV;

	g_io_exp_status = SPDK_BDEV_IO_STATUS_FAILED;
	g_io_done = false;
	g_io_status = SPDK_BDEV_IO_STATUS_SUCCESS;

	rc = spdk_bdev_readv_blocks(desc, io_ch, iov, BDEV_IO_NUM_CHILD_IOV * 2, 0,
				    BDEV_IO_NUM_CHILD_IOV + 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED);

	g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS;

	/* For this test we will create the following conditions to hit the code path where
	 * we are trying to send an IO following a split that has no iovs because we had to
	 * trim them for alignment reasons.
1652 * 1653 * - 16K boundary, our IO will start at offset 0 with a length of 0x4200 1654 * - Our IOVs are 0x212 in size so that we run into the 16K boundary at child IOV 1655 * position 30 and overshoot by 0x2e. 1656 * - That means we'll send the IO and loop back to pick up the remaining bytes at 1657 * child IOV index 31. When we do, we find that we have to shorten index 31 by 0x2e, 1658 * which eliminates that vector, so we just send the first split IO with 31 vectors 1659 * and let the completion pick up the last 2 vectors. 1660 */ 1661 bdev->optimal_io_boundary = 32; 1662 bdev->split_on_optimal_io_boundary = true; 1663 g_io_done = false; 1664 1665 /* Init all parent IOVs to 0x212 */ 1666 for (i = 0; i < BDEV_IO_NUM_CHILD_IOV + 2; i++) { 1667 iov[i].iov_base = (void *)((i + 1) * 0x10000); 1668 iov[i].iov_len = 0x212; 1669 } 1670 1671 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0, BDEV_IO_NUM_CHILD_IOV, 1672 BDEV_IO_NUM_CHILD_IOV - 1); 1673 /* expect 0-29 to be 1:1 with the parent iov */ 1674 for (i = 0; i < BDEV_IO_NUM_CHILD_IOV - 2; i++) { 1675 ut_expected_io_set_iov(expected_io, i, iov[i].iov_base, iov[i].iov_len); 1676 } 1677 1678 /* expect index 30 to be shortened to 0x1e4 (0x212 - 0x2e) because of the alignment 1679 * where 0x2e is the amount we overshot the 16K boundary 1680 */ 1681 ut_expected_io_set_iov(expected_io, BDEV_IO_NUM_CHILD_IOV - 2, 1682 (void *)(iov[BDEV_IO_NUM_CHILD_IOV - 2].iov_base), 0x1e4); 1683 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1684 1685 /* 2nd child IO will have 2 remaining vectors, one to pick up from the one that was 1686 * shortened that takes it to the next boundary and then a final one to get us to 1687 * 0x4200 bytes for the IO. 1688 */ 1689 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, BDEV_IO_NUM_CHILD_IOV, 1690 BDEV_IO_NUM_CHILD_IOV, 2); 1691 /* position 30 picked up the remaining bytes to the next boundary */ 1692 ut_expected_io_set_iov(expected_io, 0, 1693 (void *)(iov[BDEV_IO_NUM_CHILD_IOV - 2].iov_base + 0x1e4), 0x2e); 1694 1695 /* position 31 picked up the rest of the transfer to get us to 0x4200 */ 1696 ut_expected_io_set_iov(expected_io, 1, 1697 (void *)(iov[BDEV_IO_NUM_CHILD_IOV - 1].iov_base), 0x1d2); 1698 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1699 1700 rc = spdk_bdev_readv_blocks(desc, io_ch, iov, BDEV_IO_NUM_CHILD_IOV + 1, 0, 1701 BDEV_IO_NUM_CHILD_IOV + 1, io_done, NULL); 1702 CU_ASSERT(rc == 0); 1703 CU_ASSERT(g_io_done == false); 1704 1705 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 1706 stub_complete_io(1); 1707 CU_ASSERT(g_io_done == false); 1708 1709 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 1710 stub_complete_io(1); 1711 CU_ASSERT(g_io_done == true); 1712 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 1713 1714 spdk_put_io_channel(io_ch); 1715 spdk_bdev_close(desc); 1716 free_bdev(bdev); 1717 spdk_bdev_finish(bdev_fini_cb, NULL); 1718 poll_threads(); 1719 } 1720 1721 static void 1722 bdev_io_max_size_and_segment_split_test(void) 1723 { 1724 struct spdk_bdev *bdev; 1725 struct spdk_bdev_desc *desc = NULL; 1726 struct spdk_io_channel *io_ch; 1727 struct spdk_bdev_opts bdev_opts = {}; 1728 struct iovec iov[BDEV_IO_NUM_CHILD_IOV * 2]; 1729 struct ut_expected_io *expected_io; 1730 uint64_t i; 1731 int rc; 1732 1733 spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts)); 1734 bdev_opts.bdev_io_pool_size = 512; 1735 bdev_opts.bdev_io_cache_size = 64; 1736 1737 bdev_opts.opts_size = sizeof(bdev_opts); 1738 rc =
spdk_bdev_set_opts(&bdev_opts); 1739 CU_ASSERT(rc == 0); 1740 spdk_bdev_initialize(bdev_init_cb, NULL); 1741 1742 bdev = allocate_bdev("bdev0"); 1743 1744 rc = spdk_bdev_open_ext(bdev->name, true, bdev_ut_event_cb, NULL, &desc); 1745 CU_ASSERT(rc == 0); 1746 SPDK_CU_ASSERT_FATAL(desc != NULL); 1747 io_ch = spdk_bdev_get_io_channel(desc); 1748 CU_ASSERT(io_ch != NULL); 1749 1750 bdev->split_on_optimal_io_boundary = false; 1751 bdev->optimal_io_boundary = 0; 1752 1753 /* Case 0: max_num_segments == 0, 1754 * but the 2 * 512 byte payload exceeds max_segment_size (512). 1755 */ 1756 bdev->max_segment_size = 512; 1757 bdev->max_num_segments = 0; 1758 g_io_done = false; 1759 1760 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 14, 2, 2); 1761 ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 512); 1762 ut_expected_io_set_iov(expected_io, 1, (void *)(0xF000 + 512), 512); 1763 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1764 1765 rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 2, io_done, NULL); 1766 CU_ASSERT(rc == 0); 1767 CU_ASSERT(g_io_done == false); 1768 1769 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 1770 stub_complete_io(1); 1771 CU_ASSERT(g_io_done == true); 1772 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 1773 1774 /* Case 1: max_segment_size == 0, 1775 * but the iov count (2) exceeds max_num_segments (1). 1776 */ 1777 bdev->max_segment_size = 0; 1778 bdev->max_num_segments = 1; 1779 g_io_done = false; 1780 1781 iov[0].iov_base = (void *)0x10000; 1782 iov[0].iov_len = 512; 1783 iov[1].iov_base = (void *)0x20000; 1784 iov[1].iov_len = 8 * 512; 1785 1786 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 14, 1, 1); 1787 ut_expected_io_set_iov(expected_io, 0, iov[0].iov_base, iov[0].iov_len); 1788 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1789 1790 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 15, 8, 1); 1791 ut_expected_io_set_iov(expected_io, 0, iov[1].iov_base, iov[1].iov_len); 1792 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1793 1794 rc = spdk_bdev_readv_blocks(desc, io_ch, iov, 2, 14, 9, io_done, NULL); 1795 CU_ASSERT(rc == 0); 1796 CU_ASSERT(g_io_done == false); 1797 1798 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 1799 stub_complete_io(2); 1800 CU_ASSERT(g_io_done == true); 1801 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 1802 1803 /* Test that a non-vector command is split correctly. 1804 * Set up the expected values before calling spdk_bdev_read_blocks 1805 */ 1806 bdev->max_segment_size = 512; 1807 bdev->max_num_segments = 1; 1808 g_io_done = false; 1809 1810 /* Child IO 0 */ 1811 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 14, 1, 1); 1812 ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 512); 1813 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1814 1815 /* Child IO 1 */ 1816 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 15, 1, 1); 1817 ut_expected_io_set_iov(expected_io, 0, (void *)(0xF000 + 1 * 512), 512); 1818 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1819 1820 /* spdk_bdev_read_blocks will submit the first child immediately.
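 * Both children are in fact submitted back to back, so outstanding_io_count reaches 2 before any completion is processed.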
*/ 1821 rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 2, io_done, NULL); 1822 CU_ASSERT(rc == 0); 1823 CU_ASSERT(g_io_done == false); 1824 1825 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 1826 stub_complete_io(2); 1827 CU_ASSERT(g_io_done == true); 1828 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 1829 1830 /* Now set up a more complex, multi-vector command that needs to be split, 1831 * including splitting iovecs. 1832 */ 1833 bdev->max_segment_size = 2 * 512; 1834 bdev->max_num_segments = 1; 1835 g_io_done = false; 1836 1837 iov[0].iov_base = (void *)0x10000; 1838 iov[0].iov_len = 2 * 512; 1839 iov[1].iov_base = (void *)0x20000; 1840 iov[1].iov_len = 4 * 512; 1841 iov[2].iov_base = (void *)0x30000; 1842 iov[2].iov_len = 6 * 512; 1843 1844 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 14, 2, 1); 1845 ut_expected_io_set_iov(expected_io, 0, iov[0].iov_base, 512 * 2); 1846 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1847 1848 /* Split iov[1] into 2 iov entries, then split the segments */ 1849 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 16, 2, 1); 1850 ut_expected_io_set_iov(expected_io, 0, iov[1].iov_base, 512 * 2); 1851 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1852 1853 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 18, 2, 1); 1854 ut_expected_io_set_iov(expected_io, 0, iov[1].iov_base + 512 * 2, 512 * 2); 1855 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1856 1857 /* Split iov[2] into 3 iov entries, then split the segments */ 1858 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 20, 2, 1); 1859 ut_expected_io_set_iov(expected_io, 0, iov[2].iov_base, 512 * 2); 1860 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1861 1862 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 22, 2, 1); 1863 ut_expected_io_set_iov(expected_io, 0, iov[2].iov_base + 512 * 2, 512 * 2); 1864 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1865 1866 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 24, 2, 1); 1867 ut_expected_io_set_iov(expected_io, 0, iov[2].iov_base + 512 * 4, 512 * 2); 1868 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1869 1870 rc = spdk_bdev_writev_blocks(desc, io_ch, iov, 3, 14, 12, io_done, NULL); 1871 CU_ASSERT(rc == 0); 1872 CU_ASSERT(g_io_done == false); 1873 1874 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 6); 1875 stub_complete_io(6); 1876 CU_ASSERT(g_io_done == true); 1877 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 1878 1879 /* Test multi vector command that needs to be split by strip and then needs to be 1880 * split further due to the capacity of parent IO child iovs. 1881 */ 1882 bdev->max_segment_size = 512; 1883 bdev->max_num_segments = 1; 1884 g_io_done = false; 1885 1886 for (i = 0; i < BDEV_IO_NUM_CHILD_IOV; i++) { 1887 iov[i].iov_base = (void *)((i + 1) * 0x10000); 1888 iov[i].iov_len = 512 * 2; 1889 } 1890 1891 /* Each input iov is split into 2 iovs; 1892 * half of the input iovs fill all the child iov entries of a single split round.
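 * With BDEV_IO_NUM_CHILD_IOV == 32 that means the first 16 parent iovs produce 32 single-block child IOs in round one and the remaining 16 produce 32 more in round two, matching the two stub_complete_io(BDEV_IO_NUM_CHILD_IOV) calls below.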
1893 */ 1894 for (i = 0; i < BDEV_IO_NUM_CHILD_IOV / 2; i++) { 1895 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 2 * i, 1, 1); 1896 ut_expected_io_set_iov(expected_io, 0, iov[i].iov_base, 512); 1897 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1898 1899 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 2 * i + 1, 1, 1); 1900 ut_expected_io_set_iov(expected_io, 0, iov[i].iov_base + 512, 512); 1901 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1902 } 1903 1904 /* The remaining iovs are split in the second round */ 1905 for (i = BDEV_IO_NUM_CHILD_IOV / 2; i < BDEV_IO_NUM_CHILD_IOV; i++) { 1906 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, i * 2, 1, 1); 1907 ut_expected_io_set_iov(expected_io, 0, iov[i].iov_base, 512); 1908 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1909 1910 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, i * 2 + 1, 1, 1); 1911 ut_expected_io_set_iov(expected_io, 0, iov[i].iov_base + 512, 512); 1912 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1913 } 1914 1915 rc = spdk_bdev_readv_blocks(desc, io_ch, iov, BDEV_IO_NUM_CHILD_IOV, 0, 1916 BDEV_IO_NUM_CHILD_IOV * 2, io_done, NULL); 1917 CU_ASSERT(rc == 0); 1918 CU_ASSERT(g_io_done == false); 1919 1920 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == BDEV_IO_NUM_CHILD_IOV); 1921 stub_complete_io(BDEV_IO_NUM_CHILD_IOV); 1922 CU_ASSERT(g_io_done == false); 1923 1924 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == BDEV_IO_NUM_CHILD_IOV); 1925 stub_complete_io(BDEV_IO_NUM_CHILD_IOV); 1926 CU_ASSERT(g_io_done == true); 1927 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 1928 1929 /* An error case: a divided child IO does 1930 * not end up a multiple of the block size, 1931 * so the request exits with an error 1932 */ 1933 bdev->max_segment_size = 512; 1934 bdev->max_num_segments = 1; 1935 g_io_done = false; 1936 1937 iov[0].iov_base = (void *)0x10000; 1938 iov[0].iov_len = 512 + 256; 1939 iov[1].iov_base = (void *)0x20000; 1940 iov[1].iov_len = 256; 1941 1942 /* iov[0] is split into 512 and 256 bytes. 1943 * 256 bytes is less than a block, and the next split round 1944 * finds that its first child IO is smaller than 1945 * the block size, so it exits with an error 1946 */ 1947 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0, 1, 1); 1948 ut_expected_io_set_iov(expected_io, 0, iov[0].iov_base, 512); 1949 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1950 1951 rc = spdk_bdev_readv_blocks(desc, io_ch, iov, 2, 0, 2, io_done, NULL); 1952 CU_ASSERT(rc == 0); 1953 CU_ASSERT(g_io_done == false); 1954 1955 /* First child IO is OK */ 1956 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 1957 stub_complete_io(1); 1958 CU_ASSERT(g_io_done == true); 1959 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 1960 1961 /* error exit */ 1962 stub_complete_io(1); 1963 CU_ASSERT(g_io_done == true); 1964 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED); 1965 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 1966 1967 /* Test multi vector command that needs to be split by strip and then needs to be 1968 * split further due to the capacity of child iovs. 1969 * 1970 * In this case, the last two iovs need to be split, but that would exceed the capacity 1971 * of child iovs, so they must wait until the first batch has completed.
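 * Concretely: iovs 0 through 29 map 1:1 into the child iov array and iov[30] splits into two 512-byte entries, filling all 32 slots with 32 blocks; iov[31] is left for a second, 2-block child IO.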
1972 */ 1973 bdev->max_segment_size = 512; 1974 bdev->max_num_segments = BDEV_IO_NUM_CHILD_IOV; 1975 g_io_done = false; 1976 1977 for (i = 0; i < BDEV_IO_NUM_CHILD_IOV - 2; i++) { 1978 iov[i].iov_base = (void *)((i + 1) * 0x10000); 1979 iov[i].iov_len = 512; 1980 } 1981 for (i = BDEV_IO_NUM_CHILD_IOV - 2; i < BDEV_IO_NUM_CHILD_IOV; i++) { 1982 iov[i].iov_base = (void *)((i + 1) * 0x10000); 1983 iov[i].iov_len = 512 * 2; 1984 } 1985 1986 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0, 1987 BDEV_IO_NUM_CHILD_IOV, BDEV_IO_NUM_CHILD_IOV); 1988 /* 0 ~ (BDEV_IO_NUM_CHILD_IOV - 2) will not be split */ 1989 for (i = 0; i < BDEV_IO_NUM_CHILD_IOV - 2; i++) { 1990 ut_expected_io_set_iov(expected_io, i, iov[i].iov_base, iov[i].iov_len); 1991 } 1992 /* (BDEV_IO_NUM_CHILD_IOV - 2) is split */ 1993 ut_expected_io_set_iov(expected_io, i, iov[i].iov_base, 512); 1994 ut_expected_io_set_iov(expected_io, i + 1, iov[i].iov_base + 512, 512); 1995 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1996 1997 /* Child iov entries exceed the max num of the parent IO, so split it in the next round */ 1998 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, BDEV_IO_NUM_CHILD_IOV, 2, 2); 1999 ut_expected_io_set_iov(expected_io, 0, iov[i + 1].iov_base, 512); 2000 ut_expected_io_set_iov(expected_io, 1, iov[i + 1].iov_base + 512, 512); 2001 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2002 2003 rc = spdk_bdev_readv_blocks(desc, io_ch, iov, BDEV_IO_NUM_CHILD_IOV, 0, 2004 BDEV_IO_NUM_CHILD_IOV + 2, io_done, NULL); 2005 CU_ASSERT(rc == 0); 2006 CU_ASSERT(g_io_done == false); 2007 2008 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 2009 stub_complete_io(1); 2010 CU_ASSERT(g_io_done == false); 2011 2012 /* Next round */ 2013 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 2014 stub_complete_io(1); 2015 CU_ASSERT(g_io_done == true); 2016 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 2017 2018 /* This case is similar to the previous one, but the IO composed of 2019 * the last few child iov entries is not a full blocklen, so those entries 2020 * cannot be put into this IO and must wait for the next one. 2021 */ 2022 bdev->max_segment_size = 512; 2023 bdev->max_num_segments = BDEV_IO_NUM_CHILD_IOV; 2024 g_io_done = false; 2025 2026 for (i = 0; i < BDEV_IO_NUM_CHILD_IOV - 2; i++) { 2027 iov[i].iov_base = (void *)((i + 1) * 0x10000); 2028 iov[i].iov_len = 512; 2029 } 2030 2031 for (i = BDEV_IO_NUM_CHILD_IOV - 2; i < BDEV_IO_NUM_CHILD_IOV + 2; i++) { 2032 iov[i].iov_base = (void *)((i + 1) * 0x10000); 2033 iov[i].iov_len = 128; 2034 } 2035 2036 /* The first child iovcnt isn't BDEV_IO_NUM_CHILD_IOV but BDEV_IO_NUM_CHILD_IOV - 2, 2037 * because the remaining 2 iovs are not enough for a blocklen. 2038 */ 2039 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0, 2040 BDEV_IO_NUM_CHILD_IOV - 2, BDEV_IO_NUM_CHILD_IOV - 2); 2041 for (i = 0; i < BDEV_IO_NUM_CHILD_IOV - 2; i++) { 2042 ut_expected_io_set_iov(expected_io, i, iov[i].iov_base, iov[i].iov_len); 2043 } 2044 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2045 2046 /* The second child IO waits until the first child IO has completed, because 2047 * the combined iovcnt of the two IOs would exceed the child iovcnt of the parent IO.
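 * Four 128-byte entries make up exactly one 512-byte block, so the second child IO has length 1 and iovcnt 4.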
2048 * It covers parent iovs BDEV_IO_NUM_CHILD_IOV - 2 to BDEV_IO_NUM_CHILD_IOV + 2. 2049 */ 2050 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, BDEV_IO_NUM_CHILD_IOV - 2, 2051 1, 4); 2052 ut_expected_io_set_iov(expected_io, 0, iov[i].iov_base, iov[i].iov_len); 2053 ut_expected_io_set_iov(expected_io, 1, iov[i + 1].iov_base, iov[i + 1].iov_len); 2054 ut_expected_io_set_iov(expected_io, 2, iov[i + 2].iov_base, iov[i + 2].iov_len); 2055 ut_expected_io_set_iov(expected_io, 3, iov[i + 3].iov_base, iov[i + 3].iov_len); 2056 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2057 2058 rc = spdk_bdev_readv_blocks(desc, io_ch, iov, BDEV_IO_NUM_CHILD_IOV + 2, 0, 2059 BDEV_IO_NUM_CHILD_IOV - 1, io_done, NULL); 2060 CU_ASSERT(rc == 0); 2061 CU_ASSERT(g_io_done == false); 2062 2063 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 2064 stub_complete_io(1); 2065 CU_ASSERT(g_io_done == false); 2066 2067 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 2068 stub_complete_io(1); 2069 CU_ASSERT(g_io_done == true); 2070 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 2071 2072 /* A very complicated case: each sg entry exceeds max_segment_size and 2073 * needs to be split, each child IO must be a multiple of blocklen, and 2074 * the child iovcnt exceeds the parent iovcnt. 2075 */ 2076 bdev->max_segment_size = 512 + 128; 2077 bdev->max_num_segments = 3; 2078 g_io_done = false; 2079 2080 for (i = 0; i < BDEV_IO_NUM_CHILD_IOV - 2; i++) { 2081 iov[i].iov_base = (void *)((i + 1) * 0x10000); 2082 iov[i].iov_len = 512 + 256; 2083 } 2084 2085 for (i = BDEV_IO_NUM_CHILD_IOV - 2; i < BDEV_IO_NUM_CHILD_IOV + 2; i++) { 2086 iov[i].iov_base = (void *)((i + 1) * 0x10000); 2087 iov[i].iov_len = 512 + 128; 2088 } 2089 2090 /* Child IOs use 9 child iov entries per for() round, 3 * 9 = 27 entries in total. 2091 * Each for() round consumes 4 parent IO iov entries and 6 blocks, 2092 * generating 3 child IOs (9 in total). 2093 */ 2094 for (i = 0; i < 3; i++) { 2095 uint32_t j = i * 4; 2096 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, i * 6, 2, 3); 2097 ut_expected_io_set_iov(expected_io, 0, iov[j].iov_base, 640); 2098 ut_expected_io_set_iov(expected_io, 1, iov[j].iov_base + 640, 128); 2099 ut_expected_io_set_iov(expected_io, 2, iov[j + 1].iov_base, 256); 2100 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2101 2102 /* A child IO must be a multiple of blocklen, so 2103 * iov[j + 2] must be split. If the third entry were also added, 2104 * a multiple of blocklen could not be guaranteed, but the split 2105 * remainder still occupies one entry of the parent's child iov array.
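 * Byte math per round: 640 + 128 + 256 = 1024 (2 blocks), then 512 + 512 = 1024, then 256 + 640 + 128 = 1024, so each round moves 6 blocks out of four 768-byte parent entries (max_segment_size is 512 + 128 = 640).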
2106 */ 2107 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, i * 6 + 2, 2, 2); 2108 ut_expected_io_set_iov(expected_io, 0, iov[j + 1].iov_base + 256, 512); 2109 ut_expected_io_set_iov(expected_io, 1, iov[j + 2].iov_base, 512); 2110 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2111 2112 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, i * 6 + 4, 2, 3); 2113 ut_expected_io_set_iov(expected_io, 0, iov[j + 2].iov_base + 512, 256); 2114 ut_expected_io_set_iov(expected_io, 1, iov[j + 3].iov_base, 640); 2115 ut_expected_io_set_iov(expected_io, 2, iov[j + 3].iov_base + 640, 128); 2116 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2117 } 2118 2119 /* Child iov position is at 27 for the 10th child IO; 2120 * its parent iov entry index is 3 * 4 and its block offset is 3 * 6. 2121 */ 2122 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 18, 2, 3); 2123 ut_expected_io_set_iov(expected_io, 0, iov[12].iov_base, 640); 2124 ut_expected_io_set_iov(expected_io, 1, iov[12].iov_base + 640, 128); 2125 ut_expected_io_set_iov(expected_io, 2, iov[13].iov_base, 256); 2126 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2127 2128 /* Child iov position at 30, the 11th child IO */ 2129 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 20, 2, 2); 2130 ut_expected_io_set_iov(expected_io, 0, iov[13].iov_base + 256, 512); 2131 ut_expected_io_set_iov(expected_io, 1, iov[14].iov_base, 512); 2132 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2133 2134 /* The 2nd split round and iovpos is 0, the 12th child IO */ 2135 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 22, 2, 3); 2136 ut_expected_io_set_iov(expected_io, 0, iov[14].iov_base + 512, 256); 2137 ut_expected_io_set_iov(expected_io, 1, iov[15].iov_base, 640); 2138 ut_expected_io_set_iov(expected_io, 2, iov[15].iov_base + 640, 128); 2139 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2140 2141 /* Consume 9 child IOs and 27 child iov entries. 2142 * Each for() round consumes 4 parent IO iov entries and 6 blocks. 2143 * The parent IO iov index starts from 16 and the block offset starts from 24. 2144 */ 2145 for (i = 0; i < 3; i++) { 2146 uint32_t j = i * 4 + 16; 2147 uint32_t offset = i * 6 + 24; 2148 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, offset, 2, 3); 2149 ut_expected_io_set_iov(expected_io, 0, iov[j].iov_base, 640); 2150 ut_expected_io_set_iov(expected_io, 1, iov[j].iov_base + 640, 128); 2151 ut_expected_io_set_iov(expected_io, 2, iov[j + 1].iov_base, 256); 2152 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2153 2154 /* A child IO must be a multiple of blocklen, so 2155 * iov[j + 2] must be split. If the third entry were also added, 2156 * a multiple of blocklen could not be guaranteed, but the split 2157 * remainder still occupies one entry of the parent's child iov array.
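 * This second batch mirrors the first: the same 640/128/256 byte math repeats for parent iovs 16 through 27, covering blocks 24 through 41.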
2158 */ 2159 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, offset + 2, 2, 2); 2160 ut_expected_io_set_iov(expected_io, 0, iov[j + 1].iov_base + 256, 512); 2161 ut_expected_io_set_iov(expected_io, 1, iov[j + 2].iov_base, 512); 2162 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2163 2164 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, offset + 4, 2, 3); 2165 ut_expected_io_set_iov(expected_io, 0, iov[j + 2].iov_base + 512, 256); 2166 ut_expected_io_set_iov(expected_io, 1, iov[j + 3].iov_base, 640); 2167 ut_expected_io_set_iov(expected_io, 2, iov[j + 3].iov_base + 640, 128); 2168 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2169 } 2170 2171 /* The 22nd child IO, child iov position at 30 */ 2172 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 42, 1, 1); 2173 ut_expected_io_set_iov(expected_io, 0, iov[28].iov_base, 512); 2174 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2175 2176 /* The third round */ 2177 /* Here is the 23rd child IO and child iovpos is 0 */ 2178 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 43, 2, 3); 2179 ut_expected_io_set_iov(expected_io, 0, iov[28].iov_base + 512, 256); 2180 ut_expected_io_set_iov(expected_io, 1, iov[29].iov_base, 640); 2181 ut_expected_io_set_iov(expected_io, 2, iov[29].iov_base + 640, 128); 2182 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2183 2184 /* The 24th child IO */ 2185 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 45, 3, 3); 2186 ut_expected_io_set_iov(expected_io, 0, iov[30].iov_base, 640); 2187 ut_expected_io_set_iov(expected_io, 1, iov[31].iov_base, 640); 2188 ut_expected_io_set_iov(expected_io, 2, iov[32].iov_base, 256); 2189 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2190 2191 /* The 25th child IO */ 2192 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 48, 2, 2); 2193 ut_expected_io_set_iov(expected_io, 0, iov[32].iov_base + 256, 384); 2194 ut_expected_io_set_iov(expected_io, 1, iov[33].iov_base, 640); 2195 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2196 2197 rc = spdk_bdev_readv_blocks(desc, io_ch, iov, BDEV_IO_NUM_CHILD_IOV + 2, 0, 2198 50, io_done, NULL); 2199 CU_ASSERT(rc == 0); 2200 CU_ASSERT(g_io_done == false); 2201 2202 /* Parent IO supports up to 32 child iovs, so it is calculated that 2203 * a maximum of 11 IOs can be split at a time, and the 2204 * splitting will continue after the first batch is over. 2205 */ 2206 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 11); 2207 stub_complete_io(11); 2208 CU_ASSERT(g_io_done == false); 2209 2210 /* The 2nd round */ 2211 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 11); 2212 stub_complete_io(11); 2213 CU_ASSERT(g_io_done == false); 2214 2215 /* The last round */ 2216 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 3); 2217 stub_complete_io(3); 2218 CU_ASSERT(g_io_done == true); 2219 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 2220 2221 /* Test a WRITE_ZEROES. This should also not be split.
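 * The 36-block length far exceeds what max_segment_size and max_num_segments would allow for a READ or WRITE, but WRITE_ZEROES carries no data payload and goes out as a single request.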
*/ 2222 bdev->max_segment_size = 512; 2223 bdev->max_num_segments = 1; 2224 g_io_done = false; 2225 2226 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, 9, 36, 0); 2227 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2228 2229 rc = spdk_bdev_write_zeroes_blocks(desc, io_ch, 9, 36, io_done, NULL); 2230 CU_ASSERT(rc == 0); 2231 CU_ASSERT(g_io_done == false); 2232 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 2233 stub_complete_io(1); 2234 CU_ASSERT(g_io_done == true); 2235 2236 /* Test an UNMAP. This should also not be split. */ 2237 g_io_done = false; 2238 2239 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_UNMAP, 15, 4, 0); 2240 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2241 2242 rc = spdk_bdev_unmap_blocks(desc, io_ch, 15, 4, io_done, NULL); 2243 CU_ASSERT(rc == 0); 2244 CU_ASSERT(g_io_done == false); 2245 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 2246 stub_complete_io(1); 2247 CU_ASSERT(g_io_done == true); 2248 2249 /* Test a FLUSH. This should also not be split. */ 2250 g_io_done = false; 2251 2252 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_FLUSH, 15, 4, 0); 2253 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2254 2255 rc = spdk_bdev_flush_blocks(desc, io_ch, 15, 4, io_done, NULL); 2256 CU_ASSERT(rc == 0); 2257 CU_ASSERT(g_io_done == false); 2258 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 2259 stub_complete_io(1); 2260 CU_ASSERT(g_io_done == true); 2261 2262 spdk_put_io_channel(io_ch); 2263 spdk_bdev_close(desc); 2264 free_bdev(bdev); 2265 spdk_bdev_finish(bdev_fini_cb, NULL); 2266 poll_threads(); 2267 } 2268 2269 static void 2270 bdev_io_mix_split_test(void) 2271 { 2272 struct spdk_bdev *bdev; 2273 struct spdk_bdev_desc *desc = NULL; 2274 struct spdk_io_channel *io_ch; 2275 struct spdk_bdev_opts bdev_opts = {}; 2276 struct iovec iov[BDEV_IO_NUM_CHILD_IOV * 2]; 2277 struct ut_expected_io *expected_io; 2278 uint64_t i; 2279 int rc; 2280 2281 spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts)); 2282 bdev_opts.bdev_io_pool_size = 512; 2283 bdev_opts.bdev_io_cache_size = 64; 2284 2285 rc = spdk_bdev_set_opts(&bdev_opts); 2286 CU_ASSERT(rc == 0); 2287 spdk_bdev_initialize(bdev_init_cb, NULL); 2288 2289 bdev = allocate_bdev("bdev0"); 2290 2291 rc = spdk_bdev_open_ext(bdev->name, true, bdev_ut_event_cb, NULL, &desc); 2292 CU_ASSERT(rc == 0); 2293 SPDK_CU_ASSERT_FATAL(desc != NULL); 2294 io_ch = spdk_bdev_get_io_channel(desc); 2295 CU_ASSERT(io_ch != NULL); 2296 2297 /* First case optimal_io_boundary == max_segment_size * max_num_segments */ 2298 bdev->split_on_optimal_io_boundary = true; 2299 bdev->optimal_io_boundary = 16; 2300 2301 bdev->max_segment_size = 512; 2302 bdev->max_num_segments = 16; 2303 g_io_done = false; 2304 2305 /* An IO crossing the IO boundary requires a split. 2306 * Total: 2 child IOs.
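 * 4 blocks at offset 14 with a boundary of 16 split into blocks 14-15 and 16-17, and each child carries two 512-byte segments.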
2307 */ 2308 2309 /* The 1st child IO splits the segment_size into multiple segment entries */ 2310 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 14, 2, 2); 2311 ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 512); 2312 ut_expected_io_set_iov(expected_io, 1, (void *)(0xF000 + 512), 512); 2313 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2314 2315 /* The 2nd child IO splits the segment_size into multiple segment entries */ 2316 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 16, 2, 2); 2317 ut_expected_io_set_iov(expected_io, 0, (void *)(0xF000 + 2 * 512), 512); 2318 ut_expected_io_set_iov(expected_io, 1, (void *)(0xF000 + 3 * 512), 512); 2319 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2320 2321 rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 4, io_done, NULL); 2322 CU_ASSERT(rc == 0); 2323 CU_ASSERT(g_io_done == false); 2324 2325 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 2326 stub_complete_io(2); 2327 CU_ASSERT(g_io_done == true); 2328 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 2329 2330 /* Second case optimal_io_boundary > max_segment_size * max_num_segments */ 2331 bdev->max_segment_size = 15 * 512; 2332 bdev->max_num_segments = 1; 2333 g_io_done = false; 2334 2335 /* An IO crossing the IO boundary requires a split. 2336 * The 1st child IO segment size exceeds the max_segment_size, 2337 * so the 1st child IO is split into multiple segment entries, 2338 * and then into 2 child IOs because of max_num_segments. 2339 * Total: 3 child IOs. 2340 */ 2341 2342 /* The first 2 IOs are within one IO boundary. 2343 * Because optimal_io_boundary > max_segment_size * max_num_segments, 2344 * the boundary's worth of data is split into these first 2 IOs. 2345 */ 2346 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0, 15, 1); 2347 ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 512 * 15); 2348 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2349 2350 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 15, 1, 1); 2351 ut_expected_io_set_iov(expected_io, 0, (void *)(0xF000 + 512 * 15), 512); 2352 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2353 2354 /* The 3rd child IO exists because of the IO boundary */ 2355 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 16, 2, 1); 2356 ut_expected_io_set_iov(expected_io, 0, (void *)(0xF000 + 512 * 16), 512 * 2); 2357 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2358 2359 rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 0, 18, io_done, NULL); 2360 CU_ASSERT(rc == 0); 2361 CU_ASSERT(g_io_done == false); 2362 2363 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 3); 2364 stub_complete_io(3); 2365 CU_ASSERT(g_io_done == true); 2366 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 2367 2368 /* Third case optimal_io_boundary < max_segment_size * max_num_segments */ 2369 bdev->max_segment_size = 17 * 512; 2370 bdev->max_num_segments = 1; 2371 g_io_done = false; 2372 2373 /* An IO crossing the IO boundary requires a split. 2374 * The child IOs do not split further. 2375 * Total: 2 child IOs.
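 * Blocks 0-15 fit in a single 16 * 512 = 8192-byte segment, which is under the max_segment_size of 17 * 512, so only the boundary split remains.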
2376 */ 2377 2378 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0, 16, 1); 2379 ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 512 * 16); 2380 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2381 2382 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 16, 2, 1); 2383 ut_expected_io_set_iov(expected_io, 0, (void *)(0xF000 + 512 * 16), 512 * 2); 2384 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2385 2386 rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 0, 18, io_done, NULL); 2387 CU_ASSERT(rc == 0); 2388 CU_ASSERT(g_io_done == false); 2389 2390 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 2391 stub_complete_io(2); 2392 CU_ASSERT(g_io_done == true); 2393 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 2394 2395 /* Now set up a more complex, multi-vector command that needs to be split, 2396 * including splitting iovecs. 2397 * optimal_io_boundary < max_segment_size * max_num_segments 2398 */ 2399 bdev->max_segment_size = 3 * 512; 2400 bdev->max_num_segments = 6; 2401 g_io_done = false; 2402 2403 iov[0].iov_base = (void *)0x10000; 2404 iov[0].iov_len = 4 * 512; 2405 iov[1].iov_base = (void *)0x20000; 2406 iov[1].iov_len = 4 * 512; 2407 iov[2].iov_base = (void *)0x30000; 2408 iov[2].iov_len = 10 * 512; 2409 2410 /* An IO crossing the IO boundary requires a split. 2411 * The 1st child IO segment size exceeds the max_segment_size, and after 2412 * splitting by segment_size, the num_segments exceeds max_num_segments, 2413 * so the 1st child IO is split into 2 child IOs. 2414 * Total: 3 child IOs. 2415 */ 2416 2417 /* The first 2 IOs are within one IO boundary. 2418 * After splitting by segment size, the segment count exceeds max_num_segments, 2419 * so it splits into 2 child IOs. 2420 */ 2421 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 0, 14, 6); 2422 ut_expected_io_set_iov(expected_io, 0, iov[0].iov_base, 512 * 3); 2423 ut_expected_io_set_iov(expected_io, 1, iov[0].iov_base + 512 * 3, 512); 2424 ut_expected_io_set_iov(expected_io, 2, iov[1].iov_base, 512 * 3); 2425 ut_expected_io_set_iov(expected_io, 3, iov[1].iov_base + 512 * 3, 512); 2426 ut_expected_io_set_iov(expected_io, 4, iov[2].iov_base, 512 * 3); 2427 ut_expected_io_set_iov(expected_io, 5, iov[2].iov_base + 512 * 3, 512 * 3); 2428 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2429 2430 /* The 2nd and 3rd child IOs take the leftover segment entries */ 2431 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 14, 2, 1); 2432 ut_expected_io_set_iov(expected_io, 0, iov[2].iov_base + 512 * 6, 512 * 2); 2433 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2434 2435 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 16, 2, 1); 2436 ut_expected_io_set_iov(expected_io, 0, iov[2].iov_base + 512 * 8, 512 * 2); 2437 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2438 2439 rc = spdk_bdev_writev_blocks(desc, io_ch, iov, 3, 0, 18, io_done, NULL); 2440 CU_ASSERT(rc == 0); 2441 CU_ASSERT(g_io_done == false); 2442 2443 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 3); 2444 stub_complete_io(3); 2445 CU_ASSERT(g_io_done == true); 2446 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 2447 2448 /* A very complicated case: each sg entry exceeds max_segment_size 2449 * and is split on the IO boundary.
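 * Each 4 * 512 = 2048-byte entry is split as 1536 + 512 under max_segment_size = 3 * 512.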
2450 * optimal_io_boundary < max_segment_size * max_num_segments 2451 */ 2452 bdev->max_segment_size = 3 * 512; 2453 bdev->max_num_segments = BDEV_IO_NUM_CHILD_IOV; 2454 g_io_done = false; 2455 2456 for (i = 0; i < 20; i++) { 2457 iov[i].iov_base = (void *)((i + 1) * 0x10000); 2458 iov[i].iov_len = 512 * 4; 2459 } 2460 2461 /* An IO crossing the IO boundary requires a split. 2462 * The 80-block length is split into 5 child IOs based on the offset and IO boundary. 2463 * Each iov entry needs to be split into 2 entries because of max_segment_size. 2464 * Total: 5 child IOs. 2465 */ 2466 2467 /* 4 iov entries fall within each IO boundary and each iov entry splits into 2, 2468 * so each child IO occupies 8 child iov entries. 2469 */ 2470 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 0, 16, 8); 2471 for (i = 0; i < 4; i++) { 2472 int iovcnt = i * 2; 2473 ut_expected_io_set_iov(expected_io, iovcnt, iov[i].iov_base, 512 * 3); 2474 ut_expected_io_set_iov(expected_io, iovcnt + 1, iov[i].iov_base + 512 * 3, 512); 2475 } 2476 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2477 2478 /* 2nd child IO and total 16 child iov entries of parent IO */ 2479 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 16, 16, 8); 2480 for (i = 4; i < 8; i++) { 2481 int iovcnt = (i - 4) * 2; 2482 ut_expected_io_set_iov(expected_io, iovcnt, iov[i].iov_base, 512 * 3); 2483 ut_expected_io_set_iov(expected_io, iovcnt + 1, iov[i].iov_base + 512 * 3, 512); 2484 } 2485 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2486 2487 /* 3rd child IO and total 24 child iov entries of parent IO */ 2488 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 32, 16, 8); 2489 for (i = 8; i < 12; i++) { 2490 int iovcnt = (i - 8) * 2; 2491 ut_expected_io_set_iov(expected_io, iovcnt, iov[i].iov_base, 512 * 3); 2492 ut_expected_io_set_iov(expected_io, iovcnt + 1, iov[i].iov_base + 512 * 3, 512); 2493 } 2494 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2495 2496 /* 4th child IO and total 32 child iov entries of parent IO */ 2497 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 48, 16, 8); 2498 for (i = 12; i < 16; i++) { 2499 int iovcnt = (i - 12) * 2; 2500 ut_expected_io_set_iov(expected_io, iovcnt, iov[i].iov_base, 512 * 3); 2501 ut_expected_io_set_iov(expected_io, iovcnt + 1, iov[i].iov_base + 512 * 3, 512); 2502 } 2503 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2504 2505 /* The 5th child IO; because of the child iov entry limit it is split 2506 * in the next round.
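 * The first 4 child IOs consume 4 * 8 = 32 child iov entries, exactly the BDEV_IO_NUM_CHILD_IOV limit, so the 5th child IO (blocks 64-79) goes out in the second split round.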
2507 */ 2508 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 64, 16, 8); 2509 for (i = 16; i < 20; i++) { 2510 int iovcnt = (i - 16) * 2; 2511 ut_expected_io_set_iov(expected_io, iovcnt, iov[i].iov_base, 512 * 3); 2512 ut_expected_io_set_iov(expected_io, iovcnt + 1, iov[i].iov_base + 512 * 3, 512); 2513 } 2514 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2515 2516 rc = spdk_bdev_writev_blocks(desc, io_ch, iov, 20, 0, 80, io_done, NULL); 2517 CU_ASSERT(rc == 0); 2518 CU_ASSERT(g_io_done == false); 2519 2520 /* First split round */ 2521 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 4); 2522 stub_complete_io(4); 2523 CU_ASSERT(g_io_done == false); 2524 2525 /* Second split round */ 2526 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 2527 stub_complete_io(1); 2528 CU_ASSERT(g_io_done == true); 2529 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 2530 2531 spdk_put_io_channel(io_ch); 2532 spdk_bdev_close(desc); 2533 free_bdev(bdev); 2534 spdk_bdev_finish(bdev_fini_cb, NULL); 2535 poll_threads(); 2536 } 2537 2538 static void 2539 bdev_io_split_with_io_wait(void) 2540 { 2541 struct spdk_bdev *bdev; 2542 struct spdk_bdev_desc *desc = NULL; 2543 struct spdk_io_channel *io_ch; 2544 struct spdk_bdev_channel *channel; 2545 struct spdk_bdev_mgmt_channel *mgmt_ch; 2546 struct spdk_bdev_opts bdev_opts = {}; 2547 struct iovec iov[3]; 2548 struct ut_expected_io *expected_io; 2549 int rc; 2550 2551 spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts)); 2552 bdev_opts.bdev_io_pool_size = 2; 2553 bdev_opts.bdev_io_cache_size = 1; 2554 2555 rc = spdk_bdev_set_opts(&bdev_opts); 2556 CU_ASSERT(rc == 0); 2557 spdk_bdev_initialize(bdev_init_cb, NULL); 2558 2559 bdev = allocate_bdev("bdev0"); 2560 2561 rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc); 2562 CU_ASSERT(rc == 0); 2563 CU_ASSERT(desc != NULL); 2564 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 2565 io_ch = spdk_bdev_get_io_channel(desc); 2566 CU_ASSERT(io_ch != NULL); 2567 channel = spdk_io_channel_get_ctx(io_ch); 2568 mgmt_ch = channel->shared_resource->mgmt_ch; 2569 2570 bdev->optimal_io_boundary = 16; 2571 bdev->split_on_optimal_io_boundary = true; 2572 2573 rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, NULL); 2574 CU_ASSERT(rc == 0); 2575 2576 /* Now test that a single-vector command is split correctly. 2577 * Offset 14, length 8, payload 0xF000 2578 * Child - Offset 14, length 2, payload 0xF000 2579 * Child - Offset 16, length 6, payload 0xF000 + 2 * 512 2580 * 2581 * Set up the expected values before calling spdk_bdev_read_blocks 2582 */ 2583 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 14, 2, 1); 2584 ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 2 * 512); 2585 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2586 2587 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 16, 6, 1); 2588 ut_expected_io_set_iov(expected_io, 0, (void *)(0xF000 + 2 * 512), 6 * 512); 2589 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2590 2591 /* The following children will be submitted sequentially due to the capacity of 2592 * spdk_bdev_io. 
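 * The pool holds only 2 spdk_bdev_io structures and one is already consumed by the outstanding single-block read above, so each split child must wait for a free spdk_bdev_io before it can be submitted.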
2593 */ 2594 2595 /* The first child I/O will be queued to wait until an spdk_bdev_io becomes available */ 2596 rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 8, io_done, NULL); 2597 CU_ASSERT(rc == 0); 2598 CU_ASSERT(!TAILQ_EMPTY(&mgmt_ch->io_wait_queue)); 2599 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 2600 2601 /* Completing the first read I/O will submit the first child */ 2602 stub_complete_io(1); 2603 CU_ASSERT(TAILQ_EMPTY(&mgmt_ch->io_wait_queue)); 2604 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 2605 2606 /* Completing the first child will submit the second child */ 2607 stub_complete_io(1); 2608 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 2609 2610 /* Complete the second child I/O. This should result in our callback getting 2611 * invoked since the parent I/O is now complete. 2612 */ 2613 stub_complete_io(1); 2614 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 2615 2616 /* Now set up a more complex, multi-vector command that needs to be split, 2617 * including splitting iovecs. 2618 */ 2619 iov[0].iov_base = (void *)0x10000; 2620 iov[0].iov_len = 512; 2621 iov[1].iov_base = (void *)0x20000; 2622 iov[1].iov_len = 20 * 512; 2623 iov[2].iov_base = (void *)0x30000; 2624 iov[2].iov_len = 11 * 512; 2625 2626 g_io_done = false; 2627 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 14, 2, 2); 2628 ut_expected_io_set_iov(expected_io, 0, (void *)0x10000, 512); 2629 ut_expected_io_set_iov(expected_io, 1, (void *)0x20000, 512); 2630 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2631 2632 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 16, 16, 1); 2633 ut_expected_io_set_iov(expected_io, 0, (void *)(0x20000 + 512), 16 * 512); 2634 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2635 2636 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 32, 14, 2); 2637 ut_expected_io_set_iov(expected_io, 0, (void *)(0x20000 + 17 * 512), 3 * 512); 2638 ut_expected_io_set_iov(expected_io, 1, (void *)0x30000, 11 * 512); 2639 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2640 2641 rc = spdk_bdev_writev_blocks(desc, io_ch, iov, 3, 14, 32, io_done, NULL); 2642 CU_ASSERT(rc == 0); 2643 CU_ASSERT(g_io_done == false); 2644 2645 /* The following children will be submitted sequentially due to the capacity of 2646 * spdk_bdev_io. 2647 */ 2648 2649 /* Completing the first child will submit the second child */ 2650 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 2651 stub_complete_io(1); 2652 CU_ASSERT(g_io_done == false); 2653 2654 /* Completing the second child will submit the third child */ 2655 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 2656 stub_complete_io(1); 2657 CU_ASSERT(g_io_done == false); 2658 2659 /* Completing the third child will result in our callback getting invoked 2660 * since the parent I/O is now complete. 
2661 */ 2662 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 2663 stub_complete_io(1); 2664 CU_ASSERT(g_io_done == true); 2665 2666 CU_ASSERT(TAILQ_EMPTY(&g_bdev_ut_channel->expected_io)); 2667 2668 spdk_put_io_channel(io_ch); 2669 spdk_bdev_close(desc); 2670 free_bdev(bdev); 2671 spdk_bdev_finish(bdev_fini_cb, NULL); 2672 poll_threads(); 2673 } 2674 2675 static void 2676 bdev_io_alignment(void) 2677 { 2678 struct spdk_bdev *bdev; 2679 struct spdk_bdev_desc *desc = NULL; 2680 struct spdk_io_channel *io_ch; 2681 struct spdk_bdev_opts bdev_opts = {}; 2682 int rc; 2683 void *buf = NULL; 2684 struct iovec iovs[2]; 2685 int iovcnt; 2686 uint64_t alignment; 2687 2688 spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts)); 2689 bdev_opts.bdev_io_pool_size = 20; 2690 bdev_opts.bdev_io_cache_size = 2; 2691 2692 rc = spdk_bdev_set_opts(&bdev_opts); 2693 CU_ASSERT(rc == 0); 2694 spdk_bdev_initialize(bdev_init_cb, NULL); 2695 2696 fn_table.submit_request = stub_submit_request_get_buf; 2697 bdev = allocate_bdev("bdev0"); 2698 2699 rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc); 2700 CU_ASSERT(rc == 0); 2701 CU_ASSERT(desc != NULL); 2702 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 2703 io_ch = spdk_bdev_get_io_channel(desc); 2704 CU_ASSERT(io_ch != NULL); 2705 2706 /* Create aligned buffer */ 2707 rc = posix_memalign(&buf, 4096, 8192); 2708 SPDK_CU_ASSERT_FATAL(rc == 0); 2709 2710 /* Pass aligned single buffer with no alignment required */ 2711 alignment = 1; 2712 bdev->required_alignment = spdk_u32log2(alignment); 2713 2714 rc = spdk_bdev_write_blocks(desc, io_ch, buf, 0, 1, io_done, NULL); 2715 CU_ASSERT(rc == 0); 2716 stub_complete_io(1); 2717 CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt, 2718 alignment)); 2719 2720 rc = spdk_bdev_read_blocks(desc, io_ch, buf, 0, 1, io_done, NULL); 2721 CU_ASSERT(rc == 0); 2722 stub_complete_io(1); 2723 CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt, 2724 alignment)); 2725 2726 /* Pass unaligned single buffer with no alignment required */ 2727 alignment = 1; 2728 bdev->required_alignment = spdk_u32log2(alignment); 2729 2730 rc = spdk_bdev_write_blocks(desc, io_ch, buf + 4, 0, 1, io_done, NULL); 2731 CU_ASSERT(rc == 0); 2732 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0); 2733 CU_ASSERT(g_bdev_io->u.bdev.iovs[0].iov_base == buf + 4); 2734 stub_complete_io(1); 2735 2736 rc = spdk_bdev_read_blocks(desc, io_ch, buf + 4, 0, 1, io_done, NULL); 2737 CU_ASSERT(rc == 0); 2738 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0); 2739 CU_ASSERT(g_bdev_io->u.bdev.iovs[0].iov_base == buf + 4); 2740 stub_complete_io(1); 2741 2742 /* Pass unaligned single buffer with 512 alignment required */ 2743 alignment = 512; 2744 bdev->required_alignment = spdk_u32log2(alignment); 2745 2746 rc = spdk_bdev_write_blocks(desc, io_ch, buf + 4, 0, 1, io_done, NULL); 2747 CU_ASSERT(rc == 0); 2748 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 1); 2749 CU_ASSERT(g_bdev_io->u.bdev.iovs == &g_bdev_io->internal.bounce_iov); 2750 CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt, 2751 alignment)); 2752 stub_complete_io(1); 2753 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0); 2754 2755 rc = spdk_bdev_read_blocks(desc, io_ch, buf + 4, 0, 1, io_done, NULL); 2756 CU_ASSERT(rc == 0); 2757 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 1); 2758 CU_ASSERT(g_bdev_io->u.bdev.iovs == &g_bdev_io->internal.bounce_iov); 2759 CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt, 
2760 alignment)); 2761 stub_complete_io(1); 2762 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0); 2763 2764 /* Pass unaligned single buffer with 4096 alignment required */ 2765 alignment = 4096; 2766 bdev->required_alignment = spdk_u32log2(alignment); 2767 2768 rc = spdk_bdev_write_blocks(desc, io_ch, buf + 8, 0, 1, io_done, NULL); 2769 CU_ASSERT(rc == 0); 2770 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 1); 2771 CU_ASSERT(g_bdev_io->u.bdev.iovs == &g_bdev_io->internal.bounce_iov); 2772 CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt, 2773 alignment)); 2774 stub_complete_io(1); 2775 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0); 2776 2777 rc = spdk_bdev_read_blocks(desc, io_ch, buf + 8, 0, 1, io_done, NULL); 2778 CU_ASSERT(rc == 0); 2779 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 1); 2780 CU_ASSERT(g_bdev_io->u.bdev.iovs == &g_bdev_io->internal.bounce_iov); 2781 CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt, 2782 alignment)); 2783 stub_complete_io(1); 2784 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0); 2785 2786 /* Pass aligned iovs with no alignment required */ 2787 alignment = 1; 2788 bdev->required_alignment = spdk_u32log2(alignment); 2789 2790 iovcnt = 1; 2791 iovs[0].iov_base = buf; 2792 iovs[0].iov_len = 512; 2793 2794 rc = spdk_bdev_writev(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL); 2795 CU_ASSERT(rc == 0); 2796 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0); 2797 stub_complete_io(1); 2798 CU_ASSERT(g_bdev_io->u.bdev.iovs[0].iov_base == iovs[0].iov_base); 2799 2800 rc = spdk_bdev_readv(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL); 2801 CU_ASSERT(rc == 0); 2802 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0); 2803 stub_complete_io(1); 2804 CU_ASSERT(g_bdev_io->u.bdev.iovs[0].iov_base == iovs[0].iov_base); 2805 2806 /* Pass unaligned iovs with no alignment required */ 2807 alignment = 1; 2808 bdev->required_alignment = spdk_u32log2(alignment); 2809 2810 iovcnt = 2; 2811 iovs[0].iov_base = buf + 16; 2812 iovs[0].iov_len = 256; 2813 iovs[1].iov_base = buf + 16 + 256 + 32; 2814 iovs[1].iov_len = 256; 2815 2816 rc = spdk_bdev_writev(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL); 2817 CU_ASSERT(rc == 0); 2818 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0); 2819 stub_complete_io(1); 2820 CU_ASSERT(g_bdev_io->u.bdev.iovs[0].iov_base == iovs[0].iov_base); 2821 2822 rc = spdk_bdev_readv(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL); 2823 CU_ASSERT(rc == 0); 2824 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0); 2825 stub_complete_io(1); 2826 CU_ASSERT(g_bdev_io->u.bdev.iovs[0].iov_base == iovs[0].iov_base); 2827 2828 /* Pass unaligned iov with 2048 alignment required */ 2829 alignment = 2048; 2830 bdev->required_alignment = spdk_u32log2(alignment); 2831 2832 iovcnt = 2; 2833 iovs[0].iov_base = buf + 16; 2834 iovs[0].iov_len = 256; 2835 iovs[1].iov_base = buf + 16 + 256 + 32; 2836 iovs[1].iov_len = 256; 2837 2838 rc = spdk_bdev_writev(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL); 2839 CU_ASSERT(rc == 0); 2840 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == iovcnt); 2841 CU_ASSERT(g_bdev_io->u.bdev.iovs == &g_bdev_io->internal.bounce_iov); 2842 CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt, 2843 alignment)); 2844 stub_complete_io(1); 2845 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0); 2846 2847 rc = spdk_bdev_readv(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL); 2848 CU_ASSERT(rc == 0); 2849 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == iovcnt); 2850 
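/* The readv takes the same bounce-buffer path as the writev above. */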
CU_ASSERT(g_bdev_io->u.bdev.iovs == &g_bdev_io->internal.bounce_iov); 2851 CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt, 2852 alignment)); 2853 stub_complete_io(1); 2854 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0); 2855 2856 /* Pass iov without allocated buffer without alignment required */ 2857 alignment = 1; 2858 bdev->required_alignment = spdk_u32log2(alignment); 2859 2860 iovcnt = 1; 2861 iovs[0].iov_base = NULL; 2862 iovs[0].iov_len = 0; 2863 2864 rc = spdk_bdev_readv(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL); 2865 CU_ASSERT(rc == 0); 2866 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0); 2867 CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt, 2868 alignment)); 2869 stub_complete_io(1); 2870 2871 /* Pass iov without allocated buffer with 1024 alignment required */ 2872 alignment = 1024; 2873 bdev->required_alignment = spdk_u32log2(alignment); 2874 2875 iovcnt = 1; 2876 iovs[0].iov_base = NULL; 2877 iovs[0].iov_len = 0; 2878 2879 rc = spdk_bdev_readv(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL); 2880 CU_ASSERT(rc == 0); 2881 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0); 2882 CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt, 2883 alignment)); 2884 stub_complete_io(1); 2885 2886 spdk_put_io_channel(io_ch); 2887 spdk_bdev_close(desc); 2888 free_bdev(bdev); 2889 fn_table.submit_request = stub_submit_request; 2890 spdk_bdev_finish(bdev_fini_cb, NULL); 2891 poll_threads(); 2892 2893 free(buf); 2894 } 2895 2896 static void 2897 bdev_io_alignment_with_boundary(void) 2898 { 2899 struct spdk_bdev *bdev; 2900 struct spdk_bdev_desc *desc = NULL; 2901 struct spdk_io_channel *io_ch; 2902 struct spdk_bdev_opts bdev_opts = {}; 2903 int rc; 2904 void *buf = NULL; 2905 struct iovec iovs[2]; 2906 int iovcnt; 2907 uint64_t alignment; 2908 2909 spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts)); 2910 bdev_opts.bdev_io_pool_size = 20; 2911 bdev_opts.bdev_io_cache_size = 2; 2912 2913 bdev_opts.opts_size = sizeof(bdev_opts); 2914 rc = spdk_bdev_set_opts(&bdev_opts); 2915 CU_ASSERT(rc == 0); 2916 spdk_bdev_initialize(bdev_init_cb, NULL); 2917 2918 fn_table.submit_request = stub_submit_request_get_buf; 2919 bdev = allocate_bdev("bdev0"); 2920 2921 rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc); 2922 CU_ASSERT(rc == 0); 2923 CU_ASSERT(desc != NULL); 2924 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 2925 io_ch = spdk_bdev_get_io_channel(desc); 2926 CU_ASSERT(io_ch != NULL); 2927 2928 /* Create aligned buffer */ 2929 rc = posix_memalign(&buf, 4096, 131072); 2930 SPDK_CU_ASSERT_FATAL(rc == 0); 2931 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 2932 2933 /* 512 * 3 with 2 IO boundary, allocate small data buffer from bdev layer */ 2934 alignment = 512; 2935 bdev->required_alignment = spdk_u32log2(alignment); 2936 bdev->optimal_io_boundary = 2; 2937 bdev->split_on_optimal_io_boundary = true; 2938 2939 iovcnt = 1; 2940 iovs[0].iov_base = NULL; 2941 iovs[0].iov_len = 512 * 3; 2942 2943 rc = spdk_bdev_readv_blocks(desc, io_ch, iovs, iovcnt, 1, 3, io_done, NULL); 2944 CU_ASSERT(rc == 0); 2945 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 2946 stub_complete_io(2); 2947 2948 /* 8KiB with 16 IO boundary, allocate large data buffer from bdev layer */ 2949 alignment = 512; 2950 bdev->required_alignment = spdk_u32log2(alignment); 2951 bdev->optimal_io_boundary = 16; 2952 bdev->split_on_optimal_io_boundary = true; 2953 2954 iovcnt = 1; 2955 iovs[0].iov_base = NULL; 2956 iovs[0].iov_len 
= 512 * 16; 2957 2958 rc = spdk_bdev_readv_blocks(desc, io_ch, iovs, iovcnt, 1, 16, io_done, NULL); 2959 CU_ASSERT(rc == 0); 2960 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 2961 stub_complete_io(2); 2962 2963 /* 512 * 160 with 128 IO boundary, 63.5KiB + 16.5KiB for the two child requests */ 2964 alignment = 512; 2965 bdev->required_alignment = spdk_u32log2(alignment); 2966 bdev->optimal_io_boundary = 128; 2967 bdev->split_on_optimal_io_boundary = true; 2968 2969 iovcnt = 1; 2970 iovs[0].iov_base = buf + 16; 2971 iovs[0].iov_len = 512 * 160; 2972 rc = spdk_bdev_readv_blocks(desc, io_ch, iovs, iovcnt, 1, 160, io_done, NULL); 2973 CU_ASSERT(rc == 0); 2974 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 2975 stub_complete_io(2); 2976 2977 /* 512 * 3 with 2 IO boundary */ 2978 alignment = 512; 2979 bdev->required_alignment = spdk_u32log2(alignment); 2980 bdev->optimal_io_boundary = 2; 2981 bdev->split_on_optimal_io_boundary = true; 2982 2983 iovcnt = 2; 2984 iovs[0].iov_base = buf + 16; 2985 iovs[0].iov_len = 512; 2986 iovs[1].iov_base = buf + 16 + 512 + 32; 2987 iovs[1].iov_len = 1024; 2988 2989 rc = spdk_bdev_writev_blocks(desc, io_ch, iovs, iovcnt, 1, 3, io_done, NULL); 2990 CU_ASSERT(rc == 0); 2991 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 2992 stub_complete_io(2); 2993 2994 rc = spdk_bdev_readv_blocks(desc, io_ch, iovs, iovcnt, 1, 3, io_done, NULL); 2995 CU_ASSERT(rc == 0); 2996 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 2997 stub_complete_io(2); 2998 2999 /* 512 * 64 with 32 IO boundary */ 3000 bdev->optimal_io_boundary = 32; 3001 iovcnt = 2; 3002 iovs[0].iov_base = buf + 16; 3003 iovs[0].iov_len = 16384; 3004 iovs[1].iov_base = buf + 16 + 16384 + 32; 3005 iovs[1].iov_len = 16384; 3006 3007 rc = spdk_bdev_writev_blocks(desc, io_ch, iovs, iovcnt, 1, 64, io_done, NULL); 3008 CU_ASSERT(rc == 0); 3009 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 3); 3010 stub_complete_io(3); 3011 3012 rc = spdk_bdev_readv_blocks(desc, io_ch, iovs, iovcnt, 1, 64, io_done, NULL); 3013 CU_ASSERT(rc == 0); 3014 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 3); 3015 stub_complete_io(3); 3016 3017 /* 512 * 160 with 32 IO boundary */ 3018 iovcnt = 1; 3019 iovs[0].iov_base = buf + 16; 3020 iovs[0].iov_len = 16384 + 65536; 3021 3022 rc = spdk_bdev_writev_blocks(desc, io_ch, iovs, iovcnt, 1, 160, io_done, NULL); 3023 CU_ASSERT(rc == 0); 3024 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 6); 3025 stub_complete_io(6); 3026 3027 spdk_put_io_channel(io_ch); 3028 spdk_bdev_close(desc); 3029 free_bdev(bdev); 3030 fn_table.submit_request = stub_submit_request; 3031 spdk_bdev_finish(bdev_fini_cb, NULL); 3032 poll_threads(); 3033 3034 free(buf); 3035 } 3036 3037 static void 3038 histogram_status_cb(void *cb_arg, int status) 3039 { 3040 g_status = status; 3041 } 3042 3043 static void 3044 histogram_data_cb(void *cb_arg, int status, struct spdk_histogram_data *histogram) 3045 { 3046 g_status = status; 3047 g_histogram = histogram; 3048 } 3049 3050 static void 3051 histogram_io_count(void *ctx, uint64_t start, uint64_t end, uint64_t count, 3052 uint64_t total, uint64_t so_far) 3053 { 3054 g_count += count; 3055 } 3056 3057 static void 3058 bdev_histograms(void) 3059 { 3060 struct spdk_bdev *bdev; 3061 struct spdk_bdev_desc *desc = NULL; 3062 struct spdk_io_channel *ch; 3063 struct spdk_histogram_data *histogram; 3064 uint8_t buf[4096]; 3065 int rc; 3066 3067 spdk_bdev_initialize(bdev_init_cb, NULL); 3068 3069 bdev = allocate_bdev("bdev"); 3070 3071 rc =
static void
bdev_histograms(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc = NULL;
	struct spdk_io_channel *ch;
	struct spdk_histogram_data *histogram;
	uint8_t buf[4096];
	int rc;

	spdk_bdev_initialize(bdev_init_cb, NULL);

	bdev = allocate_bdev("bdev");

	rc = spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc);
	CU_ASSERT(rc == 0);
	CU_ASSERT(desc != NULL);
	CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc));

	ch = spdk_bdev_get_io_channel(desc);
	CU_ASSERT(ch != NULL);

	/* Enable histogram */
	g_status = -1;
	spdk_bdev_histogram_enable(bdev, histogram_status_cb, NULL, true);
	poll_threads();
	CU_ASSERT(g_status == 0);
	CU_ASSERT(bdev->internal.histogram_enabled == true);

	/* Allocate histogram */
	histogram = spdk_histogram_data_alloc();
	SPDK_CU_ASSERT_FATAL(histogram != NULL);

	/* Check if histogram is zeroed */
	spdk_bdev_histogram_get(bdev, histogram, histogram_data_cb, NULL);
	poll_threads();
	CU_ASSERT(g_status == 0);
	SPDK_CU_ASSERT_FATAL(g_histogram != NULL);

	g_count = 0;
	spdk_histogram_data_iterate(g_histogram, histogram_io_count, NULL);

	CU_ASSERT(g_count == 0);

	rc = spdk_bdev_write_blocks(desc, ch, buf, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);

	spdk_delay_us(10);
	stub_complete_io(1);
	poll_threads();

	rc = spdk_bdev_read_blocks(desc, ch, buf, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);

	spdk_delay_us(10);
	stub_complete_io(1);
	poll_threads();

	/* Check if histogram gathered data from all I/O channels */
	g_histogram = NULL;
	spdk_bdev_histogram_get(bdev, histogram, histogram_data_cb, NULL);
	poll_threads();
	CU_ASSERT(g_status == 0);
	CU_ASSERT(bdev->internal.histogram_enabled == true);
	SPDK_CU_ASSERT_FATAL(g_histogram != NULL);

	g_count = 0;
	spdk_histogram_data_iterate(g_histogram, histogram_io_count, NULL);
	CU_ASSERT(g_count == 2);

	/* Disable histogram */
	spdk_bdev_histogram_enable(bdev, histogram_status_cb, NULL, false);
	poll_threads();
	CU_ASSERT(g_status == 0);
	CU_ASSERT(bdev->internal.histogram_enabled == false);

	/* Try to run histogram commands on disabled bdev */
	spdk_bdev_histogram_get(bdev, histogram, histogram_data_cb, NULL);
	poll_threads();
	CU_ASSERT(g_status == -EFAULT);

	spdk_histogram_data_free(histogram);
	spdk_put_io_channel(ch);
	spdk_bdev_close(desc);
	free_bdev(bdev);
	spdk_bdev_finish(bdev_fini_cb, NULL);
	poll_threads();
}

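/*
 * _bdev_compare is run both with native COMPARE support and with COMPARE
 * emulated by the generic bdev layer. When emulated, the request reaches the
 * stub as a READ and g_compare_read_buf acts as the simulated disk contents,
 * with the comparison done above the driver; when native, the COMPARE itself
 * reaches the stub. Either way, mismatching data must be reported as
 * SPDK_BDEV_IO_STATUS_MISCOMPARE.
 */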
static void
_bdev_compare(bool emulated)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc = NULL;
	struct spdk_io_channel *ioch;
	struct ut_expected_io *expected_io;
	uint64_t offset, num_blocks;
	uint32_t num_completed;
	char aa_buf[512];
	char bb_buf[512];
	struct iovec compare_iov;
	uint8_t io_type;
	int rc;

	if (emulated) {
		io_type = SPDK_BDEV_IO_TYPE_READ;
	} else {
		io_type = SPDK_BDEV_IO_TYPE_COMPARE;
	}

	memset(aa_buf, 0xaa, sizeof(aa_buf));
	memset(bb_buf, 0xbb, sizeof(bb_buf));

	g_io_types_supported[SPDK_BDEV_IO_TYPE_COMPARE] = !emulated;

	spdk_bdev_initialize(bdev_init_cb, NULL);
	fn_table.submit_request = stub_submit_request_get_buf;
	bdev = allocate_bdev("bdev");

	rc = spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc);
	CU_ASSERT_EQUAL(rc, 0);
	SPDK_CU_ASSERT_FATAL(desc != NULL);
	CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc));
	ioch = spdk_bdev_get_io_channel(desc);
	SPDK_CU_ASSERT_FATAL(ioch != NULL);

	fn_table.submit_request = stub_submit_request_get_buf;
	g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS;

	offset = 50;
	num_blocks = 1;
	compare_iov.iov_base = aa_buf;
	compare_iov.iov_len = sizeof(aa_buf);

	expected_io = ut_alloc_expected_io(io_type, offset, num_blocks, 0);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	g_io_done = false;
	g_compare_read_buf = aa_buf;
	g_compare_read_buf_len = sizeof(aa_buf);
	rc = spdk_bdev_comparev_blocks(desc, ioch, &compare_iov, 1, offset, num_blocks, io_done, NULL);
	CU_ASSERT_EQUAL(rc, 0);
	num_completed = stub_complete_io(1);
	CU_ASSERT_EQUAL(num_completed, 1);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS);

	expected_io = ut_alloc_expected_io(io_type, offset, num_blocks, 0);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	g_io_done = false;
	g_compare_read_buf = bb_buf;
	g_compare_read_buf_len = sizeof(bb_buf);
	rc = spdk_bdev_comparev_blocks(desc, ioch, &compare_iov, 1, offset, num_blocks, io_done, NULL);
	CU_ASSERT_EQUAL(rc, 0);
	num_completed = stub_complete_io(1);
	CU_ASSERT_EQUAL(num_completed, 1);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_MISCOMPARE);

	spdk_put_io_channel(ioch);
	spdk_bdev_close(desc);
	free_bdev(bdev);
	fn_table.submit_request = stub_submit_request;
	spdk_bdev_finish(bdev_fini_cb, NULL);
	poll_threads();

	g_io_types_supported[SPDK_BDEV_IO_TYPE_COMPARE] = true;

	g_compare_read_buf = NULL;
}

static void
bdev_compare(void)
{
	_bdev_compare(true);
	_bdev_compare(false);
}

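/*
 * bdev_compare_and_write exercises the emulated compare-and-write sequence:
 * lock the LBA range, read and compare the range, write the new data only on
 * a match, then unlock. The poll_threads() calls around each step drive the
 * range lock and unlock messages; on miscompare the write stage must never
 * be submitted, which is why the final stub_complete_io(1) completes nothing.
 */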
static void
bdev_compare_and_write(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc = NULL;
	struct spdk_io_channel *ioch;
	struct ut_expected_io *expected_io;
	uint64_t offset, num_blocks;
	uint32_t num_completed;
	char aa_buf[512];
	char bb_buf[512];
	char cc_buf[512];
	char write_buf[512];
	struct iovec compare_iov;
	struct iovec write_iov;
	int rc;

	memset(aa_buf, 0xaa, sizeof(aa_buf));
	memset(bb_buf, 0xbb, sizeof(bb_buf));
	memset(cc_buf, 0xcc, sizeof(cc_buf));

	g_io_types_supported[SPDK_BDEV_IO_TYPE_COMPARE] = false;

	spdk_bdev_initialize(bdev_init_cb, NULL);
	fn_table.submit_request = stub_submit_request_get_buf;
	bdev = allocate_bdev("bdev");

	rc = spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc);
	CU_ASSERT_EQUAL(rc, 0);
	SPDK_CU_ASSERT_FATAL(desc != NULL);
	CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc));
	ioch = spdk_bdev_get_io_channel(desc);
	SPDK_CU_ASSERT_FATAL(ioch != NULL);

	fn_table.submit_request = stub_submit_request_get_buf;
	g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS;

	offset = 50;
	num_blocks = 1;
	compare_iov.iov_base = aa_buf;
	compare_iov.iov_len = sizeof(aa_buf);
	write_iov.iov_base = bb_buf;
	write_iov.iov_len = sizeof(bb_buf);

	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, offset, num_blocks, 0);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, offset, num_blocks, 0);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	g_io_done = false;
	g_compare_read_buf = aa_buf;
	g_compare_read_buf_len = sizeof(aa_buf);
	memset(write_buf, 0, sizeof(write_buf));
	g_compare_write_buf = write_buf;
	g_compare_write_buf_len = sizeof(write_buf);
	rc = spdk_bdev_comparev_and_writev_blocks(desc, ioch, &compare_iov, 1, &write_iov, 1,
			offset, num_blocks, io_done, NULL);
	/* Trigger range locking */
	poll_threads();
	CU_ASSERT_EQUAL(rc, 0);
	num_completed = stub_complete_io(1);
	CU_ASSERT_EQUAL(num_completed, 1);
	CU_ASSERT(g_io_done == false);
	num_completed = stub_complete_io(1);
	/* Trigger range unlocking */
	poll_threads();
	CU_ASSERT_EQUAL(num_completed, 1);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS);
	CU_ASSERT(memcmp(write_buf, bb_buf, sizeof(write_buf)) == 0);

	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, offset, num_blocks, 0);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	g_io_done = false;
	g_compare_read_buf = cc_buf;
	g_compare_read_buf_len = sizeof(cc_buf);
	memset(write_buf, 0, sizeof(write_buf));
	g_compare_write_buf = write_buf;
	g_compare_write_buf_len = sizeof(write_buf);
	rc = spdk_bdev_comparev_and_writev_blocks(desc, ioch, &compare_iov, 1, &write_iov, 1,
			offset, num_blocks, io_done, NULL);
	/* Trigger range locking */
	poll_threads();
	CU_ASSERT_EQUAL(rc, 0);
	num_completed = stub_complete_io(1);
	/* Trigger range unlocking earlier because we expect an error here */
	poll_threads();
	CU_ASSERT_EQUAL(num_completed, 1);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_MISCOMPARE);
	num_completed = stub_complete_io(1);
	CU_ASSERT_EQUAL(num_completed, 0);

	spdk_put_io_channel(ioch);
	spdk_bdev_close(desc);
	free_bdev(bdev);
	fn_table.submit_request = stub_submit_request;
	spdk_bdev_finish(bdev_fini_cb, NULL);
	poll_threads();

	g_io_types_supported[SPDK_BDEV_IO_TYPE_COMPARE] = true;

	g_compare_read_buf = NULL;
	g_compare_write_buf = NULL;
}

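/*
 * bdev_write_zeroes checks the fallback path used when WRITE_ZEROES is not
 * supported: the bdev layer emulates the request with regular WRITEs sourced
 * from the shared zero buffer, so a single child write can cover at most
 * ZERO_BUFFER_SIZE / blocklen blocks (or ZERO_BUFFER_SIZE / (blocklen +
 * md_len) blocks when metadata is kept in a separate buffer). That limit
 * determines the expected number of child requests below.
 */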
static void
bdev_write_zeroes(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc = NULL;
	struct spdk_io_channel *ioch;
	struct ut_expected_io *expected_io;
	uint64_t offset, num_io_blocks, num_blocks;
	uint32_t num_completed, num_requests;
	int rc;

	spdk_bdev_initialize(bdev_init_cb, NULL);
	bdev = allocate_bdev("bdev");

	rc = spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc);
	CU_ASSERT_EQUAL(rc, 0);
	SPDK_CU_ASSERT_FATAL(desc != NULL);
	CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc));
	ioch = spdk_bdev_get_io_channel(desc);
	SPDK_CU_ASSERT_FATAL(ioch != NULL);

	fn_table.submit_request = stub_submit_request;
	g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS;

	/* First test that if the bdev supports write_zeroes, the request won't be split */
	bdev->md_len = 0;
	bdev->blocklen = 4096;
	num_blocks = (ZERO_BUFFER_SIZE / bdev->blocklen) * 2;

	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, 0, num_blocks, 0);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
	rc = spdk_bdev_write_zeroes_blocks(desc, ioch, 0, num_blocks, io_done, NULL);
	CU_ASSERT_EQUAL(rc, 0);
	num_completed = stub_complete_io(1);
	CU_ASSERT_EQUAL(num_completed, 1);

	/* Check that if write zeroes is not supported it'll be replaced by regular writes */
	ut_enable_io_type(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, false);
	num_io_blocks = ZERO_BUFFER_SIZE / bdev->blocklen;
	num_requests = 2;
	num_blocks = (ZERO_BUFFER_SIZE / bdev->blocklen) * num_requests;

	for (offset = 0; offset < num_requests; ++offset) {
		expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE,
						   offset * num_io_blocks, num_io_blocks, 0);
		TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
	}

	rc = spdk_bdev_write_zeroes_blocks(desc, ioch, 0, num_blocks, io_done, NULL);
	CU_ASSERT_EQUAL(rc, 0);
	num_completed = stub_complete_io(num_requests);
	CU_ASSERT_EQUAL(num_completed, num_requests);

	/* Check that the splitting is correct if bdev has interleaved metadata */
	bdev->md_interleave = true;
	bdev->md_len = 64;
	bdev->blocklen = 4096 + 64;
	num_blocks = (ZERO_BUFFER_SIZE / bdev->blocklen) * 2;

	num_requests = offset = 0;
	while (offset < num_blocks) {
		num_io_blocks = spdk_min(ZERO_BUFFER_SIZE / bdev->blocklen, num_blocks - offset);
		expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE,
						   offset, num_io_blocks, 0);
		TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
		offset += num_io_blocks;
		num_requests++;
	}

	rc = spdk_bdev_write_zeroes_blocks(desc, ioch, 0, num_blocks, io_done, NULL);
	CU_ASSERT_EQUAL(rc, 0);
	num_completed = stub_complete_io(num_requests);
	CU_ASSERT_EQUAL(num_completed, num_requests);
	num_completed = stub_complete_io(num_requests);
	CU_ASSERT_EQUAL(num_completed, 0);

	/* Check the same for a separate metadata buffer */
	bdev->md_interleave = false;
	bdev->md_len = 64;
	bdev->blocklen = 4096;

	num_requests = offset = 0;
	while (offset < num_blocks) {
		num_io_blocks = spdk_min(ZERO_BUFFER_SIZE / (bdev->blocklen + bdev->md_len), num_blocks);
		expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE,
						   offset, num_io_blocks, 0);
		expected_io->md_buf = (char *)g_bdev_mgr.zero_buffer + num_io_blocks * bdev->blocklen;
		TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
		offset += num_io_blocks;
		num_requests++;
	}

	rc = spdk_bdev_write_zeroes_blocks(desc, ioch, 0, num_blocks, io_done, NULL);
	CU_ASSERT_EQUAL(rc, 0);
	num_completed = stub_complete_io(num_requests);
	CU_ASSERT_EQUAL(num_completed, num_requests);

	ut_enable_io_type(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, true);
	spdk_put_io_channel(ioch);
	spdk_bdev_close(desc);
	free_bdev(bdev);
	spdk_bdev_finish(bdev_fini_cb, NULL);
	poll_threads();
}

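/*
 * The next two tests simulate hot-unplug with spdk_bdev_unregister():
 * bdev_open_while_hotremove checks that a new open fails with -ENODEV once
 * unregistration has started, and bdev_close_while_hotremove checks that
 * closing the descriptor while the remove event is in flight suppresses the
 * event callback and lets the delayed unregister callback complete.
 */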
static void
bdev_open_while_hotremove(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc[2] = {};
	int rc;

	bdev = allocate_bdev("bdev");

	rc = spdk_bdev_open_ext("bdev", false, bdev_ut_event_cb, NULL, &desc[0]);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(desc[0] != NULL);
	CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc[0]));

	spdk_bdev_unregister(bdev, NULL, NULL);

	rc = spdk_bdev_open_ext("bdev", false, bdev_ut_event_cb, NULL, &desc[1]);
	CU_ASSERT(rc == -ENODEV);
	SPDK_CU_ASSERT_FATAL(desc[1] == NULL);

	spdk_bdev_close(desc[0]);
	free_bdev(bdev);
}

static void
bdev_close_while_hotremove(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc = NULL;
	int rc = 0;

	bdev = allocate_bdev("bdev");

	rc = spdk_bdev_open_ext("bdev", true, bdev_open_cb1, &desc, &desc);
	CU_ASSERT_EQUAL(rc, 0);
	SPDK_CU_ASSERT_FATAL(desc != NULL);
	CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc));

	/* Simulate hot-unplug by unregistering bdev */
	g_event_type1 = 0xFF;
	g_unregister_arg = NULL;
	g_unregister_rc = -1;
	spdk_bdev_unregister(bdev, bdev_unregister_cb, (void *)0x12345678);
	/* Close device while remove event is in flight */
	spdk_bdev_close(desc);

	/* Ensure that the unregister callback is delayed */
	CU_ASSERT_EQUAL(g_unregister_arg, NULL);
	CU_ASSERT_EQUAL(g_unregister_rc, -1);

	poll_threads();

	/* Event callback shall not be issued because device was closed */
	CU_ASSERT_EQUAL(g_event_type1, 0xFF);
	/* Unregister callback is issued */
	CU_ASSERT_EQUAL(g_unregister_arg, (void *)0x12345678);
	CU_ASSERT_EQUAL(g_unregister_rc, 0);

	free_bdev(bdev);
}

static void
bdev_open_ext(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc1 = NULL;
	struct spdk_bdev_desc *desc2 = NULL;
	int rc = 0;

	bdev = allocate_bdev("bdev");

	rc = spdk_bdev_open_ext("bdev", true, NULL, NULL, &desc1);
	CU_ASSERT_EQUAL(rc, -EINVAL);

	rc = spdk_bdev_open_ext("bdev", true, bdev_open_cb1, &desc1, &desc1);
	CU_ASSERT_EQUAL(rc, 0);

	rc = spdk_bdev_open_ext("bdev", true, bdev_open_cb2, &desc2, &desc2);
	CU_ASSERT_EQUAL(rc, 0);

	g_event_type1 = 0xFF;
	g_event_type2 = 0xFF;

	/* Simulate hot-unplug by unregistering bdev */
	spdk_bdev_unregister(bdev, NULL, NULL);
	poll_threads();

	/* Check if correct events have been triggered in event callback fn */
	CU_ASSERT_EQUAL(g_event_type1, SPDK_BDEV_EVENT_REMOVE);
	CU_ASSERT_EQUAL(g_event_type2, SPDK_BDEV_EVENT_REMOVE);

	free_bdev(bdev);
	poll_threads();
}

struct timeout_io_cb_arg {
	struct iovec iov;
	uint8_t type;
};

static int
bdev_channel_count_submitted_io(struct spdk_bdev_channel *ch)
{
	struct spdk_bdev_io *bdev_io;
	int n = 0;

	if (!ch) {
		return -1;
	}

	TAILQ_FOREACH(bdev_io, &ch->io_submitted, internal.ch_link) {
		n++;
	}

	return n;
}

static void
bdev_channel_io_timeout_cb(void *cb_arg, struct spdk_bdev_io *bdev_io)
{
	struct timeout_io_cb_arg *ctx = cb_arg;

	ctx->type = bdev_io->type;
	ctx->iov.iov_base = bdev_io->iov.iov_base;
	ctx->iov.iov_len = bdev_io->iov.iov_len;
}

static void
bdev_set_io_timeout(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc = NULL;
	struct spdk_io_channel *io_ch = NULL;
	struct spdk_bdev_channel *bdev_ch = NULL;
	struct timeout_io_cb_arg cb_arg;

	spdk_bdev_initialize(bdev_init_cb, NULL);

	bdev = allocate_bdev("bdev");

	CU_ASSERT(spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc) == 0);
	SPDK_CU_ASSERT_FATAL(desc != NULL);
	CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc));

	io_ch = spdk_bdev_get_io_channel(desc);
	CU_ASSERT(io_ch != NULL);

	bdev_ch = spdk_io_channel_get_ctx(io_ch);
	CU_ASSERT(TAILQ_EMPTY(&bdev_ch->io_submitted));

	/* Part 1.
	 * Check the bdev_ch->io_submitted list: it must link the user-submitted
	 * I/O, and only those.
	 */
	CU_ASSERT(spdk_bdev_read(desc, io_ch, (void *)0x1000, 0, 4096, io_done, NULL) == 0);
	CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 1);
	CU_ASSERT(spdk_bdev_write(desc, io_ch, (void *)0x2000, 0, 4096, io_done, NULL) == 0);
	CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 2);
	stub_complete_io(1);
	CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 1);
	stub_complete_io(1);
	CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 0);

	/* Split IO */
	bdev->optimal_io_boundary = 16;
	bdev->split_on_optimal_io_boundary = true;

	/* Now test that a single-vector command is split correctly.
	 * Offset 14, length 8, payload 0xF000
	 *  Child - Offset 14, length 2, payload 0xF000
	 *  Child - Offset 16, length 6, payload 0xF000 + 2 * 512
	 *
	 * Set up the expected values before calling spdk_bdev_read_blocks
	 */
	CU_ASSERT(spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 8, io_done, NULL) == 0);
	/* We count all submitted I/O, including those generated by splitting. */
	CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 3);
	stub_complete_io(1);
	CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 2);
	stub_complete_io(1);
	CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 0);

	/* Also include the reset I/O */
	CU_ASSERT(spdk_bdev_reset(desc, io_ch, io_done, NULL) == 0);
	CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 1);
	poll_threads();
	stub_complete_io(1);
	poll_threads();
	CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 0);

	/* Part 2.
	 * Check the registration of the per-descriptor timeout poller.
	 */

	/* Successfully set the timeout */
	CU_ASSERT(spdk_bdev_set_timeout(desc, 30, bdev_channel_io_timeout_cb, &cb_arg) == 0);
	CU_ASSERT(desc->io_timeout_poller != NULL);
	CU_ASSERT(desc->timeout_in_sec == 30);
	CU_ASSERT(desc->cb_fn == bdev_channel_io_timeout_cb);
	CU_ASSERT(desc->cb_arg == &cb_arg);

	/* Change the timeout limit */
	CU_ASSERT(spdk_bdev_set_timeout(desc, 20, bdev_channel_io_timeout_cb, &cb_arg) == 0);
	CU_ASSERT(desc->io_timeout_poller != NULL);
	CU_ASSERT(desc->timeout_in_sec == 20);
	CU_ASSERT(desc->cb_fn == bdev_channel_io_timeout_cb);
	CU_ASSERT(desc->cb_arg == &cb_arg);

	/* Disable the timeout */
	CU_ASSERT(spdk_bdev_set_timeout(desc, 0, NULL, NULL) == 0);
	CU_ASSERT(desc->io_timeout_poller == NULL);

	/* Part 3.
	 * Check that a timed-out I/O is caught and that it is exactly the
	 * submitted one. Time advances via spdk_delay_us() against the mocked
	 * tick counter, so 15 + 15 seconds of delay cross the 30-second limit.
	 */
	memset(&cb_arg, 0, sizeof(cb_arg));
	CU_ASSERT(spdk_bdev_set_timeout(desc, 30, bdev_channel_io_timeout_cb, &cb_arg) == 0);
	CU_ASSERT(spdk_bdev_write_blocks(desc, io_ch, (void *)0x1000, 0, 1, io_done, NULL) == 0);

	/* Don't reach the limit */
	spdk_delay_us(15 * spdk_get_ticks_hz());
	poll_threads();
	CU_ASSERT(cb_arg.type == 0);
	CU_ASSERT(cb_arg.iov.iov_base == (void *)0x0);
	CU_ASSERT(cb_arg.iov.iov_len == 0);

	/* 15 + 15 = 30: the limit is reached */
	spdk_delay_us(15 * spdk_get_ticks_hz());
	poll_threads();
	CU_ASSERT(cb_arg.type == SPDK_BDEV_IO_TYPE_WRITE);
	CU_ASSERT(cb_arg.iov.iov_base == (void *)0x1000);
	CU_ASSERT(cb_arg.iov.iov_len == 1 * bdev->blocklen);
	stub_complete_io(1);

	/* Use the same split I/O as above and check the reported I/O */
	memset(&cb_arg, 0, sizeof(cb_arg));
	CU_ASSERT(spdk_bdev_write_blocks(desc, io_ch, (void *)0xF000, 14, 8, io_done, NULL) == 0);

	/* The first child completes in time */
	spdk_delay_us(15 * spdk_get_ticks_hz());
	poll_threads();
	stub_complete_io(1);
	CU_ASSERT(cb_arg.type == 0);
	CU_ASSERT(cb_arg.iov.iov_base == (void *)0x0);
	CU_ASSERT(cb_arg.iov.iov_len == 0);

	/* The second child reaches the limit */
	spdk_delay_us(15 * spdk_get_ticks_hz());
	poll_threads();
	CU_ASSERT(cb_arg.type == SPDK_BDEV_IO_TYPE_WRITE);
	CU_ASSERT(cb_arg.iov.iov_base == (void *)0xF000);
	CU_ASSERT(cb_arg.iov.iov_len == 8 * bdev->blocklen);
	stub_complete_io(1);

	/* Also include the reset I/O */
	memset(&cb_arg, 0, sizeof(cb_arg));
	CU_ASSERT(spdk_bdev_reset(desc, io_ch, io_done, NULL) == 0);
	spdk_delay_us(30 * spdk_get_ticks_hz());
	poll_threads();
	CU_ASSERT(cb_arg.type == SPDK_BDEV_IO_TYPE_RESET);
	stub_complete_io(1);
	poll_threads();

	spdk_put_io_channel(io_ch);
	spdk_bdev_close(desc);
	free_bdev(bdev);
	spdk_bdev_finish(bdev_fini_cb, NULL);
	poll_threads();
}

static void
lba_range_overlap(void)
{
	struct lba_range r1, r2;

	r1.offset = 100;
	r1.length = 50;

	r2.offset = 0;
	r2.length = 1;
	CU_ASSERT(!bdev_lba_range_overlapped(&r1, &r2));

	r2.offset = 0;
	r2.length = 100;
	CU_ASSERT(!bdev_lba_range_overlapped(&r1, &r2));

	r2.offset = 0;
	r2.length = 110;
	CU_ASSERT(bdev_lba_range_overlapped(&r1, &r2));

	r2.offset = 100;
	r2.length = 10;
	CU_ASSERT(bdev_lba_range_overlapped(&r1, &r2));

	r2.offset = 110;
	r2.length = 20;
	CU_ASSERT(bdev_lba_range_overlapped(&r1, &r2));

	r2.offset = 140;
	r2.length = 150;
	CU_ASSERT(bdev_lba_range_overlapped(&r1, &r2));

	r2.offset = 130;
	r2.length = 200;
	CU_ASSERT(bdev_lba_range_overlapped(&r1, &r2));

	r2.offset = 150;
	r2.length = 100;
	CU_ASSERT(!bdev_lba_range_overlapped(&r1, &r2));

	r2.offset = 110;
	r2.length = 0;
	CU_ASSERT(!bdev_lba_range_overlapped(&r1, &r2));
}

static bool g_lock_lba_range_done;
static bool g_unlock_lba_range_done;

static void
lock_lba_range_done(void *ctx, int status)
{
	g_lock_lba_range_done = true;
}

static void
unlock_lba_range_done(void *ctx, int status)
{
	g_unlock_lba_range_done = true;
}

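/*
 * The following tests exercise bdev's internal LBA range locking, the
 * mechanism compare-and-write builds on: bdev_lock_lba_range() grants a lock
 * once no conflicting write I/O is outstanding, parks conflicting requests on
 * bdev->internal.pending_locked_ranges, and bdev_unlock_lba_range() must
 * exactly match a previously locked range.
 */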
static void
lock_lba_range_check_ranges(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc = NULL;
	struct spdk_io_channel *io_ch;
	struct spdk_bdev_channel *channel;
	struct lba_range *range;
	int ctx1;
	int rc;

	spdk_bdev_initialize(bdev_init_cb, NULL);

	bdev = allocate_bdev("bdev0");

	rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc);
	CU_ASSERT(rc == 0);
	CU_ASSERT(desc != NULL);
	CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc));
	io_ch = spdk_bdev_get_io_channel(desc);
	CU_ASSERT(io_ch != NULL);
	channel = spdk_io_channel_get_ctx(io_ch);

	g_lock_lba_range_done = false;
	rc = bdev_lock_lba_range(desc, io_ch, 20, 10, lock_lba_range_done, &ctx1);
	CU_ASSERT(rc == 0);
	poll_threads();

	CU_ASSERT(g_lock_lba_range_done == true);
	range = TAILQ_FIRST(&channel->locked_ranges);
	SPDK_CU_ASSERT_FATAL(range != NULL);
	CU_ASSERT(range->offset == 20);
	CU_ASSERT(range->length == 10);
	CU_ASSERT(range->owner_ch == channel);

	/* Unlocks must exactly match a lock. */
	g_unlock_lba_range_done = false;
	rc = bdev_unlock_lba_range(desc, io_ch, 20, 1, unlock_lba_range_done, &ctx1);
	CU_ASSERT(rc == -EINVAL);
	CU_ASSERT(g_unlock_lba_range_done == false);

	rc = bdev_unlock_lba_range(desc, io_ch, 20, 10, unlock_lba_range_done, &ctx1);
	CU_ASSERT(rc == 0);
	spdk_delay_us(100);
	poll_threads();

	CU_ASSERT(g_unlock_lba_range_done == true);
	CU_ASSERT(TAILQ_EMPTY(&channel->locked_ranges));

	spdk_put_io_channel(io_ch);
	spdk_bdev_close(desc);
	free_bdev(bdev);
	spdk_bdev_finish(bdev_fini_cb, NULL);
	poll_threads();
}

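/*
 * lock_lba_range_with_io_outstanding checks how locks interact with I/O in
 * flight: an outstanding read does not delay the lock, while an outstanding
 * write keeps the lock pending until that write completes. The pending range
 * is still put on the channel's locked_ranges list right away, so no new
 * write I/O can start in the meantime.
 */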
static void
lock_lba_range_with_io_outstanding(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc = NULL;
	struct spdk_io_channel *io_ch;
	struct spdk_bdev_channel *channel;
	struct lba_range *range;
	char buf[4096];
	int ctx1;
	int rc;

	spdk_bdev_initialize(bdev_init_cb, NULL);

	bdev = allocate_bdev("bdev0");

	rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc);
	CU_ASSERT(rc == 0);
	CU_ASSERT(desc != NULL);
	CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc));
	io_ch = spdk_bdev_get_io_channel(desc);
	CU_ASSERT(io_ch != NULL);
	channel = spdk_io_channel_get_ctx(io_ch);

	g_io_done = false;
	rc = spdk_bdev_read_blocks(desc, io_ch, buf, 20, 1, io_done, &ctx1);
	CU_ASSERT(rc == 0);

	g_lock_lba_range_done = false;
	rc = bdev_lock_lba_range(desc, io_ch, 20, 10, lock_lba_range_done, &ctx1);
	CU_ASSERT(rc == 0);
	poll_threads();

	/* The lock should immediately become valid, since there are no outstanding
	 * write I/O.
	 */
	CU_ASSERT(g_io_done == false);
	CU_ASSERT(g_lock_lba_range_done == true);
	range = TAILQ_FIRST(&channel->locked_ranges);
	SPDK_CU_ASSERT_FATAL(range != NULL);
	CU_ASSERT(range->offset == 20);
	CU_ASSERT(range->length == 10);
	CU_ASSERT(range->owner_ch == channel);
	CU_ASSERT(range->locked_ctx == &ctx1);

	rc = bdev_unlock_lba_range(desc, io_ch, 20, 10, unlock_lba_range_done, &ctx1);
	CU_ASSERT(rc == 0);
	stub_complete_io(1);
	spdk_delay_us(100);
	poll_threads();

	CU_ASSERT(TAILQ_EMPTY(&channel->locked_ranges));

	/* Now try again, but with a write I/O. */
	g_io_done = false;
	rc = spdk_bdev_write_blocks(desc, io_ch, buf, 20, 1, io_done, &ctx1);
	CU_ASSERT(rc == 0);

	g_lock_lba_range_done = false;
	rc = bdev_lock_lba_range(desc, io_ch, 20, 10, lock_lba_range_done, &ctx1);
	CU_ASSERT(rc == 0);
	poll_threads();

	/* The lock should not be fully valid yet, since a write I/O is outstanding.
	 * But note that the range should be on the channel's locked_ranges list, to
	 * make sure no new write I/O are started.
	 */
	CU_ASSERT(g_io_done == false);
	CU_ASSERT(g_lock_lba_range_done == false);
	range = TAILQ_FIRST(&channel->locked_ranges);
	SPDK_CU_ASSERT_FATAL(range != NULL);
	CU_ASSERT(range->offset == 20);
	CU_ASSERT(range->length == 10);

	/* Complete the write I/O. This should make the lock valid (checked by confirming
	 * our callback was invoked).
	 */
	stub_complete_io(1);
	spdk_delay_us(100);
	poll_threads();
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_lock_lba_range_done == true);

	rc = bdev_unlock_lba_range(desc, io_ch, 20, 10, unlock_lba_range_done, &ctx1);
	CU_ASSERT(rc == 0);
	poll_threads();

	CU_ASSERT(TAILQ_EMPTY(&channel->locked_ranges));

	spdk_put_io_channel(io_ch);
	spdk_bdev_close(desc);
	free_bdev(bdev);
	spdk_bdev_finish(bdev_fini_cb, NULL);
	poll_threads();
}

/*
 * lock_lba_range_overlapped queues locks that overlap an existing lock on
 * bdev->internal.pending_locked_ranges and re-checks the pending list each
 * time a lock is released, so a pending range is granted only once it no
 * longer conflicts with any currently held lock.
 */
static void
lock_lba_range_overlapped(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc = NULL;
	struct spdk_io_channel *io_ch;
	struct spdk_bdev_channel *channel;
	struct lba_range *range;
	int ctx1;
	int rc;

	spdk_bdev_initialize(bdev_init_cb, NULL);

	bdev = allocate_bdev("bdev0");

	rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc);
	CU_ASSERT(rc == 0);
	CU_ASSERT(desc != NULL);
	CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc));
	io_ch = spdk_bdev_get_io_channel(desc);
	CU_ASSERT(io_ch != NULL);
	channel = spdk_io_channel_get_ctx(io_ch);

	/* Lock range 20-29. */
	g_lock_lba_range_done = false;
	rc = bdev_lock_lba_range(desc, io_ch, 20, 10, lock_lba_range_done, &ctx1);
	CU_ASSERT(rc == 0);
	poll_threads();

	CU_ASSERT(g_lock_lba_range_done == true);
	range = TAILQ_FIRST(&channel->locked_ranges);
	SPDK_CU_ASSERT_FATAL(range != NULL);
	CU_ASSERT(range->offset == 20);
	CU_ASSERT(range->length == 10);

	/* Try to lock range 25-39. It should not lock immediately, since it overlaps with
	 * 20-29.
	 */
	g_lock_lba_range_done = false;
	rc = bdev_lock_lba_range(desc, io_ch, 25, 15, lock_lba_range_done, &ctx1);
	CU_ASSERT(rc == 0);
	poll_threads();

	CU_ASSERT(g_lock_lba_range_done == false);
	range = TAILQ_FIRST(&bdev->internal.pending_locked_ranges);
	SPDK_CU_ASSERT_FATAL(range != NULL);
	CU_ASSERT(range->offset == 25);
	CU_ASSERT(range->length == 15);

	/* Unlock 20-29. This should result in range 25-39 now getting locked, since it
	 * no longer overlaps with an active lock.
	 */
	g_unlock_lba_range_done = false;
	rc = bdev_unlock_lba_range(desc, io_ch, 20, 10, unlock_lba_range_done, &ctx1);
	CU_ASSERT(rc == 0);
	poll_threads();

	CU_ASSERT(g_unlock_lba_range_done == true);
	CU_ASSERT(TAILQ_EMPTY(&bdev->internal.pending_locked_ranges));
	range = TAILQ_FIRST(&channel->locked_ranges);
	SPDK_CU_ASSERT_FATAL(range != NULL);
	CU_ASSERT(range->offset == 25);
	CU_ASSERT(range->length == 15);

	/* Lock 40-59. This should immediately lock since it does not overlap with the
	 * currently active 25-39 lock.
	 */
	g_lock_lba_range_done = false;
	rc = bdev_lock_lba_range(desc, io_ch, 40, 20, lock_lba_range_done, &ctx1);
	CU_ASSERT(rc == 0);
	poll_threads();

	CU_ASSERT(g_lock_lba_range_done == true);
	range = TAILQ_FIRST(&bdev->internal.locked_ranges);
	SPDK_CU_ASSERT_FATAL(range != NULL);
	range = TAILQ_NEXT(range, tailq);
	SPDK_CU_ASSERT_FATAL(range != NULL);
	CU_ASSERT(range->offset == 40);
	CU_ASSERT(range->length == 20);

	/* Try to lock 35-44. Note that this overlaps with both 25-39 and 40-59. */
	g_lock_lba_range_done = false;
	rc = bdev_lock_lba_range(desc, io_ch, 35, 10, lock_lba_range_done, &ctx1);
	CU_ASSERT(rc == 0);
	poll_threads();

	CU_ASSERT(g_lock_lba_range_done == false);
	range = TAILQ_FIRST(&bdev->internal.pending_locked_ranges);
	SPDK_CU_ASSERT_FATAL(range != NULL);
	CU_ASSERT(range->offset == 35);
	CU_ASSERT(range->length == 10);

	/* Unlock 25-39. Make sure that 35-44 is still in the pending list, since
	 * the 40-59 lock is still active.
	 */
	g_unlock_lba_range_done = false;
	rc = bdev_unlock_lba_range(desc, io_ch, 25, 15, unlock_lba_range_done, &ctx1);
	CU_ASSERT(rc == 0);
	poll_threads();

	CU_ASSERT(g_unlock_lba_range_done == true);
	CU_ASSERT(g_lock_lba_range_done == false);
	range = TAILQ_FIRST(&bdev->internal.pending_locked_ranges);
	SPDK_CU_ASSERT_FATAL(range != NULL);
	CU_ASSERT(range->offset == 35);
	CU_ASSERT(range->length == 10);

	/* Unlock 40-59. This should result in 35-44 now getting locked, since there are
	 * no longer any active overlapping locks.
	 */
	g_unlock_lba_range_done = false;
	rc = bdev_unlock_lba_range(desc, io_ch, 40, 20, unlock_lba_range_done, &ctx1);
	CU_ASSERT(rc == 0);
	poll_threads();

	CU_ASSERT(g_unlock_lba_range_done == true);
	CU_ASSERT(g_lock_lba_range_done == true);
	CU_ASSERT(TAILQ_EMPTY(&bdev->internal.pending_locked_ranges));
	range = TAILQ_FIRST(&bdev->internal.locked_ranges);
	SPDK_CU_ASSERT_FATAL(range != NULL);
	CU_ASSERT(range->offset == 35);
	CU_ASSERT(range->length == 10);

	/* Finally, unlock 35-44. */
	g_unlock_lba_range_done = false;
	rc = bdev_unlock_lba_range(desc, io_ch, 35, 10, unlock_lba_range_done, &ctx1);
	CU_ASSERT(rc == 0);
	poll_threads();

	CU_ASSERT(g_unlock_lba_range_done == true);
	CU_ASSERT(TAILQ_EMPTY(&bdev->internal.locked_ranges));

	spdk_put_io_channel(io_ch);
	spdk_bdev_close(desc);
	free_bdev(bdev);
	spdk_bdev_finish(bdev_fini_cb, NULL);
	poll_threads();
}

static void
abort_done(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
{
	g_abort_done = true;
	g_abort_status = bdev_io->internal.status;
	spdk_bdev_free_io(bdev_io);
}

static void
bdev_io_abort(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc = NULL;
	struct spdk_io_channel *io_ch;
	struct spdk_bdev_channel *channel;
	struct spdk_bdev_mgmt_channel *mgmt_ch;
	struct spdk_bdev_opts bdev_opts = {};
	struct iovec iov[BDEV_IO_NUM_CHILD_IOV * 2];
	uint64_t io_ctx1 = 0, io_ctx2 = 0, i;
	int rc;

	spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts));
	bdev_opts.bdev_io_pool_size = 7;
	bdev_opts.bdev_io_cache_size = 2;

	rc = spdk_bdev_set_opts(&bdev_opts);
	CU_ASSERT(rc == 0);
	spdk_bdev_initialize(bdev_init_cb, NULL);

	bdev = allocate_bdev("bdev0");

	rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc);
	CU_ASSERT(rc == 0);
	CU_ASSERT(desc != NULL);
	CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc));
	io_ch = spdk_bdev_get_io_channel(desc);
	CU_ASSERT(io_ch != NULL);
	channel = spdk_io_channel_get_ctx(io_ch);
	mgmt_ch = channel->shared_resource->mgmt_ch;

	g_abort_done = false;

	ut_enable_io_type(SPDK_BDEV_IO_TYPE_ABORT, false);

	rc = spdk_bdev_abort(desc, io_ch, &io_ctx1, abort_done, NULL);
	CU_ASSERT(rc == -ENOTSUP);

	ut_enable_io_type(SPDK_BDEV_IO_TYPE_ABORT, true);

	rc = spdk_bdev_abort(desc, io_ch, &io_ctx2, abort_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_abort_done == true);
	CU_ASSERT(g_abort_status == SPDK_BDEV_IO_STATUS_FAILED);

	/* Test the case that the target I/O was successfully aborted. */
	g_io_done = false;

	rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, &io_ctx1);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	g_abort_done = false;
	g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS;

	rc = spdk_bdev_abort(desc, io_ch, &io_ctx1, abort_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED);
	stub_complete_io(1);
	CU_ASSERT(g_abort_done == true);
	CU_ASSERT(g_abort_status == SPDK_BDEV_IO_STATUS_SUCCESS);

	/* Test the case that the target I/O was not aborted because it completed
	 * in the middle of execution of the abort.
	 */
	g_io_done = false;

	rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, &io_ctx1);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	g_abort_done = false;
	g_io_exp_status = SPDK_BDEV_IO_STATUS_FAILED;

	rc = spdk_bdev_abort(desc, io_ch, &io_ctx1, abort_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS;
	stub_complete_io(1);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS);

	g_io_exp_status = SPDK_BDEV_IO_STATUS_FAILED;
	stub_complete_io(1);
	CU_ASSERT(g_abort_done == true);
	CU_ASSERT(g_abort_status == SPDK_BDEV_IO_STATUS_SUCCESS);

	g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS;

	bdev->optimal_io_boundary = 16;
	bdev->split_on_optimal_io_boundary = true;

	/* Test that a single-vector command which is split is aborted correctly.
	 * Offset 14, length 8, payload 0xF000
	 *  Child - Offset 14, length 2, payload 0xF000
	 *  Child - Offset 16, length 6, payload 0xF000 + 2 * 512
	 */
	g_io_done = false;

	rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 8, io_done, &io_ctx1);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2);

	g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS;

	rc = spdk_bdev_abort(desc, io_ch, &io_ctx1, abort_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED);
	stub_complete_io(2);
	CU_ASSERT(g_abort_done == true);
	CU_ASSERT(g_abort_status == SPDK_BDEV_IO_STATUS_SUCCESS);

	/* Test that a multi-vector command that needs to be split by strip and then
	 * needs to be split further is aborted correctly. Abort is requested before
	 * the second child I/O is submitted. The parent I/O should complete with
	 * failure without submitting the second child I/O.
	 */
	for (i = 0; i < BDEV_IO_NUM_CHILD_IOV * 2; i++) {
		iov[i].iov_base = (void *)((i + 1) * 0x10000);
		iov[i].iov_len = 512;
	}

	bdev->optimal_io_boundary = BDEV_IO_NUM_CHILD_IOV;
	g_io_done = false;
	rc = spdk_bdev_readv_blocks(desc, io_ch, iov, BDEV_IO_NUM_CHILD_IOV * 2, 0,
				    BDEV_IO_NUM_CHILD_IOV * 2, io_done, &io_ctx1);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);

	g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS;

	rc = spdk_bdev_abort(desc, io_ch, &io_ctx1, abort_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED);
	stub_complete_io(1);
	CU_ASSERT(g_abort_done == true);
	CU_ASSERT(g_abort_status == SPDK_BDEV_IO_STATUS_SUCCESS);

	g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS;

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);

	bdev->optimal_io_boundary = 16;
	g_io_done = false;

	/* Test that a single-vector command which is split is aborted correctly.
	 * Differently from the above, the child abort requests are submitted
	 * sequentially, due to the limited capacity of the spdk_bdev_io pool.
	 */
	rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 50, io_done, &io_ctx1);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 4);

	g_abort_done = false;
	g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS;

	rc = spdk_bdev_abort(desc, io_ch, &io_ctx1, abort_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(!TAILQ_EMPTY(&mgmt_ch->io_wait_queue));
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 4);

	stub_complete_io(1);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED);
	stub_complete_io(3);
	CU_ASSERT(g_abort_done == true);
	CU_ASSERT(g_abort_status == SPDK_BDEV_IO_STATUS_SUCCESS);

	g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS;

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);

	spdk_put_io_channel(io_ch);
	spdk_bdev_close(desc);
	free_bdev(bdev);
	spdk_bdev_finish(bdev_fini_cb, NULL);
	poll_threads();
}

static void
bdev_set_options_test(void)
{
	struct spdk_bdev_opts bdev_opts = {};
	int rc;

	/* Case 1: Do not set opts_size */
	rc = spdk_bdev_set_opts(&bdev_opts);
	CU_ASSERT(rc == -1);

	spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts));
	bdev_opts.bdev_io_pool_size = 4;
	bdev_opts.bdev_io_cache_size = 2;
	bdev_opts.small_buf_pool_size = 4;

	/* Case 2: Do not set a valid small_buf_pool_size */
	rc = spdk_bdev_set_opts(&bdev_opts);
	CU_ASSERT(rc == -1);

	/* Case 3: Do not set a valid large_buf_pool_size */
	bdev_opts.small_buf_pool_size = BUF_SMALL_POOL_SIZE;
	bdev_opts.large_buf_pool_size = BUF_LARGE_POOL_SIZE - 1;
	rc = spdk_bdev_set_opts(&bdev_opts);
	CU_ASSERT(rc == -1);

	/* Case 4: Set a valid large_buf_pool_size */
	bdev_opts.large_buf_pool_size = BUF_LARGE_POOL_SIZE;
	rc = spdk_bdev_set_opts(&bdev_opts);
	CU_ASSERT(rc == 0);

	/* Case 5: Set different valid sizes for the small and large buf pools */
	bdev_opts.small_buf_pool_size = BUF_SMALL_POOL_SIZE + 3;
	bdev_opts.large_buf_pool_size = BUF_LARGE_POOL_SIZE + 3;
	rc = spdk_bdev_set_opts(&bdev_opts);
	CU_ASSERT(rc == 0);
}

int
main(int argc, char **argv)
{
	CU_pSuite suite = NULL;
	unsigned int num_failures;

	CU_set_error_action(CUEA_ABORT);
	CU_initialize_registry();

	suite = CU_add_suite("bdev", null_init, null_clean);

	CU_ADD_TEST(suite, bytes_to_blocks_test);
	CU_ADD_TEST(suite, num_blocks_test);
	CU_ADD_TEST(suite, io_valid_test);
	CU_ADD_TEST(suite, open_write_test);
	CU_ADD_TEST(suite, alias_add_del_test);
	CU_ADD_TEST(suite, get_device_stat_test);
	CU_ADD_TEST(suite, bdev_io_types_test);
	CU_ADD_TEST(suite, bdev_io_wait_test);
	CU_ADD_TEST(suite, bdev_io_spans_split_test);
	CU_ADD_TEST(suite, bdev_io_boundary_split_test);
	CU_ADD_TEST(suite, bdev_io_max_size_and_segment_split_test);
	CU_ADD_TEST(suite, bdev_io_mix_split_test);
	CU_ADD_TEST(suite, bdev_io_split_with_io_wait);
	CU_ADD_TEST(suite, bdev_io_alignment_with_boundary);
	CU_ADD_TEST(suite, bdev_io_alignment);
	CU_ADD_TEST(suite, bdev_histograms);
	CU_ADD_TEST(suite, bdev_write_zeroes);
	CU_ADD_TEST(suite, bdev_compare_and_write);
	CU_ADD_TEST(suite, bdev_compare);
	CU_ADD_TEST(suite, bdev_open_while_hotremove);
	CU_ADD_TEST(suite, bdev_close_while_hotremove);
	CU_ADD_TEST(suite, bdev_open_ext);
	CU_ADD_TEST(suite, bdev_set_io_timeout);
	CU_ADD_TEST(suite, lba_range_overlap);
	CU_ADD_TEST(suite, lock_lba_range_check_ranges);
	CU_ADD_TEST(suite, lock_lba_range_with_io_outstanding);
	CU_ADD_TEST(suite, lock_lba_range_overlapped);
	CU_ADD_TEST(suite, bdev_io_abort);
	CU_ADD_TEST(suite, bdev_set_options_test);

	allocate_cores(1);
	allocate_threads(1);
	set_thread(0);

	CU_basic_set_mode(CU_BRM_VERBOSE);
	CU_basic_run_tests();
	num_failures = CU_get_number_of_failures();
	CU_cleanup_registry();

	free_threads();
	free_cores();

	return num_failures;
}