/*-
 *   BSD LICENSE
 *
 *   Copyright (c) Intel Corporation. All rights reserved.
 *   Copyright (c) 2019 Mellanox Technologies LTD. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "spdk_cunit.h"

#include "common/lib/ut_multithread.c"
#include "unit/lib/json_mock.c"

#include "spdk/config.h"
/* HACK: disable VTune integration so the unit test doesn't need VTune headers and libs to build */
#undef SPDK_CONFIG_VTUNE

#include "bdev/bdev.c"

struct spdk_trace_histories *g_trace_histories;
DEFINE_STUB_V(spdk_trace_add_register_fn, (struct spdk_trace_register_fn *reg_fn));
DEFINE_STUB_V(spdk_trace_register_owner, (uint8_t type, char id_prefix));
DEFINE_STUB_V(spdk_trace_register_object, (uint8_t type, char id_prefix));
DEFINE_STUB_V(spdk_trace_register_description, (const char *name,
		uint16_t tpoint_id, uint8_t owner_type,
		uint8_t object_type, uint8_t new_object,
		uint8_t arg1_type, const char *arg1_name));
DEFINE_STUB_V(_spdk_trace_record, (uint64_t tsc, uint16_t tpoint_id, uint16_t poller_id,
				   uint32_t size, uint64_t object_id, uint64_t arg1));
DEFINE_STUB(spdk_notify_send, uint64_t, (const char *type, const char *ctx), 0);
DEFINE_STUB(spdk_notify_type_register, struct spdk_notify_type *, (const char *type), NULL);

int g_status;
int g_count;
enum spdk_bdev_event_type g_event_type1;
enum spdk_bdev_event_type g_event_type2;
struct spdk_histogram_data *g_histogram;
void *g_unregister_arg;
int g_unregister_rc;

void
spdk_scsi_nvme_translate(const struct spdk_bdev_io *bdev_io,
			 int *sc, int *sk, int *asc, int *ascq)
{
}

static int
null_init(void)
{
	return 0;
}

static int
null_clean(void)
{
	return 0;
}

static int
stub_destruct(void *ctx)
{
	return 0;
}
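
/* The structures below form the expectation harness used throughout this
 * file. A test queues one ut_expected_io per child I/O it expects the bdev
 * layer to generate, for example:
 *
 *	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 14, 2, 1);
 *	ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 2 * 512);
 *	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
 *
 * stub_submit_request() then pops the head of the expected_io list and
 * CU_ASSERTs that the submitted spdk_bdev_io matches it field by field.
 */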

struct ut_expected_io {
	uint8_t		type;
	uint64_t	offset;
	uint64_t	length;
	int		iovcnt;
	struct iovec	iov[BDEV_IO_NUM_CHILD_IOV];
	void		*md_buf;
	TAILQ_ENTRY(ut_expected_io) link;
};

struct bdev_ut_channel {
	TAILQ_HEAD(, spdk_bdev_io)	outstanding_io;
	uint32_t			outstanding_io_count;
	TAILQ_HEAD(, ut_expected_io)	expected_io;
};

static bool g_io_done;
static struct spdk_bdev_io *g_bdev_io;
static enum spdk_bdev_io_status g_io_status;
static enum spdk_bdev_io_status g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS;
static uint32_t g_bdev_ut_io_device;
static struct bdev_ut_channel *g_bdev_ut_channel;
static void *g_compare_read_buf;
static uint32_t g_compare_read_buf_len;
static void *g_compare_write_buf;
static uint32_t g_compare_write_buf_len;
static bool g_abort_done;
static enum spdk_bdev_io_status g_abort_status;

static struct ut_expected_io *
ut_alloc_expected_io(uint8_t type, uint64_t offset, uint64_t length, int iovcnt)
{
	struct ut_expected_io *expected_io;

	expected_io = calloc(1, sizeof(*expected_io));
	SPDK_CU_ASSERT_FATAL(expected_io != NULL);

	expected_io->type = type;
	expected_io->offset = offset;
	expected_io->length = length;
	expected_io->iovcnt = iovcnt;

	return expected_io;
}

static void
ut_expected_io_set_iov(struct ut_expected_io *expected_io, int pos, void *base, size_t len)
{
	expected_io->iov[pos].iov_base = base;
	expected_io->iov[pos].iov_len = len;
}

static void
stub_submit_request(struct spdk_io_channel *_ch, struct spdk_bdev_io *bdev_io)
{
	struct bdev_ut_channel *ch = spdk_io_channel_get_ctx(_ch);
	struct ut_expected_io *expected_io;
	struct iovec *iov, *expected_iov;
	struct spdk_bdev_io *bio_to_abort;
	int i;

	g_bdev_io = bdev_io;

	if (g_compare_read_buf && bdev_io->type == SPDK_BDEV_IO_TYPE_READ) {
		uint32_t len = bdev_io->u.bdev.iovs[0].iov_len;

		CU_ASSERT(bdev_io->u.bdev.iovcnt == 1);
		CU_ASSERT(g_compare_read_buf_len == len);
		memcpy(bdev_io->u.bdev.iovs[0].iov_base, g_compare_read_buf, len);
	}

	if (g_compare_write_buf && bdev_io->type == SPDK_BDEV_IO_TYPE_WRITE) {
		uint32_t len = bdev_io->u.bdev.iovs[0].iov_len;

		CU_ASSERT(bdev_io->u.bdev.iovcnt == 1);
		CU_ASSERT(g_compare_write_buf_len == len);
		memcpy(g_compare_write_buf, bdev_io->u.bdev.iovs[0].iov_base, len);
	}

	if (g_compare_read_buf && bdev_io->type == SPDK_BDEV_IO_TYPE_COMPARE) {
		uint32_t len = bdev_io->u.bdev.iovs[0].iov_len;

		CU_ASSERT(bdev_io->u.bdev.iovcnt == 1);
		CU_ASSERT(g_compare_read_buf_len == len);
		if (memcmp(bdev_io->u.bdev.iovs[0].iov_base, g_compare_read_buf, len)) {
			g_io_exp_status = SPDK_BDEV_IO_STATUS_MISCOMPARE;
		}
	}

	if (bdev_io->type == SPDK_BDEV_IO_TYPE_ABORT) {
		if (g_io_exp_status == SPDK_BDEV_IO_STATUS_SUCCESS) {
			TAILQ_FOREACH(bio_to_abort, &ch->outstanding_io, module_link) {
				if (bio_to_abort == bdev_io->u.abort.bio_to_abort) {
					TAILQ_REMOVE(&ch->outstanding_io, bio_to_abort, module_link);
					ch->outstanding_io_count--;
					spdk_bdev_io_complete(bio_to_abort, SPDK_BDEV_IO_STATUS_FAILED);
					break;
				}
			}
		}
	}

	TAILQ_INSERT_TAIL(&ch->outstanding_io, bdev_io, module_link);
	ch->outstanding_io_count++;

	expected_io = TAILQ_FIRST(&ch->expected_io);
	if (expected_io == NULL) {
		return;
	}
	TAILQ_REMOVE(&ch->expected_io, expected_io, link);

	if (expected_io->type != SPDK_BDEV_IO_TYPE_INVALID) {
		CU_ASSERT(bdev_io->type == expected_io->type);
	}

	if (expected_io->md_buf != NULL) {
		CU_ASSERT(expected_io->md_buf == bdev_io->u.bdev.md_buf);
	}

	if (expected_io->length == 0) {
		free(expected_io);
		return;
	}

	CU_ASSERT(expected_io->offset == bdev_io->u.bdev.offset_blocks);
	CU_ASSERT(expected_io->length == bdev_io->u.bdev.num_blocks);

	if (expected_io->iovcnt == 0) {
		free(expected_io);
		/* UNMAP, WRITE_ZEROES and FLUSH don't have iovs, so we can just return now. */
		return;
	}

	CU_ASSERT(expected_io->iovcnt == bdev_io->u.bdev.iovcnt);
	for (i = 0; i < expected_io->iovcnt; i++) {
		iov = &bdev_io->u.bdev.iovs[i];
		expected_iov = &expected_io->iov[i];
		CU_ASSERT(iov->iov_len == expected_iov->iov_len);
		CU_ASSERT(iov->iov_base == expected_iov->iov_base);
	}

	free(expected_io);
}

static void
stub_submit_request_get_buf_cb(struct spdk_io_channel *_ch,
			       struct spdk_bdev_io *bdev_io, bool success)
{
	CU_ASSERT(success == true);

	stub_submit_request(_ch, bdev_io);
}

static void
stub_submit_request_get_buf(struct spdk_io_channel *_ch, struct spdk_bdev_io *bdev_io)
{
	spdk_bdev_io_get_buf(bdev_io, stub_submit_request_get_buf_cb,
			     bdev_io->u.bdev.num_blocks * bdev_io->bdev->blocklen);
}
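
/* stub_complete_io() drains up to num_to_complete outstanding I/Os in FIFO
 * order, completing each with g_io_exp_status. For split I/Os, the parent's
 * io_done callback only fires after every child submitted so far has
 * completed, which is what the outstanding_io_count assertions in the split
 * tests below rely on.
 */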

static uint32_t
stub_complete_io(uint32_t num_to_complete)
{
	struct bdev_ut_channel *ch = g_bdev_ut_channel;
	struct spdk_bdev_io *bdev_io;
	enum spdk_bdev_io_status io_status;
	uint32_t num_completed = 0;

	while (num_completed < num_to_complete) {
		if (TAILQ_EMPTY(&ch->outstanding_io)) {
			break;
		}
		bdev_io = TAILQ_FIRST(&ch->outstanding_io);
		TAILQ_REMOVE(&ch->outstanding_io, bdev_io, module_link);
		ch->outstanding_io_count--;
		io_status = g_io_exp_status;
		spdk_bdev_io_complete(bdev_io, io_status);
		num_completed++;
	}

	return num_completed;
}

static struct spdk_io_channel *
bdev_ut_get_io_channel(void *ctx)
{
	return spdk_get_io_channel(&g_bdev_ut_io_device);
}

static bool g_io_types_supported[SPDK_BDEV_NUM_IO_TYPES] = {
	[SPDK_BDEV_IO_TYPE_READ]		= true,
	[SPDK_BDEV_IO_TYPE_WRITE]		= true,
	[SPDK_BDEV_IO_TYPE_COMPARE]		= true,
	[SPDK_BDEV_IO_TYPE_UNMAP]		= true,
	[SPDK_BDEV_IO_TYPE_FLUSH]		= true,
	[SPDK_BDEV_IO_TYPE_RESET]		= true,
	[SPDK_BDEV_IO_TYPE_NVME_ADMIN]		= true,
	[SPDK_BDEV_IO_TYPE_NVME_IO]		= true,
	[SPDK_BDEV_IO_TYPE_NVME_IO_MD]		= true,
	[SPDK_BDEV_IO_TYPE_WRITE_ZEROES]	= true,
	[SPDK_BDEV_IO_TYPE_ZCOPY]		= true,
	[SPDK_BDEV_IO_TYPE_ABORT]		= true,
};

static void
ut_enable_io_type(enum spdk_bdev_io_type io_type, bool enable)
{
	g_io_types_supported[io_type] = enable;
}

static bool
stub_io_type_supported(void *_bdev, enum spdk_bdev_io_type io_type)
{
	return g_io_types_supported[io_type];
}

static struct spdk_bdev_fn_table fn_table = {
	.destruct = stub_destruct,
	.submit_request = stub_submit_request,
	.get_io_channel = bdev_ut_get_io_channel,
	.io_type_supported = stub_io_type_supported,
};

static int
bdev_ut_create_ch(void *io_device, void *ctx_buf)
{
	struct bdev_ut_channel *ch = ctx_buf;

	CU_ASSERT(g_bdev_ut_channel == NULL);
	g_bdev_ut_channel = ch;

	TAILQ_INIT(&ch->outstanding_io);
	ch->outstanding_io_count = 0;
	TAILQ_INIT(&ch->expected_io);
	return 0;
}

static void
bdev_ut_destroy_ch(void *io_device, void *ctx_buf)
{
	CU_ASSERT(g_bdev_ut_channel != NULL);
	g_bdev_ut_channel = NULL;
}

struct spdk_bdev_module bdev_ut_if;

static int
bdev_ut_module_init(void)
{
	spdk_io_device_register(&g_bdev_ut_io_device, bdev_ut_create_ch, bdev_ut_destroy_ch,
				sizeof(struct bdev_ut_channel), NULL);
	spdk_bdev_module_init_done(&bdev_ut_if);
	return 0;
}

static void
bdev_ut_module_fini(void)
{
	spdk_io_device_unregister(&g_bdev_ut_io_device, NULL);
}

struct spdk_bdev_module bdev_ut_if = {
	.name = "bdev_ut",
	.module_init = bdev_ut_module_init,
	.module_fini = bdev_ut_module_fini,
	.async_init = true,
};

static void vbdev_ut_examine(struct spdk_bdev *bdev);

static int
vbdev_ut_module_init(void)
{
	return 0;
}

static void
vbdev_ut_module_fini(void)
{
}

struct spdk_bdev_module vbdev_ut_if = {
	.name = "vbdev_ut",
	.module_init = vbdev_ut_module_init,
	.module_fini = vbdev_ut_module_fini,
	.examine_config = vbdev_ut_examine,
};

SPDK_BDEV_MODULE_REGISTER(bdev_ut, &bdev_ut_if)
SPDK_BDEV_MODULE_REGISTER(vbdev_ut, &vbdev_ut_if)

static void
vbdev_ut_examine(struct spdk_bdev *bdev)
{
	spdk_bdev_module_examine_done(&vbdev_ut_if);
}

static struct spdk_bdev *
allocate_bdev(char *name)
{
	struct spdk_bdev *bdev;
	int rc;

	bdev = calloc(1, sizeof(*bdev));
	SPDK_CU_ASSERT_FATAL(bdev != NULL);

	bdev->name = name;
	bdev->fn_table = &fn_table;
	bdev->module = &bdev_ut_if;
	bdev->blockcnt = 1024;
	bdev->blocklen = 512;

	rc = spdk_bdev_register(bdev);
	CU_ASSERT(rc == 0);

	return bdev;
}
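
/* allocate_bdev() registers a 1024-block bdev with a 512-byte block size;
 * the block offset/length arithmetic in the tests below assumes those
 * defaults. allocate_vbdev() leaves blockcnt/blocklen at zero (calloc'd),
 * since the open/claim tests never do I/O to the virtual bdevs.
 */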

static struct spdk_bdev *
allocate_vbdev(char *name)
{
	struct spdk_bdev *bdev;
	int rc;

	bdev = calloc(1, sizeof(*bdev));
	SPDK_CU_ASSERT_FATAL(bdev != NULL);

	bdev->name = name;
	bdev->fn_table = &fn_table;
	bdev->module = &vbdev_ut_if;

	rc = spdk_bdev_register(bdev);
	CU_ASSERT(rc == 0);

	return bdev;
}

static void
free_bdev(struct spdk_bdev *bdev)
{
	spdk_bdev_unregister(bdev, NULL, NULL);
	poll_threads();
	memset(bdev, 0xFF, sizeof(*bdev));
	free(bdev);
}

static void
free_vbdev(struct spdk_bdev *bdev)
{
	spdk_bdev_unregister(bdev, NULL, NULL);
	poll_threads();
	memset(bdev, 0xFF, sizeof(*bdev));
	free(bdev);
}

static void
get_device_stat_cb(struct spdk_bdev *bdev, struct spdk_bdev_io_stat *stat, void *cb_arg, int rc)
{
	const char *bdev_name;

	CU_ASSERT(bdev != NULL);
	CU_ASSERT(rc == 0);
	bdev_name = spdk_bdev_get_name(bdev);
	CU_ASSERT_STRING_EQUAL(bdev_name, "bdev0");

	free(stat);

	*(bool *)cb_arg = true;
}

static void
bdev_unregister_cb(void *cb_arg, int rc)
{
	g_unregister_arg = cb_arg;
	g_unregister_rc = rc;
}

static void
bdev_ut_event_cb(enum spdk_bdev_event_type type, struct spdk_bdev *bdev, void *event_ctx)
{
}

static void
bdev_open_cb1(enum spdk_bdev_event_type type, struct spdk_bdev *bdev, void *event_ctx)
{
	struct spdk_bdev_desc *desc = *(struct spdk_bdev_desc **)event_ctx;

	g_event_type1 = type;
	if (SPDK_BDEV_EVENT_REMOVE == type) {
		spdk_bdev_close(desc);
	}
}

static void
bdev_open_cb2(enum spdk_bdev_event_type type, struct spdk_bdev *bdev, void *event_ctx)
{
	struct spdk_bdev_desc *desc = *(struct spdk_bdev_desc **)event_ctx;

	g_event_type2 = type;
	if (SPDK_BDEV_EVENT_REMOVE == type) {
		spdk_bdev_close(desc);
	}
}

static void
get_device_stat_test(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_io_stat *stat;
	bool done;

	bdev = allocate_bdev("bdev0");
	stat = calloc(1, sizeof(struct spdk_bdev_io_stat));
	if (stat == NULL) {
		free_bdev(bdev);
		return;
	}

	done = false;
	spdk_bdev_get_device_stat(bdev, stat, get_device_stat_cb, &done);
	while (!done) {
		poll_threads();
	}

	free_bdev(bdev);
}

static void
open_write_test(void)
{
	struct spdk_bdev *bdev[9];
	struct spdk_bdev_desc *desc[9] = {};
	int rc;

	/*
	 * Create a tree of bdevs to test various open w/ write cases.
	 *
	 * bdev0 through bdev3 are physical block devices, such as NVMe
	 * namespaces or Ceph block devices.
	 *
	 * bdev4 is a virtual bdev with multiple base bdevs. This models
	 * caching or RAID use cases.
	 *
	 * bdev5 through bdev7 are all virtual bdevs with the same base
	 * bdev (bdev2). This models partitioning or logical volume use
	 * cases.
	 *
	 * bdev7 is a virtual bdev with multiple base bdevs. One of its base
	 * bdevs (bdev2) is shared with the other virtual bdevs bdev5 and
	 * bdev6. This models caching, RAID, partitioning or logical volume
	 * use cases.
	 *
	 * bdev8 is a virtual bdev with multiple base bdevs, but these
	 * base bdevs are themselves virtual bdevs.
	 *
	 *                bdev8
	 *                  |
	 *            +----------+
	 *            |          |
	 *          bdev4      bdev5   bdev6   bdev7
	 *            |          |       |       |
	 *        +---+---+    +---+     +   +---+---+
	 *        |       |       \      |   /       \
	 *      bdev0   bdev1      bdev2            bdev3
	 */

	bdev[0] = allocate_bdev("bdev0");
	rc = spdk_bdev_module_claim_bdev(bdev[0], NULL, &bdev_ut_if);
	CU_ASSERT(rc == 0);

	bdev[1] = allocate_bdev("bdev1");
	rc = spdk_bdev_module_claim_bdev(bdev[1], NULL, &bdev_ut_if);
	CU_ASSERT(rc == 0);

	bdev[2] = allocate_bdev("bdev2");
	rc = spdk_bdev_module_claim_bdev(bdev[2], NULL, &bdev_ut_if);
	CU_ASSERT(rc == 0);

	bdev[3] = allocate_bdev("bdev3");
	rc = spdk_bdev_module_claim_bdev(bdev[3], NULL, &bdev_ut_if);
	CU_ASSERT(rc == 0);

	bdev[4] = allocate_vbdev("bdev4");
	rc = spdk_bdev_module_claim_bdev(bdev[4], NULL, &bdev_ut_if);
	CU_ASSERT(rc == 0);

	bdev[5] = allocate_vbdev("bdev5");
	rc = spdk_bdev_module_claim_bdev(bdev[5], NULL, &bdev_ut_if);
	CU_ASSERT(rc == 0);

	bdev[6] = allocate_vbdev("bdev6");

	bdev[7] = allocate_vbdev("bdev7");

	bdev[8] = allocate_vbdev("bdev8");

	/* Open bdev0 read-only. This should succeed. */
	rc = spdk_bdev_open_ext("bdev0", false, bdev_ut_event_cb, NULL, &desc[0]);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(desc[0] != NULL);
	CU_ASSERT(bdev[0] == spdk_bdev_desc_get_bdev(desc[0]));
	spdk_bdev_close(desc[0]);

	/*
	 * Open bdev1 read/write. This should fail since bdev1 has been claimed
	 * by a vbdev module.
	 */
	rc = spdk_bdev_open_ext("bdev1", true, bdev_ut_event_cb, NULL, &desc[1]);
	CU_ASSERT(rc == -EPERM);

	/*
	 * Open bdev4 read/write. This should fail since bdev4 has been claimed
	 * by a vbdev module.
	 */
	rc = spdk_bdev_open_ext("bdev4", true, bdev_ut_event_cb, NULL, &desc[4]);
	CU_ASSERT(rc == -EPERM);

	/* Open bdev4 read-only. This should succeed. */
	rc = spdk_bdev_open_ext("bdev4", false, bdev_ut_event_cb, NULL, &desc[4]);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(desc[4] != NULL);
	CU_ASSERT(bdev[4] == spdk_bdev_desc_get_bdev(desc[4]));
	spdk_bdev_close(desc[4]);

	/*
	 * Open bdev8 read/write. This should succeed since it is a leaf
	 * bdev.
	 */
	rc = spdk_bdev_open_ext("bdev8", true, bdev_ut_event_cb, NULL, &desc[8]);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(desc[8] != NULL);
	CU_ASSERT(bdev[8] == spdk_bdev_desc_get_bdev(desc[8]));
	spdk_bdev_close(desc[8]);

	/*
	 * Open bdev5 read/write. This should fail since bdev5 has been claimed
	 * by a vbdev module.
	 */
	rc = spdk_bdev_open_ext("bdev5", true, bdev_ut_event_cb, NULL, &desc[5]);
	CU_ASSERT(rc == -EPERM);
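
	/* Note that spdk_bdev_module_claim_bdev() gives the claiming module
	 * exclusive write access: read/write opens of a claimed bdev fail
	 * with -EPERM, while read-only opens still succeed, as the cases
	 * above and below demonstrate.
	 */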

	/* Open bdev5 read-only. This should succeed. */
	rc = spdk_bdev_open_ext("bdev5", false, bdev_ut_event_cb, NULL, &desc[5]);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(desc[5] != NULL);
	CU_ASSERT(bdev[5] == spdk_bdev_desc_get_bdev(desc[5]));
	spdk_bdev_close(desc[5]);

	free_vbdev(bdev[8]);

	free_vbdev(bdev[5]);
	free_vbdev(bdev[6]);
	free_vbdev(bdev[7]);

	free_vbdev(bdev[4]);

	free_bdev(bdev[0]);
	free_bdev(bdev[1]);
	free_bdev(bdev[2]);
	free_bdev(bdev[3]);
}

static void
bytes_to_blocks_test(void)
{
	struct spdk_bdev bdev;
	uint64_t offset_blocks, num_blocks;

	memset(&bdev, 0, sizeof(bdev));

	bdev.blocklen = 512;

	/* All parameters valid */
	offset_blocks = 0;
	num_blocks = 0;
	CU_ASSERT(bdev_bytes_to_blocks(&bdev, 512, &offset_blocks, 1024, &num_blocks) == 0);
	CU_ASSERT(offset_blocks == 1);
	CU_ASSERT(num_blocks == 2);

	/* Offset not a block multiple */
	CU_ASSERT(bdev_bytes_to_blocks(&bdev, 3, &offset_blocks, 512, &num_blocks) != 0);

	/* Length not a block multiple */
	CU_ASSERT(bdev_bytes_to_blocks(&bdev, 512, &offset_blocks, 3, &num_blocks) != 0);

	/* In case blocklen is not a power of two */
	bdev.blocklen = 100;
	CU_ASSERT(bdev_bytes_to_blocks(&bdev, 100, &offset_blocks, 200, &num_blocks) == 0);
	CU_ASSERT(offset_blocks == 1);
	CU_ASSERT(num_blocks == 2);

	/* Offset not a block multiple */
	CU_ASSERT(bdev_bytes_to_blocks(&bdev, 3, &offset_blocks, 100, &num_blocks) != 0);

	/* Length not a block multiple */
	CU_ASSERT(bdev_bytes_to_blocks(&bdev, 100, &offset_blocks, 3, &num_blocks) != 0);
}

static void
num_blocks_test(void)
{
	struct spdk_bdev bdev;
	struct spdk_bdev_desc *desc = NULL;
	int rc;

	memset(&bdev, 0, sizeof(bdev));
	bdev.name = "num_blocks";
	bdev.fn_table = &fn_table;
	bdev.module = &bdev_ut_if;
	spdk_bdev_register(&bdev);
	spdk_bdev_notify_blockcnt_change(&bdev, 50);

	/* Growing block number */
	CU_ASSERT(spdk_bdev_notify_blockcnt_change(&bdev, 70) == 0);
	/* Shrinking block number */
	CU_ASSERT(spdk_bdev_notify_blockcnt_change(&bdev, 30) == 0);

	rc = spdk_bdev_open_ext("num_blocks", false, bdev_open_cb1, &desc, &desc);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(desc != NULL);
	CU_ASSERT(&bdev == spdk_bdev_desc_get_bdev(desc));

	/* Growing block number */
	CU_ASSERT(spdk_bdev_notify_blockcnt_change(&bdev, 80) == 0);
	/* Shrinking block number */
	CU_ASSERT(spdk_bdev_notify_blockcnt_change(&bdev, 20) != 0);

	g_event_type1 = 0xFF;
	/* Growing block number */
	CU_ASSERT(spdk_bdev_notify_blockcnt_change(&bdev, 90) == 0);

	poll_threads();
	CU_ASSERT_EQUAL(g_event_type1, SPDK_BDEV_EVENT_RESIZE);

	g_event_type1 = 0xFF;
	/* Growing block number and closing */
	CU_ASSERT(spdk_bdev_notify_blockcnt_change(&bdev, 100) == 0);

	spdk_bdev_close(desc);
	spdk_bdev_unregister(&bdev, NULL, NULL);

	poll_threads();

	/* Callback is not called for closed device */
	CU_ASSERT_EQUAL(g_event_type1, 0xFF);
}
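
/* The resize rules exercised above are asymmetric: growing the block count
 * always succeeds, while shrinking succeeds only when no descriptor is open
 * (70 -> 30 passes before the open, 80 -> 20 fails after it). Open
 * descriptors are notified of a successful resize via SPDK_BDEV_EVENT_RESIZE
 * once the threads are polled.
 */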

static void
io_valid_test(void)
{
	struct spdk_bdev bdev;

	memset(&bdev, 0, sizeof(bdev));

	bdev.blocklen = 512;
	CU_ASSERT(pthread_mutex_init(&bdev.internal.mutex, NULL) == 0);

	spdk_bdev_notify_blockcnt_change(&bdev, 100);

	/* All parameters valid */
	CU_ASSERT(bdev_io_valid_blocks(&bdev, 1, 2) == true);

	/* Last valid block */
	CU_ASSERT(bdev_io_valid_blocks(&bdev, 99, 1) == true);

	/* Offset past end of bdev */
	CU_ASSERT(bdev_io_valid_blocks(&bdev, 100, 1) == false);

	/* Offset + length past end of bdev */
	CU_ASSERT(bdev_io_valid_blocks(&bdev, 99, 2) == false);

	/* Offset near end of uint64_t range (2^64 - 1) */
	CU_ASSERT(bdev_io_valid_blocks(&bdev, 18446744073709551615ULL, 1) == false);

	CU_ASSERT(pthread_mutex_destroy(&bdev.internal.mutex) == 0);
}

static void
alias_add_del_test(void)
{
	struct spdk_bdev *bdev[3];
	int rc;

	/* Creating and registering bdevs */
	bdev[0] = allocate_bdev("bdev0");
	SPDK_CU_ASSERT_FATAL(bdev[0] != 0);

	bdev[1] = allocate_bdev("bdev1");
	SPDK_CU_ASSERT_FATAL(bdev[1] != 0);

	bdev[2] = allocate_bdev("bdev2");
	SPDK_CU_ASSERT_FATAL(bdev[2] != 0);

	poll_threads();

	/*
	 * Try adding an alias identical to the name.
	 * The alias is identical to the name, so it cannot be added to the aliases list.
	 */
	rc = spdk_bdev_alias_add(bdev[0], bdev[0]->name);
	CU_ASSERT(rc == -EEXIST);

	/*
	 * Try to add an empty alias.
	 * This one should fail.
	 */
	rc = spdk_bdev_alias_add(bdev[0], NULL);
	CU_ASSERT(rc == -EINVAL);

	/* Try adding the same alias to two different registered bdevs */

	/* The alias is used for the first time, so this one should pass */
	rc = spdk_bdev_alias_add(bdev[0], "proper alias 0");
	CU_ASSERT(rc == 0);

	/* The alias was already added to another bdev, so this one should fail */
	rc = spdk_bdev_alias_add(bdev[1], "proper alias 0");
	CU_ASSERT(rc == -EEXIST);

	/* The alias is used for the first time, so this one should pass */
	rc = spdk_bdev_alias_add(bdev[1], "proper alias 1");
	CU_ASSERT(rc == 0);

	/* Try removing aliases from registered bdevs */

	/* The alias is not on the bdev's aliases list, so this one should fail */
	rc = spdk_bdev_alias_del(bdev[0], "not existing");
	CU_ASSERT(rc == -ENOENT);

	/* The alias is present on the bdev's aliases list, so this one should pass */
	rc = spdk_bdev_alias_del(bdev[0], "proper alias 0");
	CU_ASSERT(rc == 0);

	/* The alias is present on the bdev's aliases list, so this one should pass */
	rc = spdk_bdev_alias_del(bdev[1], "proper alias 1");
	CU_ASSERT(rc == 0);

	/* Try to remove the name instead of an alias. This one should fail, since the
	 * name cannot be changed or removed. */
	rc = spdk_bdev_alias_del(bdev[0], bdev[0]->name);
	CU_ASSERT(rc != 0);

	/* Try to delete all aliases from an empty alias list */
	spdk_bdev_alias_del_all(bdev[2]);
	SPDK_CU_ASSERT_FATAL(TAILQ_EMPTY(&bdev[2]->aliases));

	/* Try to delete all aliases from a non-empty alias list */
	rc = spdk_bdev_alias_add(bdev[2], "alias0");
	CU_ASSERT(rc == 0);
	rc = spdk_bdev_alias_add(bdev[2], "alias1");
	CU_ASSERT(rc == 0);
	spdk_bdev_alias_del_all(bdev[2]);
	CU_ASSERT(TAILQ_EMPTY(&bdev[2]->aliases));

	/* Unregister the bdevs, then free them with plain free() since they
	 * have already been unregistered here. */
	spdk_bdev_unregister(bdev[0], NULL, NULL);
	spdk_bdev_unregister(bdev[1], NULL, NULL);
	spdk_bdev_unregister(bdev[2], NULL, NULL);

	poll_threads();

	free(bdev[0]);
	free(bdev[1]);
	free(bdev[2]);
}

static void
io_done(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
{
	g_io_done = true;
	g_io_status = bdev_io->internal.status;
	spdk_bdev_free_io(bdev_io);
}

static void
bdev_init_cb(void *arg, int rc)
{
	CU_ASSERT(rc == 0);
}

static void
bdev_fini_cb(void *arg)
{
}

struct bdev_ut_io_wait_entry {
	struct spdk_bdev_io_wait_entry	entry;
	struct spdk_io_channel		*io_ch;
	struct spdk_bdev_desc		*desc;
	bool				submitted;
};

static void
io_wait_cb(void *arg)
{
	struct bdev_ut_io_wait_entry *entry = arg;
	int rc;

	rc = spdk_bdev_read_blocks(entry->desc, entry->io_ch, NULL, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	entry->submitted = true;
}

static void
bdev_io_types_test(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc = NULL;
	struct spdk_io_channel *io_ch;
	struct spdk_bdev_opts bdev_opts = {};
	int rc;

	spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts));
	bdev_opts.bdev_io_pool_size = 4;
	bdev_opts.bdev_io_cache_size = 2;

	rc = spdk_bdev_set_opts(&bdev_opts);
	CU_ASSERT(rc == 0);
	spdk_bdev_initialize(bdev_init_cb, NULL);
	poll_threads();

	bdev = allocate_bdev("bdev0");

	rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc);
	CU_ASSERT(rc == 0);
	poll_threads();
	SPDK_CU_ASSERT_FATAL(desc != NULL);
	CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc));
	io_ch = spdk_bdev_get_io_channel(desc);
	CU_ASSERT(io_ch != NULL);

	/* WRITE and WRITE ZEROES are not supported */
	ut_enable_io_type(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, false);
	ut_enable_io_type(SPDK_BDEV_IO_TYPE_WRITE, false);
	rc = spdk_bdev_write_zeroes_blocks(desc, io_ch, 0, 128, io_done, NULL);
	CU_ASSERT(rc == -ENOTSUP);
	ut_enable_io_type(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, true);
	ut_enable_io_type(SPDK_BDEV_IO_TYPE_WRITE, true);

	spdk_put_io_channel(io_ch);
	spdk_bdev_close(desc);
	free_bdev(bdev);
	spdk_bdev_finish(bdev_fini_cb, NULL);
	poll_threads();
}

static void
bdev_io_wait_test(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc = NULL;
	struct spdk_io_channel *io_ch;
	struct spdk_bdev_opts bdev_opts = {};
	struct bdev_ut_io_wait_entry io_wait_entry;
	struct bdev_ut_io_wait_entry io_wait_entry2;
	int rc;

	spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts));
	bdev_opts.bdev_io_pool_size = 4;
	bdev_opts.bdev_io_cache_size = 2;

	rc = spdk_bdev_set_opts(&bdev_opts);
	CU_ASSERT(rc == 0);
	spdk_bdev_initialize(bdev_init_cb, NULL);
	poll_threads();

	bdev = allocate_bdev("bdev0");

	rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc);
	CU_ASSERT(rc == 0);
	poll_threads();
	SPDK_CU_ASSERT_FATAL(desc != NULL);
	CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc));
	io_ch = spdk_bdev_get_io_channel(desc);
	CU_ASSERT(io_ch != NULL);

	rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 4);

	rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, NULL);
	CU_ASSERT(rc == -ENOMEM);
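
	/* bdev_io_pool_size was set to 4 above, so the four reads exhaust the
	 * spdk_bdev_io pool and the fifth submission fails with -ENOMEM.
	 * spdk_bdev_queue_io_wait() registers a callback that runs once a
	 * completed I/O returns a bdev_io to the pool; io_wait_cb() then
	 * retries the read.
	 */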

	io_wait_entry.entry.bdev = bdev;
	io_wait_entry.entry.cb_fn = io_wait_cb;
	io_wait_entry.entry.cb_arg = &io_wait_entry;
	io_wait_entry.io_ch = io_ch;
	io_wait_entry.desc = desc;
	io_wait_entry.submitted = false;
	/* Cannot use the same io_wait_entry for two different calls. */
	memcpy(&io_wait_entry2, &io_wait_entry, sizeof(io_wait_entry));
	io_wait_entry2.entry.cb_arg = &io_wait_entry2;

	/* Queue two I/O waits. */
	rc = spdk_bdev_queue_io_wait(bdev, io_ch, &io_wait_entry.entry);
	CU_ASSERT(rc == 0);
	CU_ASSERT(io_wait_entry.submitted == false);
	rc = spdk_bdev_queue_io_wait(bdev, io_ch, &io_wait_entry2.entry);
	CU_ASSERT(rc == 0);
	CU_ASSERT(io_wait_entry2.submitted == false);

	stub_complete_io(1);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 4);
	CU_ASSERT(io_wait_entry.submitted == true);
	CU_ASSERT(io_wait_entry2.submitted == false);

	stub_complete_io(1);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 4);
	CU_ASSERT(io_wait_entry2.submitted == true);

	stub_complete_io(4);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);

	spdk_put_io_channel(io_ch);
	spdk_bdev_close(desc);
	free_bdev(bdev);
	spdk_bdev_finish(bdev_fini_cb, NULL);
	poll_threads();
}

static void
bdev_io_spans_split_test(void)
{
	struct spdk_bdev bdev;
	struct spdk_bdev_io bdev_io;
	struct iovec iov[BDEV_IO_NUM_CHILD_IOV];

	memset(&bdev, 0, sizeof(bdev));
	bdev_io.u.bdev.iovs = iov;

	bdev.optimal_io_boundary = 0;
	bdev.max_segment_size = 0;
	bdev.max_num_segments = 0;
	bdev_io.bdev = &bdev;

	/* bdev has no optimal_io_boundary and no max size limits set - so this should return false. */
	CU_ASSERT(bdev_io_should_split(&bdev_io) == false);

	bdev.split_on_optimal_io_boundary = true;
	bdev.optimal_io_boundary = 32;
	bdev_io.type = SPDK_BDEV_IO_TYPE_RESET;

	/* RESETs are not based on LBAs - so this should return false. */
	CU_ASSERT(bdev_io_should_split(&bdev_io) == false);

	bdev_io.type = SPDK_BDEV_IO_TYPE_READ;
	bdev_io.u.bdev.offset_blocks = 0;
	bdev_io.u.bdev.num_blocks = 32;

	/* This I/O runs right up to, but does not cross, the boundary - so this should return false. */
	CU_ASSERT(bdev_io_should_split(&bdev_io) == false);
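
	/* With optimal_io_boundary = 32 the boundary windows are [0, 32),
	 * [32, 64), and so on. A 32-block I/O at offset 0 ends exactly on
	 * the boundary, while a 33-block I/O reaches block 32 in the next
	 * window, which is what forces the split below.
	 */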

	bdev_io.u.bdev.num_blocks = 33;

	/* This I/O spans a boundary. */
	CU_ASSERT(bdev_io_should_split(&bdev_io) == true);

	bdev_io.u.bdev.num_blocks = 32;
	bdev.max_segment_size = 512 * 32;
	bdev.max_num_segments = 1;
	bdev_io.u.bdev.iovcnt = 1;
	iov[0].iov_len = 512;

	/* Does not cross the boundary and does not exceed max_size or max_segs */
	CU_ASSERT(bdev_io_should_split(&bdev_io) == false);

	bdev.split_on_optimal_io_boundary = false;
	bdev.max_segment_size = 512;
	bdev.max_num_segments = 1;
	bdev_io.u.bdev.iovcnt = 2;

	/* Exceeds max_segs */
	CU_ASSERT(bdev_io_should_split(&bdev_io) == true);

	bdev.max_num_segments = 2;
	iov[0].iov_len = 513;
	iov[1].iov_len = 512;

	/* Exceeds max_sizes */
	CU_ASSERT(bdev_io_should_split(&bdev_io) == true);
}

static void
bdev_io_boundary_split_test(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc = NULL;
	struct spdk_io_channel *io_ch;
	struct spdk_bdev_opts bdev_opts = {};
	struct iovec iov[BDEV_IO_NUM_CHILD_IOV * 2];
	struct ut_expected_io *expected_io;
	void *md_buf = (void *)0xFF000000;
	uint64_t i;
	int rc;

	spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts));
	bdev_opts.bdev_io_pool_size = 512;
	bdev_opts.bdev_io_cache_size = 64;

	rc = spdk_bdev_set_opts(&bdev_opts);
	CU_ASSERT(rc == 0);
	spdk_bdev_initialize(bdev_init_cb, NULL);

	bdev = allocate_bdev("bdev0");

	rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(desc != NULL);
	CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc));
	io_ch = spdk_bdev_get_io_channel(desc);
	CU_ASSERT(io_ch != NULL);

	bdev->optimal_io_boundary = 16;
	bdev->split_on_optimal_io_boundary = false;

	g_io_done = false;

	/* First test that the I/O does not get split if split_on_optimal_io_boundary == false. */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 14, 8, 1);
	ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 8 * 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 8, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);

	bdev->split_on_optimal_io_boundary = true;
	bdev->md_interleave = false;
	bdev->md_len = 8;
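
	/* With md_interleave == false the metadata buffer is separate from
	 * the data buffer, so each child I/O's md_buf is advanced by
	 * (blocks already consumed) * md_len. With md_len = 8, the second
	 * child of the split below starts 2 blocks in and therefore expects
	 * md_buf + 2 * 8.
	 */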

	/* Now test that a single-vector command is split correctly.
	 * Offset 14, length 8, payload 0xF000
	 *  Child - Offset 14, length 2, payload 0xF000
	 *  Child - Offset 16, length 6, payload 0xF000 + 2 * 512
	 *
	 * Set up the expected values before calling spdk_bdev_read_blocks_with_md
	 */
	g_io_done = false;
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 14, 2, 1);
	expected_io->md_buf = md_buf;
	ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 2 * 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 16, 6, 1);
	expected_io->md_buf = md_buf + 2 * 8;
	ut_expected_io_set_iov(expected_io, 0, (void *)(0xF000 + 2 * 512), 6 * 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	/* spdk_bdev_read_blocks_with_md will submit the first child immediately. */
	rc = spdk_bdev_read_blocks_with_md(desc, io_ch, (void *)0xF000, md_buf,
					   14, 8, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2);
	stub_complete_io(2);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);

	/* Now set up a more complex, multi-vector command that needs to be split,
	 * including splitting iovecs.
	 */
	iov[0].iov_base = (void *)0x10000;
	iov[0].iov_len = 512;
	iov[1].iov_base = (void *)0x20000;
	iov[1].iov_len = 20 * 512;
	iov[2].iov_base = (void *)0x30000;
	iov[2].iov_len = 11 * 512;

	g_io_done = false;
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 14, 2, 2);
	expected_io->md_buf = md_buf;
	ut_expected_io_set_iov(expected_io, 0, (void *)0x10000, 512);
	ut_expected_io_set_iov(expected_io, 1, (void *)0x20000, 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 16, 16, 1);
	expected_io->md_buf = md_buf + 2 * 8;
	ut_expected_io_set_iov(expected_io, 0, (void *)(0x20000 + 512), 16 * 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 32, 14, 2);
	expected_io->md_buf = md_buf + 18 * 8;
	ut_expected_io_set_iov(expected_io, 0, (void *)(0x20000 + 17 * 512), 3 * 512);
	ut_expected_io_set_iov(expected_io, 1, (void *)0x30000, 11 * 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	rc = spdk_bdev_writev_blocks_with_md(desc, io_ch, iov, 3, md_buf,
					     14, 32, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 3);
	stub_complete_io(3);
	CU_ASSERT(g_io_done == true);
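
	/* In the case above, the 32-block write at offset 14 crosses the
	 * 16-block boundaries at blocks 16 and 32, producing children of 2,
	 * 16 and 14 blocks. iov[1] (20 * 512 bytes) is itself split across
	 * all three children: 512 bytes complete the first child, 16 * 512
	 * bytes fill the second, and the remaining 3 * 512 bytes start the
	 * third.
	 */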

	/* Test multi vector command that needs to be split by strip and then needs to be
	 * split further due to the capacity of child iovs.
	 */
	for (i = 0; i < BDEV_IO_NUM_CHILD_IOV * 2; i++) {
		iov[i].iov_base = (void *)((i + 1) * 0x10000);
		iov[i].iov_len = 512;
	}

	bdev->optimal_io_boundary = BDEV_IO_NUM_CHILD_IOV;
	g_io_done = false;
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0, BDEV_IO_NUM_CHILD_IOV,
					   BDEV_IO_NUM_CHILD_IOV);
	expected_io->md_buf = md_buf;
	for (i = 0; i < BDEV_IO_NUM_CHILD_IOV; i++) {
		ut_expected_io_set_iov(expected_io, i, (void *)((i + 1) * 0x10000), 512);
	}
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, BDEV_IO_NUM_CHILD_IOV,
					   BDEV_IO_NUM_CHILD_IOV, BDEV_IO_NUM_CHILD_IOV);
	expected_io->md_buf = md_buf + BDEV_IO_NUM_CHILD_IOV * 8;
	for (i = 0; i < BDEV_IO_NUM_CHILD_IOV; i++) {
		ut_expected_io_set_iov(expected_io, i,
				       (void *)((i + 1 + BDEV_IO_NUM_CHILD_IOV) * 0x10000), 512);
	}
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	rc = spdk_bdev_readv_blocks_with_md(desc, io_ch, iov, BDEV_IO_NUM_CHILD_IOV * 2, md_buf,
					    0, BDEV_IO_NUM_CHILD_IOV * 2, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
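
	/* In the case above, 64 iovs of 512 bytes equal 64 blocks, so the
	 * 32-iov child capacity and the 32-block optimal boundary force the
	 * same split: two children of 32 blocks / 32 iovs each. Only one
	 * child is outstanding at a time, presumably because both children
	 * share the parent's BDEV_IO_NUM_CHILD_IOV-slot child iov array.
	 */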

	/* Test multi vector command that needs to be split by strip and then needs to be
	 * split further due to the capacity of child iovs. In this case, the length of
	 * the remaining iovecs within an I/O boundary is a multiple of the block size.
	 */

	/* Fill the iovec array for exactly one boundary. The iovec count for this boundary
	 * is BDEV_IO_NUM_CHILD_IOV + 1, which exceeds the capacity of child iovs.
	 */
	for (i = 0; i < BDEV_IO_NUM_CHILD_IOV - 2; i++) {
		iov[i].iov_base = (void *)((i + 1) * 0x10000);
		iov[i].iov_len = 512;
	}
	for (i = BDEV_IO_NUM_CHILD_IOV - 2; i < BDEV_IO_NUM_CHILD_IOV; i++) {
		iov[i].iov_base = (void *)((i + 1) * 0x10000);
		iov[i].iov_len = 256;
	}
	iov[BDEV_IO_NUM_CHILD_IOV].iov_base = (void *)((BDEV_IO_NUM_CHILD_IOV + 1) * 0x10000);
	iov[BDEV_IO_NUM_CHILD_IOV].iov_len = 512;

	/* Add an extra iovec to trigger split */
	iov[BDEV_IO_NUM_CHILD_IOV + 1].iov_base = (void *)((BDEV_IO_NUM_CHILD_IOV + 2) * 0x10000);
	iov[BDEV_IO_NUM_CHILD_IOV + 1].iov_len = 512;

	bdev->optimal_io_boundary = BDEV_IO_NUM_CHILD_IOV;
	g_io_done = false;
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0,
					   BDEV_IO_NUM_CHILD_IOV - 1, BDEV_IO_NUM_CHILD_IOV);
	expected_io->md_buf = md_buf;
	for (i = 0; i < BDEV_IO_NUM_CHILD_IOV - 2; i++) {
		ut_expected_io_set_iov(expected_io, i,
				       (void *)((i + 1) * 0x10000), 512);
	}
	for (i = BDEV_IO_NUM_CHILD_IOV - 2; i < BDEV_IO_NUM_CHILD_IOV; i++) {
		ut_expected_io_set_iov(expected_io, i,
				       (void *)((i + 1) * 0x10000), 256);
	}
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, BDEV_IO_NUM_CHILD_IOV - 1,
					   1, 1);
	expected_io->md_buf = md_buf + (BDEV_IO_NUM_CHILD_IOV - 1) * 8;
	ut_expected_io_set_iov(expected_io, 0,
			       (void *)((BDEV_IO_NUM_CHILD_IOV + 1) * 0x10000), 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, BDEV_IO_NUM_CHILD_IOV,
					   1, 1);
	expected_io->md_buf = md_buf + BDEV_IO_NUM_CHILD_IOV * 8;
	ut_expected_io_set_iov(expected_io, 0,
			       (void *)((BDEV_IO_NUM_CHILD_IOV + 2) * 0x10000), 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	rc = spdk_bdev_readv_blocks_with_md(desc, io_ch, iov, BDEV_IO_NUM_CHILD_IOV + 2, md_buf,
					    0, BDEV_IO_NUM_CHILD_IOV + 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2);
	stub_complete_io(2);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);

	/* Test multi vector command that needs to be split by strip and then needs to be
	 * split further due to the capacity of child iovs. In this case the child request
	 * offset should be rewound to the last aligned offset and the request should
	 * complete successfully.
	 */
	for (i = 0; i < BDEV_IO_NUM_CHILD_IOV - 1; i++) {
		iov[i].iov_base = (void *)((i + 1) * 0x10000);
		iov[i].iov_len = 512;
	}
	iov[BDEV_IO_NUM_CHILD_IOV - 1].iov_base = (void *)(BDEV_IO_NUM_CHILD_IOV * 0x10000);
	iov[BDEV_IO_NUM_CHILD_IOV - 1].iov_len = 256;

	iov[BDEV_IO_NUM_CHILD_IOV].iov_base = (void *)((BDEV_IO_NUM_CHILD_IOV + 1) * 0x10000);
	iov[BDEV_IO_NUM_CHILD_IOV].iov_len = 256;

	iov[BDEV_IO_NUM_CHILD_IOV + 1].iov_base = (void *)((BDEV_IO_NUM_CHILD_IOV + 2) * 0x10000);
	iov[BDEV_IO_NUM_CHILD_IOV + 1].iov_len = 512;

	bdev->optimal_io_boundary = BDEV_IO_NUM_CHILD_IOV;
	g_io_done = false;
	g_io_status = 0;
	/* The first expected io should run from offset 0 to BDEV_IO_NUM_CHILD_IOV - 1 */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0,
					   BDEV_IO_NUM_CHILD_IOV - 1, BDEV_IO_NUM_CHILD_IOV - 1);
	expected_io->md_buf = md_buf;
	for (i = 0; i < BDEV_IO_NUM_CHILD_IOV - 1; i++) {
		ut_expected_io_set_iov(expected_io, i,
				       (void *)((i + 1) * 0x10000), 512);
	}
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
	/* The second expected io should run from offset BDEV_IO_NUM_CHILD_IOV - 1 to BDEV_IO_NUM_CHILD_IOV */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, BDEV_IO_NUM_CHILD_IOV - 1,
					   1, 2);
	expected_io->md_buf = md_buf + (BDEV_IO_NUM_CHILD_IOV - 1) * 8;
	ut_expected_io_set_iov(expected_io, 0,
			       (void *)(BDEV_IO_NUM_CHILD_IOV * 0x10000), 256);
	ut_expected_io_set_iov(expected_io, 1,
			       (void *)((BDEV_IO_NUM_CHILD_IOV + 1) * 0x10000), 256);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
	/* The third expected io should run from offset BDEV_IO_NUM_CHILD_IOV to BDEV_IO_NUM_CHILD_IOV + 1 */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, BDEV_IO_NUM_CHILD_IOV,
					   1, 1);
	expected_io->md_buf = md_buf + BDEV_IO_NUM_CHILD_IOV * 8;
	ut_expected_io_set_iov(expected_io, 0,
			       (void *)((BDEV_IO_NUM_CHILD_IOV + 2) * 0x10000), 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	rc = spdk_bdev_readv_blocks_with_md(desc, io_ch, iov, BDEV_IO_NUM_CHILD_IOV * 2, md_buf,
					    0, BDEV_IO_NUM_CHILD_IOV + 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2);
	stub_complete_io(2);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
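
	/* In the case above, the first 31 iovs cover 31 full blocks, and the
	 * 256-byte iov at index 31 would leave the child ending mid-block.
	 * Padding it out with the next 256-byte iov would need 33 iovs,
	 * which exceeds the child capacity, so the first child is rewound to
	 * the 31-block boundary. The two 256-byte iovs then pair up as the
	 * 1-block second child, and the final 512-byte iov becomes the third.
	 */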

	/* Test multi vector command that needs to be split due to the IO boundary and
	 * the capacity of child iovs. Especially test the case where the command is
	 * split due to the capacity of child iovs and the tail address is not aligned
	 * with the block size, so it is rewound to the aligned address.
	 *
	 * The iovecs used in the read request are complex but are based on data
	 * collected from a real issue. We change the base addresses but keep the
	 * lengths so as not to lose the credibility of the test.
	 */
	bdev->optimal_io_boundary = 128;
	g_io_done = false;
	g_io_status = 0;

	for (i = 0; i < 31; i++) {
		iov[i].iov_base = (void *)(0xFEED0000000 + (i << 20));
		iov[i].iov_len = 1024;
	}
	iov[31].iov_base = (void *)0xFEED1F00000;
	iov[31].iov_len = 32768;
	iov[32].iov_base = (void *)0xFEED2000000;
	iov[32].iov_len = 160;
	iov[33].iov_base = (void *)0xFEED2100000;
	iov[33].iov_len = 4096;
	iov[34].iov_base = (void *)0xFEED2200000;
	iov[34].iov_len = 4096;
	iov[35].iov_base = (void *)0xFEED2300000;
	iov[35].iov_len = 4096;
	iov[36].iov_base = (void *)0xFEED2400000;
	iov[36].iov_len = 4096;
	iov[37].iov_base = (void *)0xFEED2500000;
	iov[37].iov_len = 4096;
	iov[38].iov_base = (void *)0xFEED2600000;
	iov[38].iov_len = 4096;
	iov[39].iov_base = (void *)0xFEED2700000;
	iov[39].iov_len = 4096;
	iov[40].iov_base = (void *)0xFEED2800000;
	iov[40].iov_len = 4096;
	iov[41].iov_base = (void *)0xFEED2900000;
	iov[41].iov_len = 4096;
	iov[42].iov_base = (void *)0xFEED2A00000;
	iov[42].iov_len = 4096;
	iov[43].iov_base = (void *)0xFEED2B00000;
	iov[43].iov_len = 12288;
	iov[44].iov_base = (void *)0xFEED2C00000;
	iov[44].iov_len = 8192;
	iov[45].iov_base = (void *)0xFEED2F00000;
	iov[45].iov_len = 4096;
	iov[46].iov_base = (void *)0xFEED3000000;
	iov[46].iov_len = 4096;
	iov[47].iov_base = (void *)0xFEED3100000;
	iov[47].iov_len = 4096;
	iov[48].iov_base = (void *)0xFEED3200000;
	iov[48].iov_len = 24576;
	iov[49].iov_base = (void *)0xFEED3300000;
	iov[49].iov_len = 16384;
	iov[50].iov_base = (void *)0xFEED3400000;
	iov[50].iov_len = 12288;
	iov[51].iov_base = (void *)0xFEED3500000;
	iov[51].iov_len = 4096;
	iov[52].iov_base = (void *)0xFEED3600000;
	iov[52].iov_len = 4096;
	iov[53].iov_base = (void *)0xFEED3700000;
	iov[53].iov_len = 4096;
	iov[54].iov_base = (void *)0xFEED3800000;
	iov[54].iov_len = 28672;
	iov[55].iov_base = (void *)0xFEED3900000;
	iov[55].iov_len = 20480;
	iov[56].iov_base = (void *)0xFEED3A00000;
	iov[56].iov_len = 4096;
	iov[57].iov_base = (void *)0xFEED3B00000;
	iov[57].iov_len = 12288;
	iov[58].iov_base = (void *)0xFEED3C00000;
	iov[58].iov_len = 4096;
	iov[59].iov_base = (void *)0xFEED3D00000;
	iov[59].iov_len = 4096;
	iov[60].iov_base = (void *)0xFEED3E00000;
	iov[60].iov_len = 352;

	/* The 1st child IO must be from iov[0] to iov[31], split by the capacity
	 * of child iovs.
	 */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0, 126, 32);
	expected_io->md_buf = md_buf;
	for (i = 0; i < 32; i++) {
		ut_expected_io_set_iov(expected_io, i, iov[i].iov_base, iov[i].iov_len);
	}
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
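
	/* 31 iovs of 1024 bytes plus the 32768-byte iov[31] total
	 * 64512 bytes = 126 blocks in exactly 32 iovs, so this child is
	 * capped by iov capacity; the 128-block boundary alone would have
	 * allowed two more blocks.
	 */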

	/* The 2nd child IO must be from iov[32] to the first 864 bytes of iov[33],
	 * split by the IO boundary requirement.
	 */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 126, 2, 2);
	expected_io->md_buf = md_buf + 126 * 8;
	ut_expected_io_set_iov(expected_io, 0, iov[32].iov_base, iov[32].iov_len);
	ut_expected_io_set_iov(expected_io, 1, iov[33].iov_base, 864);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	/* The 3rd child IO must be from the remaining 3232 bytes of iov[33] to
	 * the first 864 bytes of iov[46], split by the IO boundary requirement.
	 */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 128, 128, 14);
	expected_io->md_buf = md_buf + 128 * 8;
	ut_expected_io_set_iov(expected_io, 0, (void *)((uintptr_t)iov[33].iov_base + 864),
			       iov[33].iov_len - 864);
	ut_expected_io_set_iov(expected_io, 1, iov[34].iov_base, iov[34].iov_len);
	ut_expected_io_set_iov(expected_io, 2, iov[35].iov_base, iov[35].iov_len);
	ut_expected_io_set_iov(expected_io, 3, iov[36].iov_base, iov[36].iov_len);
	ut_expected_io_set_iov(expected_io, 4, iov[37].iov_base, iov[37].iov_len);
	ut_expected_io_set_iov(expected_io, 5, iov[38].iov_base, iov[38].iov_len);
	ut_expected_io_set_iov(expected_io, 6, iov[39].iov_base, iov[39].iov_len);
	ut_expected_io_set_iov(expected_io, 7, iov[40].iov_base, iov[40].iov_len);
	ut_expected_io_set_iov(expected_io, 8, iov[41].iov_base, iov[41].iov_len);
	ut_expected_io_set_iov(expected_io, 9, iov[42].iov_base, iov[42].iov_len);
	ut_expected_io_set_iov(expected_io, 10, iov[43].iov_base, iov[43].iov_len);
	ut_expected_io_set_iov(expected_io, 11, iov[44].iov_base, iov[44].iov_len);
	ut_expected_io_set_iov(expected_io, 12, iov[45].iov_base, iov[45].iov_len);
	ut_expected_io_set_iov(expected_io, 13, iov[46].iov_base, 864);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	/* The 4th child IO must be from the remaining 3232 bytes of iov[46] to the
	 * first 864 bytes of iov[52], split by the IO boundary requirement.
	 */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 256, 128, 7);
	expected_io->md_buf = md_buf + 256 * 8;
	ut_expected_io_set_iov(expected_io, 0, (void *)((uintptr_t)iov[46].iov_base + 864),
			       iov[46].iov_len - 864);
	ut_expected_io_set_iov(expected_io, 1, iov[47].iov_base, iov[47].iov_len);
	ut_expected_io_set_iov(expected_io, 2, iov[48].iov_base, iov[48].iov_len);
	ut_expected_io_set_iov(expected_io, 3, iov[49].iov_base, iov[49].iov_len);
	ut_expected_io_set_iov(expected_io, 4, iov[50].iov_base, iov[50].iov_len);
	ut_expected_io_set_iov(expected_io, 5, iov[51].iov_base, iov[51].iov_len);
	ut_expected_io_set_iov(expected_io, 6, iov[52].iov_base, 864);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	/* The 5th child IO must be from the remaining 3232 bytes of iov[52] to
	 * the first 4960 bytes of iov[57], split by the IO boundary requirement.
	 */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 384, 128, 6);
	expected_io->md_buf = md_buf + 384 * 8;
	ut_expected_io_set_iov(expected_io, 0, (void *)((uintptr_t)iov[52].iov_base + 864),
			       iov[52].iov_len - 864);
	ut_expected_io_set_iov(expected_io, 1, iov[53].iov_base, iov[53].iov_len);
	ut_expected_io_set_iov(expected_io, 2, iov[54].iov_base, iov[54].iov_len);
	ut_expected_io_set_iov(expected_io, 3, iov[55].iov_base, iov[55].iov_len);
	ut_expected_io_set_iov(expected_io, 4, iov[56].iov_base, iov[56].iov_len);
	ut_expected_io_set_iov(expected_io, 5, iov[57].iov_base, 4960);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	/* The 6th child IO must be from the remaining 7328 bytes of iov[57]
	 * to the first 3936 bytes of iov[59], split by the capacity of child iovs.
	 */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 512, 30, 3);
	expected_io->md_buf = md_buf + 512 * 8;
	ut_expected_io_set_iov(expected_io, 0, (void *)((uintptr_t)iov[57].iov_base + 4960),
			       iov[57].iov_len - 4960);
	ut_expected_io_set_iov(expected_io, 1, iov[58].iov_base, iov[58].iov_len);
	ut_expected_io_set_iov(expected_io, 2, iov[59].iov_base, 3936);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	/* The 7th child IO is from the remaining 160 bytes of iov[59] and iov[60]. */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 542, 1, 2);
	expected_io->md_buf = md_buf + 542 * 8;
	ut_expected_io_set_iov(expected_io, 0, (void *)((uintptr_t)iov[59].iov_base + 3936),
			       iov[59].iov_len - 3936);
	ut_expected_io_set_iov(expected_io, 1, iov[60].iov_base, iov[60].iov_len);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	rc = spdk_bdev_readv_blocks_with_md(desc, io_ch, iov, 61, md_buf,
					    0, 543, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 5);
	stub_complete_io(5);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
	CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS);
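
	/* In the case above, children 2 through 6 are submitted in a single
	 * round and together consume 2 + 14 + 7 + 6 + 3 = 32 iov slots,
	 * apparently the full capacity of the parent's child iov array.
	 * That is why the 6th child is cut off at 3 iovs with its tail
	 * rewound to 15360 bytes (30 blocks), and why the completions arrive
	 * as 1 + 5 + 1 children.
	 */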

	/* Test a WRITE_ZEROES that would span an I/O boundary. WRITE_ZEROES should not be
	 * split, so test that.
	 */
	bdev->optimal_io_boundary = 15;
	g_io_done = false;
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, 9, 36, 0);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	rc = spdk_bdev_write_zeroes_blocks(desc, io_ch, 9, 36, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == true);

	/* Test an UNMAP. This should also not be split. */
	bdev->optimal_io_boundary = 16;
	g_io_done = false;
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_UNMAP, 15, 2, 0);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	rc = spdk_bdev_unmap_blocks(desc, io_ch, 15, 2, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == true);

	/* Test a FLUSH. This should also not be split. */
	bdev->optimal_io_boundary = 16;
	g_io_done = false;
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_FLUSH, 15, 2, 0);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	rc = spdk_bdev_flush_blocks(desc, io_ch, 15, 2, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == true);

	CU_ASSERT(TAILQ_EMPTY(&g_bdev_ut_channel->expected_io));

	/* Child requests return an error status */
	bdev->optimal_io_boundary = 16;
	iov[0].iov_base = (void *)0x10000;
	iov[0].iov_len = 512 * 64;
	g_io_exp_status = SPDK_BDEV_IO_STATUS_FAILED;
	g_io_done = false;
	g_io_status = SPDK_BDEV_IO_STATUS_SUCCESS;

	rc = spdk_bdev_readv_blocks(desc, io_ch, iov, 1, 1, 64, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 5);
	stub_complete_io(4);
	CU_ASSERT(g_io_done == false);
	CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED);

	/* Test that a multi vector command is terminated with failure, before continuing
	 * the splitting process, when one of the child I/Os fails.
	 * The multi vector command is the same as above: it needs to be split by strip
	 * and then split further due to the capacity of child iovs.
	 */
	for (i = 0; i < BDEV_IO_NUM_CHILD_IOV - 1; i++) {
		iov[i].iov_base = (void *)((i + 1) * 0x10000);
		iov[i].iov_len = 512;
	}
	iov[BDEV_IO_NUM_CHILD_IOV - 1].iov_base = (void *)(BDEV_IO_NUM_CHILD_IOV * 0x10000);
	iov[BDEV_IO_NUM_CHILD_IOV - 1].iov_len = 256;

	iov[BDEV_IO_NUM_CHILD_IOV].iov_base = (void *)((BDEV_IO_NUM_CHILD_IOV + 1) * 0x10000);
	iov[BDEV_IO_NUM_CHILD_IOV].iov_len = 256;

	iov[BDEV_IO_NUM_CHILD_IOV + 1].iov_base = (void *)((BDEV_IO_NUM_CHILD_IOV + 2) * 0x10000);
	iov[BDEV_IO_NUM_CHILD_IOV + 1].iov_len = 512;

	bdev->optimal_io_boundary = BDEV_IO_NUM_CHILD_IOV;

	g_io_exp_status = SPDK_BDEV_IO_STATUS_FAILED;
	g_io_done = false;
	g_io_status = SPDK_BDEV_IO_STATUS_SUCCESS;

	rc = spdk_bdev_readv_blocks(desc, io_ch, iov, BDEV_IO_NUM_CHILD_IOV * 2, 0,
				    BDEV_IO_NUM_CHILD_IOV + 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED);

	g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS;
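
	/* The two failure cases above exercise both propagation paths: in
	 * the first, four children succeed and the fifth fails, so the
	 * parent completes as FAILED only after all five complete; in the
	 * second, the very first child fails and the parent is failed
	 * without submitting the remaining children.
	 */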
1648 * 1649 * - 16K boundary, our IO will start at offset 0 with a length of 0x4200 1650 * - Our IOVs are 0x212 in size so that we run into the 16K boundary at child IOV 1651 * position 30 and overshoot by 0x2e. 1652 * - That means we'll send the IO and loop back to pick up the remaining bytes at 1653 * child IOV index 31. When we do, we find that we have to shorten index 31 by 0x2e 1654 * which eliminates that vector so we just send the first split IO with 30 vectors 1655 * and let the completion pick up the last 2 vectors. 1656 */ 1657 bdev->optimal_io_boundary = 32; 1658 bdev->split_on_optimal_io_boundary = true; 1659 g_io_done = false; 1660 1661 /* Init all parent IOVs to 0x212 */ 1662 for (i = 0; i < BDEV_IO_NUM_CHILD_IOV + 2; i++) { 1663 iov[i].iov_base = (void *)((i + 1) * 0x10000); 1664 iov[i].iov_len = 0x212; 1665 } 1666 1667 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0, BDEV_IO_NUM_CHILD_IOV, 1668 BDEV_IO_NUM_CHILD_IOV - 1); 1669 /* expect 0-29 to be 1:1 with the parent iov */ 1670 for (i = 0; i < BDEV_IO_NUM_CHILD_IOV - 2; i++) { 1671 ut_expected_io_set_iov(expected_io, i, iov[i].iov_base, iov[i].iov_len); 1672 } 1673 1674 /* expect index 30 to be shortened to 0x1e4 (0x212 - 0x2e) because of the alignment, 1675 * where 0x2e is the amount we overshot the 16K boundary 1676 */ 1677 ut_expected_io_set_iov(expected_io, BDEV_IO_NUM_CHILD_IOV - 2, 1678 (void *)(iov[BDEV_IO_NUM_CHILD_IOV - 2].iov_base), 0x1e4); 1679 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1680 1681 /* 2nd child IO will have 2 remaining vectors, one to pick up from the one that was 1682 * shortened, taking it to the next boundary, and then a final one to get us to 1683 * 0x4200 bytes for the IO. 1684 */ 1685 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, BDEV_IO_NUM_CHILD_IOV, 1686 BDEV_IO_NUM_CHILD_IOV, 2); 1687 /* position 30 picked up the remaining bytes to the next boundary */ 1688 ut_expected_io_set_iov(expected_io, 0, 1689 (void *)(iov[BDEV_IO_NUM_CHILD_IOV - 2].iov_base + 0x1e4), 0x2e); 1690 1691 /* position 31 picked up the rest of the transfer to get us to 0x4200 */ 1692 ut_expected_io_set_iov(expected_io, 1, 1693 (void *)(iov[BDEV_IO_NUM_CHILD_IOV - 1].iov_base), 0x1d2); 1694 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1695 1696 rc = spdk_bdev_readv_blocks(desc, io_ch, iov, BDEV_IO_NUM_CHILD_IOV + 1, 0, 1697 BDEV_IO_NUM_CHILD_IOV + 1, io_done, NULL); 1698 CU_ASSERT(rc == 0); 1699 CU_ASSERT(g_io_done == false); 1700 1701 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 1702 stub_complete_io(1); 1703 CU_ASSERT(g_io_done == false); 1704 1705 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 1706 stub_complete_io(1); 1707 CU_ASSERT(g_io_done == true); 1708 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 1709 1710 spdk_put_io_channel(io_ch); 1711 spdk_bdev_close(desc); 1712 free_bdev(bdev); 1713 spdk_bdev_finish(bdev_fini_cb, NULL); 1714 poll_threads(); 1715 } 1716 1717 static void 1718 bdev_io_max_size_and_segment_split_test(void) 1719 { 1720 struct spdk_bdev *bdev; 1721 struct spdk_bdev_desc *desc = NULL; 1722 struct spdk_io_channel *io_ch; 1723 struct spdk_bdev_opts bdev_opts = {}; 1724 struct iovec iov[BDEV_IO_NUM_CHILD_IOV * 2]; 1725 struct ut_expected_io *expected_io; 1726 uint64_t i; 1727 int rc; 1728 1729 spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts)); 1730 bdev_opts.bdev_io_pool_size = 512; 1731 bdev_opts.bdev_io_cache_size = 64; 1732 1733 bdev_opts.opts_size = sizeof(bdev_opts); 1734 rc =
spdk_bdev_set_opts(&bdev_opts); 1735 CU_ASSERT(rc == 0); 1736 spdk_bdev_initialize(bdev_init_cb, NULL); 1737 1738 bdev = allocate_bdev("bdev0"); 1739 1740 rc = spdk_bdev_open_ext(bdev->name, true, bdev_ut_event_cb, NULL, &desc); 1741 CU_ASSERT(rc == 0); 1742 SPDK_CU_ASSERT_FATAL(desc != NULL); 1743 io_ch = spdk_bdev_get_io_channel(desc); 1744 CU_ASSERT(io_ch != NULL); 1745 1746 bdev->split_on_optimal_io_boundary = false; 1747 bdev->optimal_io_boundary = 0; 1748 1749 /* Case 0: max_num_segments == 0, 1750 * but the segment size, 2 * 512, exceeds max_segment_size (512). 1751 */ 1752 bdev->max_segment_size = 512; 1753 bdev->max_num_segments = 0; 1754 g_io_done = false; 1755 1756 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 14, 2, 2); 1757 ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 512); 1758 ut_expected_io_set_iov(expected_io, 1, (void *)(0xF000 + 512), 512); 1759 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1760 1761 rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 2, io_done, NULL); 1762 CU_ASSERT(rc == 0); 1763 CU_ASSERT(g_io_done == false); 1764 1765 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 1766 stub_complete_io(1); 1767 CU_ASSERT(g_io_done == true); 1768 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 1769 1770 /* Case 1: max_segment_size == 0, 1771 * but the iov count, 2, exceeds max_num_segments (1). 1772 */ 1773 bdev->max_segment_size = 0; 1774 bdev->max_num_segments = 1; 1775 g_io_done = false; 1776 1777 iov[0].iov_base = (void *)0x10000; 1778 iov[0].iov_len = 512; 1779 iov[1].iov_base = (void *)0x20000; 1780 iov[1].iov_len = 8 * 512; 1781 1782 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 14, 1, 1); 1783 ut_expected_io_set_iov(expected_io, 0, iov[0].iov_base, iov[0].iov_len); 1784 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1785 1786 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 15, 8, 1); 1787 ut_expected_io_set_iov(expected_io, 0, iov[1].iov_base, iov[1].iov_len); 1788 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1789 1790 rc = spdk_bdev_readv_blocks(desc, io_ch, iov, 2, 14, 9, io_done, NULL); 1791 CU_ASSERT(rc == 0); 1792 CU_ASSERT(g_io_done == false); 1793 1794 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 1795 stub_complete_io(2); 1796 CU_ASSERT(g_io_done == true); 1797 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 1798 1799 /* Test that a non-vector command is split correctly. 1800 * Set up the expected values before calling spdk_bdev_read_blocks 1801 */ 1802 bdev->max_segment_size = 512; 1803 bdev->max_num_segments = 1; 1804 g_io_done = false; 1805 1806 /* Child IO 0 */ 1807 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 14, 1, 1); 1808 ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 512); 1809 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1810 1811 /* Child IO 1 */ 1812 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 15, 1, 1); 1813 ut_expected_io_set_iov(expected_io, 0, (void *)(0xF000 + 1 * 512), 512); 1814 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1815 1816 /* spdk_bdev_read_blocks will submit the first child immediately.
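* With the generous bdev_io pool configured above (512 entries), the second child is submitted right behind the first, which is why the outstanding count below is 2 before any completion.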
*/ 1817 rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 2, io_done, NULL); 1818 CU_ASSERT(rc == 0); 1819 CU_ASSERT(g_io_done == false); 1820 1821 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 1822 stub_complete_io(2); 1823 CU_ASSERT(g_io_done == true); 1824 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 1825 1826 /* Now set up a more complex, multi-vector command that needs to be split, 1827 * including splitting iovecs. 1828 */ 1829 bdev->max_segment_size = 2 * 512; 1830 bdev->max_num_segments = 1; 1831 g_io_done = false; 1832 1833 iov[0].iov_base = (void *)0x10000; 1834 iov[0].iov_len = 2 * 512; 1835 iov[1].iov_base = (void *)0x20000; 1836 iov[1].iov_len = 4 * 512; 1837 iov[2].iov_base = (void *)0x30000; 1838 iov[2].iov_len = 6 * 512; 1839 1840 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 14, 2, 1); 1841 ut_expected_io_set_iov(expected_io, 0, iov[0].iov_base, 512 * 2); 1842 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1843 1844 /* Split iov[1] into 2 iov entries, then split the segments */ 1845 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 16, 2, 1); 1846 ut_expected_io_set_iov(expected_io, 0, iov[1].iov_base, 512 * 2); 1847 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1848 1849 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 18, 2, 1); 1850 ut_expected_io_set_iov(expected_io, 0, iov[1].iov_base + 512 * 2, 512 * 2); 1851 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1852 1853 /* Split iov[2] into 3 iov entries, then split the segments */ 1854 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 20, 2, 1); 1855 ut_expected_io_set_iov(expected_io, 0, iov[2].iov_base, 512 * 2); 1856 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1857 1858 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 22, 2, 1); 1859 ut_expected_io_set_iov(expected_io, 0, iov[2].iov_base + 512 * 2, 512 * 2); 1860 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1861 1862 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 24, 2, 1); 1863 ut_expected_io_set_iov(expected_io, 0, iov[2].iov_base + 512 * 4, 512 * 2); 1864 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1865 1866 rc = spdk_bdev_writev_blocks(desc, io_ch, iov, 3, 14, 12, io_done, NULL); 1867 CU_ASSERT(rc == 0); 1868 CU_ASSERT(g_io_done == false); 1869 1870 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 6); 1871 stub_complete_io(6); 1872 CU_ASSERT(g_io_done == true); 1873 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 1874 1875 /* Test a multi-vector command that needs to be split by strip and then 1876 * split further due to the capacity of the parent IO's child iovs. 1877 */ 1878 bdev->max_segment_size = 512; 1879 bdev->max_num_segments = 1; 1880 g_io_done = false; 1881 1882 for (i = 0; i < BDEV_IO_NUM_CHILD_IOV; i++) { 1883 iov[i].iov_base = (void *)((i + 1) * 0x10000); 1884 iov[i].iov_len = 512 * 2; 1885 } 1886 1887 /* Each input iov is split into 2 iovs; 1888 * half of the input iovs fill all child iov entries of a single IO.
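* Worked out: with max_segment_size == 512 and max_num_segments == 1, each 1024-byte parent iov becomes two one-block child IOs, so half of the parent iovs already produce BDEV_IO_NUM_CHILD_IOV children per split round.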
1889 */ 1890 for (i = 0; i < BDEV_IO_NUM_CHILD_IOV / 2; i++) { 1891 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 2 * i, 1, 1); 1892 ut_expected_io_set_iov(expected_io, 0, iov[i].iov_base, 512); 1893 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1894 1895 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 2 * i + 1, 1, 1); 1896 ut_expected_io_set_iov(expected_io, 0, iov[i].iov_base + 512, 512); 1897 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1898 } 1899 1900 /* The remaining iovs are split in the second round */ 1901 for (i = BDEV_IO_NUM_CHILD_IOV / 2; i < BDEV_IO_NUM_CHILD_IOV; i++) { 1902 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, i * 2, 1, 1); 1903 ut_expected_io_set_iov(expected_io, 0, iov[i].iov_base, 512); 1904 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1905 1906 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, i * 2 + 1, 1, 1); 1907 ut_expected_io_set_iov(expected_io, 0, iov[i].iov_base + 512, 512); 1908 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1909 } 1910 1911 rc = spdk_bdev_readv_blocks(desc, io_ch, iov, BDEV_IO_NUM_CHILD_IOV, 0, 1912 BDEV_IO_NUM_CHILD_IOV * 2, io_done, NULL); 1913 CU_ASSERT(rc == 0); 1914 CU_ASSERT(g_io_done == false); 1915 1916 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == BDEV_IO_NUM_CHILD_IOV); 1917 stub_complete_io(BDEV_IO_NUM_CHILD_IOV); 1918 CU_ASSERT(g_io_done == false); 1919 1920 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == BDEV_IO_NUM_CHILD_IOV); 1921 stub_complete_io(BDEV_IO_NUM_CHILD_IOV); 1922 CU_ASSERT(g_io_done == true); 1923 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 1924 1925 /* An error case: a divided child IO that does 1926 * not land on a multiple of the block size 1927 * exits with an error 1928 */ 1929 bdev->max_segment_size = 512; 1930 bdev->max_num_segments = 1; 1931 g_io_done = false; 1932 1933 iov[0].iov_base = (void *)0x10000; 1934 iov[0].iov_len = 512 + 256; 1935 iov[1].iov_base = (void *)0x20000; 1936 iov[1].iov_len = 256; 1937 1938 /* iov[0] is split into 512 and 256 bytes. 1939 * 256 is less than a block size, and the next 1940 * round of splitting finds that the first child IO is smaller than 1941 * the block size, hence the error exit 1942 */ 1943 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0, 1, 1); 1944 ut_expected_io_set_iov(expected_io, 0, iov[0].iov_base, 512); 1945 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1946 1947 rc = spdk_bdev_readv_blocks(desc, io_ch, iov, 2, 0, 2, io_done, NULL); 1948 CU_ASSERT(rc == 0); 1949 CU_ASSERT(g_io_done == false); 1950 1951 /* First child IO is OK */ 1952 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 1953 stub_complete_io(1); 1954 CU_ASSERT(g_io_done == true); 1955 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 1956 1957 /* error exit */ 1958 stub_complete_io(1); 1959 CU_ASSERT(g_io_done == true); 1960 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED); 1961 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 1962 1963 /* Test a multi-vector command that needs to be split by strip and then 1964 * split further due to the capacity of child iovs. 1965 * 1966 * In this case, the last two iovs need to be split, but that would exceed 1967 * the capacity of child iovs, so the remainder must wait until the first batch has completed.
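* Worked out: (BDEV_IO_NUM_CHILD_IOV - 2) single-block entries plus the first 2-block entry split in two fill all BDEV_IO_NUM_CHILD_IOV child iov slots of the first child IO; the last 2-block entry is deferred to the second child IO.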
1968 */ 1969 bdev->max_segment_size = 512; 1970 bdev->max_num_segments = BDEV_IO_NUM_CHILD_IOV; 1971 g_io_done = false; 1972 1973 for (i = 0; i < BDEV_IO_NUM_CHILD_IOV - 2; i++) { 1974 iov[i].iov_base = (void *)((i + 1) * 0x10000); 1975 iov[i].iov_len = 512; 1976 } 1977 for (i = BDEV_IO_NUM_CHILD_IOV - 2; i < BDEV_IO_NUM_CHILD_IOV; i++) { 1978 iov[i].iov_base = (void *)((i + 1) * 0x10000); 1979 iov[i].iov_len = 512 * 2; 1980 } 1981 1982 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0, 1983 BDEV_IO_NUM_CHILD_IOV, BDEV_IO_NUM_CHILD_IOV); 1984 /* Entries 0 ~ (BDEV_IO_NUM_CHILD_IOV - 2) will not be split */ 1985 for (i = 0; i < BDEV_IO_NUM_CHILD_IOV - 2; i++) { 1986 ut_expected_io_set_iov(expected_io, i, iov[i].iov_base, iov[i].iov_len); 1987 } 1988 /* (BDEV_IO_NUM_CHILD_IOV - 2) is split */ 1989 ut_expected_io_set_iov(expected_io, i, iov[i].iov_base, 512); 1990 ut_expected_io_set_iov(expected_io, i + 1, iov[i].iov_base + 512, 512); 1991 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1992 1993 /* The child iov entries exceed the parent IO's capacity, so the rest is split in the next round */ 1994 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, BDEV_IO_NUM_CHILD_IOV, 2, 2); 1995 ut_expected_io_set_iov(expected_io, 0, iov[i + 1].iov_base, 512); 1996 ut_expected_io_set_iov(expected_io, 1, iov[i + 1].iov_base + 512, 512); 1997 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1998 1999 rc = spdk_bdev_readv_blocks(desc, io_ch, iov, BDEV_IO_NUM_CHILD_IOV, 0, 2000 BDEV_IO_NUM_CHILD_IOV + 2, io_done, NULL); 2001 CU_ASSERT(rc == 0); 2002 CU_ASSERT(g_io_done == false); 2003 2004 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 2005 stub_complete_io(1); 2006 CU_ASSERT(g_io_done == false); 2007 2008 /* Next round */ 2009 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 2010 stub_complete_io(1); 2011 CU_ASSERT(g_io_done == true); 2012 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 2013 2014 /* This case is similar to the previous one, but the last few child iov 2015 * entries do not add up to a full blocklen, so they 2016 * cannot be put into this IO and must wait for the next one. 2017 */ 2018 bdev->max_segment_size = 512; 2019 bdev->max_num_segments = BDEV_IO_NUM_CHILD_IOV; 2020 g_io_done = false; 2021 2022 for (i = 0; i < BDEV_IO_NUM_CHILD_IOV - 2; i++) { 2023 iov[i].iov_base = (void *)((i + 1) * 0x10000); 2024 iov[i].iov_len = 512; 2025 } 2026 2027 for (i = BDEV_IO_NUM_CHILD_IOV - 2; i < BDEV_IO_NUM_CHILD_IOV + 2; i++) { 2028 iov[i].iov_base = (void *)((i + 1) * 0x10000); 2029 iov[i].iov_len = 128; 2030 } 2031 2032 /* The first child iovcnt isn't BDEV_IO_NUM_CHILD_IOV but BDEV_IO_NUM_CHILD_IOV - 2, 2033 * because the 2 leftover iov entries are not enough for a blocklen. 2034 */ 2035 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0, 2036 BDEV_IO_NUM_CHILD_IOV - 2, BDEV_IO_NUM_CHILD_IOV - 2); 2037 for (i = 0; i < BDEV_IO_NUM_CHILD_IOV - 2; i++) { 2038 ut_expected_io_set_iov(expected_io, i, iov[i].iov_base, iov[i].iov_len); 2039 } 2040 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2041 2042 /* The second child IO waits until the first child IO completes before executing, 2043 * because the combined iovcnt of the two IOs exceeds the child iovcnt of the parent IO.
2044 BDEV_IO_NUM_CHILD_IOV - 2 to BDEV_IO_NUM_CHILD_IOV + 2 */ 2045 2046 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, BDEV_IO_NUM_CHILD_IOV - 2, 2047 1, 4); 2048 ut_expected_io_set_iov(expected_io, 0, iov[i].iov_base, iov[i].iov_len); 2049 ut_expected_io_set_iov(expected_io, 1, iov[i + 1].iov_base, iov[i + 1].iov_len); 2050 ut_expected_io_set_iov(expected_io, 2, iov[i + 2].iov_base, iov[i + 2].iov_len); 2051 ut_expected_io_set_iov(expected_io, 3, iov[i + 3].iov_base, iov[i + 3].iov_len); 2052 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2053 2054 rc = spdk_bdev_readv_blocks(desc, io_ch, iov, BDEV_IO_NUM_CHILD_IOV + 2, 0, 2055 BDEV_IO_NUM_CHILD_IOV - 1, io_done, NULL); 2056 CU_ASSERT(rc == 0); 2057 CU_ASSERT(g_io_done == false); 2058 2059 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 2060 stub_complete_io(1); 2061 CU_ASSERT(g_io_done == false); 2062 2063 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 2064 stub_complete_io(1); 2065 CU_ASSERT(g_io_done == true); 2066 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 2067 2068 /* A very complicated case. Each sg entry exceeds max_segment_size and 2069 * needs to be split. In addition, each child IO must be a multiple of the blocklen, 2070 * and the child iovcnt exceeds the parent iovcnt. 2071 */ 2072 bdev->max_segment_size = 512 + 128; 2073 bdev->max_num_segments = 3; 2074 g_io_done = false; 2075 2076 for (i = 0; i < BDEV_IO_NUM_CHILD_IOV - 2; i++) { 2077 iov[i].iov_base = (void *)((i + 1) * 0x10000); 2078 iov[i].iov_len = 512 + 256; 2079 } 2080 2081 for (i = BDEV_IO_NUM_CHILD_IOV - 2; i < BDEV_IO_NUM_CHILD_IOV + 2; i++) { 2082 iov[i].iov_base = (void *)((i + 1) * 0x10000); 2083 iov[i].iov_len = 512 + 128; 2084 } 2085 2086 /* Child IOs use 9 child iov entries per for() round, 3 * 9 = 27 in total. 2087 * Each round consumes 4 parent IO iov entries and 6 blocks. 2088 * The loop generates 9 child IOs. 2089 */ 2090 for (i = 0; i < 3; i++) { 2091 uint32_t j = i * 4; 2092 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, i * 6, 2, 3); 2093 ut_expected_io_set_iov(expected_io, 0, iov[j].iov_base, 640); 2094 ut_expected_io_set_iov(expected_io, 1, iov[j].iov_base + 640, 128); 2095 ut_expected_io_set_iov(expected_io, 2, iov[j + 1].iov_base, 256); 2096 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2097 2098 /* A child IO must be a multiple of the blocklen, so 2099 * iov[j + 2] must be split. If the third entry were also added, 2100 * a multiple of the blocklen could not be guaranteed. But the slot still 2101 * occupies one iov entry of the parent's child iov array.
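* (512 + 512 = 1024 bytes, exactly 2 blocks; appending iov[j + 2]'s remaining 256 bytes would leave the child at 2.5 blocks.)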
2102 */ 2103 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, i * 6 + 2, 2, 2); 2104 ut_expected_io_set_iov(expected_io, 0, iov[j + 1].iov_base + 256, 512); 2105 ut_expected_io_set_iov(expected_io, 1, iov[j + 2].iov_base, 512); 2106 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2107 2108 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, i * 6 + 4, 2, 3); 2109 ut_expected_io_set_iov(expected_io, 0, iov[j + 2].iov_base + 512, 256); 2110 ut_expected_io_set_iov(expected_io, 1, iov[j + 3].iov_base, 640); 2111 ut_expected_io_set_iov(expected_io, 2, iov[j + 3].iov_base + 640, 128); 2112 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2113 } 2114 2115 /* Child iov position at 27, the 10th child IO 2116 * iov entry index is 3 * 4 and offset is 3 * 6 2117 */ 2118 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 18, 2, 3); 2119 ut_expected_io_set_iov(expected_io, 0, iov[12].iov_base, 640); 2120 ut_expected_io_set_iov(expected_io, 1, iov[12].iov_base + 640, 128); 2121 ut_expected_io_set_iov(expected_io, 2, iov[13].iov_base, 256); 2122 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2123 2124 /* Child iov position at 30, the 11th child IO */ 2125 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 20, 2, 2); 2126 ut_expected_io_set_iov(expected_io, 0, iov[13].iov_base + 256, 512); 2127 ut_expected_io_set_iov(expected_io, 1, iov[14].iov_base, 512); 2128 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2129 2130 /* The 2nd split round and iovpos is 0, the 12th child IO */ 2131 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 22, 2, 3); 2132 ut_expected_io_set_iov(expected_io, 0, iov[14].iov_base + 512, 256); 2133 ut_expected_io_set_iov(expected_io, 1, iov[15].iov_base, 640); 2134 ut_expected_io_set_iov(expected_io, 2, iov[15].iov_base + 640, 128); 2135 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2136 2137 /* Consume 9 child IOs and 27 child iov entries. 2138 * Consume 4 parent IO iov entries per for() round and 6 block size. 2139 * Parent IO iov index start from 16 and block offset start from 24 2140 */ 2141 for (i = 0; i < 3; i++) { 2142 uint32_t j = i * 4 + 16; 2143 uint32_t offset = i * 6 + 24; 2144 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, offset, 2, 3); 2145 ut_expected_io_set_iov(expected_io, 0, iov[j].iov_base, 640); 2146 ut_expected_io_set_iov(expected_io, 1, iov[j].iov_base + 640, 128); 2147 ut_expected_io_set_iov(expected_io, 2, iov[j + 1].iov_base, 256); 2148 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2149 2150 /* Child io must be a multiple of blocklen 2151 * iov[j + 2] must be split. If the third entry is also added, 2152 * the multiple of blocklen cannot be guaranteed. But it still 2153 * occupies one iov entry of the parent child iov. 
2154 */ 2155 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, offset + 2, 2, 2); 2156 ut_expected_io_set_iov(expected_io, 0, iov[j + 1].iov_base + 256, 512); 2157 ut_expected_io_set_iov(expected_io, 1, iov[j + 2].iov_base, 512); 2158 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2159 2160 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, offset + 4, 2, 3); 2161 ut_expected_io_set_iov(expected_io, 0, iov[j + 2].iov_base + 512, 256); 2162 ut_expected_io_set_iov(expected_io, 1, iov[j + 3].iov_base, 640); 2163 ut_expected_io_set_iov(expected_io, 2, iov[j + 3].iov_base + 640, 128); 2164 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2165 } 2166 2167 /* The 22th child IO, child iov position at 30 */ 2168 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 42, 1, 1); 2169 ut_expected_io_set_iov(expected_io, 0, iov[28].iov_base, 512); 2170 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2171 2172 /* The third round */ 2173 /* Here is the 23nd child IO and child iovpos is 0 */ 2174 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 43, 2, 3); 2175 ut_expected_io_set_iov(expected_io, 0, iov[28].iov_base + 512, 256); 2176 ut_expected_io_set_iov(expected_io, 1, iov[29].iov_base, 640); 2177 ut_expected_io_set_iov(expected_io, 2, iov[29].iov_base + 640, 128); 2178 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2179 2180 /* The 24th child IO */ 2181 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 45, 3, 3); 2182 ut_expected_io_set_iov(expected_io, 0, iov[30].iov_base, 640); 2183 ut_expected_io_set_iov(expected_io, 1, iov[31].iov_base, 640); 2184 ut_expected_io_set_iov(expected_io, 2, iov[32].iov_base, 256); 2185 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2186 2187 /* The 25th child IO */ 2188 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 48, 2, 2); 2189 ut_expected_io_set_iov(expected_io, 0, iov[32].iov_base + 256, 384); 2190 ut_expected_io_set_iov(expected_io, 1, iov[33].iov_base, 640); 2191 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2192 2193 rc = spdk_bdev_readv_blocks(desc, io_ch, iov, BDEV_IO_NUM_CHILD_IOV + 2, 0, 2194 50, io_done, NULL); 2195 CU_ASSERT(rc == 0); 2196 CU_ASSERT(g_io_done == false); 2197 2198 /* Parent IO supports up to 32 child iovs, so it is calculated that 2199 * a maximum of 11 IOs can be split at a time, and the 2200 * splitting will continue after the first batch is over. 2201 */ 2202 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 11); 2203 stub_complete_io(11); 2204 CU_ASSERT(g_io_done == false); 2205 2206 /* The 2nd round */ 2207 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 11); 2208 stub_complete_io(11); 2209 CU_ASSERT(g_io_done == false); 2210 2211 /* The last round */ 2212 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 3); 2213 stub_complete_io(3); 2214 CU_ASSERT(g_io_done == true); 2215 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 2216 2217 /* Test an WRITE_ZEROES. This should also not be split. 
*/ 2218 bdev->max_segment_size = 512; 2219 bdev->max_num_segments = 1; 2220 g_io_done = false; 2221 2222 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, 9, 36, 0); 2223 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2224 2225 rc = spdk_bdev_write_zeroes_blocks(desc, io_ch, 9, 36, io_done, NULL); 2226 CU_ASSERT(rc == 0); 2227 CU_ASSERT(g_io_done == false); 2228 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 2229 stub_complete_io(1); 2230 CU_ASSERT(g_io_done == true); 2231 2232 /* Test an UNMAP. This should also not be split. */ 2233 g_io_done = false; 2234 2235 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_UNMAP, 15, 4, 0); 2236 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2237 2238 rc = spdk_bdev_unmap_blocks(desc, io_ch, 15, 4, io_done, NULL); 2239 CU_ASSERT(rc == 0); 2240 CU_ASSERT(g_io_done == false); 2241 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 2242 stub_complete_io(1); 2243 CU_ASSERT(g_io_done == true); 2244 2245 /* Test a FLUSH. This should also not be split. */ 2246 g_io_done = false; 2247 2248 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_FLUSH, 15, 4, 0); 2249 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2250 2251 rc = spdk_bdev_flush_blocks(desc, io_ch, 15, 4, io_done, NULL); 2252 CU_ASSERT(rc == 0); 2253 CU_ASSERT(g_io_done == false); 2254 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 2255 stub_complete_io(1); 2256 CU_ASSERT(g_io_done == true); 2257 2258 spdk_put_io_channel(io_ch); 2259 spdk_bdev_close(desc); 2260 free_bdev(bdev); 2261 spdk_bdev_finish(bdev_fini_cb, NULL); 2262 poll_threads(); 2263 } 2264 2265 static void 2266 bdev_io_mix_split_test(void) 2267 { 2268 struct spdk_bdev *bdev; 2269 struct spdk_bdev_desc *desc = NULL; 2270 struct spdk_io_channel *io_ch; 2271 struct spdk_bdev_opts bdev_opts = {}; 2272 struct iovec iov[BDEV_IO_NUM_CHILD_IOV * 2]; 2273 struct ut_expected_io *expected_io; 2274 uint64_t i; 2275 int rc; 2276 2277 spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts)); 2278 bdev_opts.bdev_io_pool_size = 512; 2279 bdev_opts.bdev_io_cache_size = 64; 2280 2281 rc = spdk_bdev_set_opts(&bdev_opts); 2282 CU_ASSERT(rc == 0); 2283 spdk_bdev_initialize(bdev_init_cb, NULL); 2284 2285 bdev = allocate_bdev("bdev0"); 2286 2287 rc = spdk_bdev_open_ext(bdev->name, true, bdev_ut_event_cb, NULL, &desc); 2288 CU_ASSERT(rc == 0); 2289 SPDK_CU_ASSERT_FATAL(desc != NULL); 2290 io_ch = spdk_bdev_get_io_channel(desc); 2291 CU_ASSERT(io_ch != NULL); 2292 2293 /* First case optimal_io_boundary == max_segment_size * max_num_segments */ 2294 bdev->split_on_optimal_io_boundary = true; 2295 bdev->optimal_io_boundary = 16; 2296 2297 bdev->max_segment_size = 512; 2298 bdev->max_num_segments = 16; 2299 g_io_done = false; 2300 2301 /* An IO crossing the IO boundary requires a split. 2302 * Total 2 child IOs.
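* Worked out: 4 blocks starting at block 14 cross the boundary at block 16, giving two 2-block children; each child's 1024 bytes is further split into two 512-byte segment entries.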
2303 */ 2304 2305 /* The 1st child IO splits the buffer into multiple segment entries */ 2306 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 14, 2, 2); 2307 ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 512); 2308 ut_expected_io_set_iov(expected_io, 1, (void *)(0xF000 + 512), 512); 2309 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2310 2311 /* The 2nd child IO splits the buffer into multiple segment entries */ 2312 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 16, 2, 2); 2313 ut_expected_io_set_iov(expected_io, 0, (void *)(0xF000 + 2 * 512), 512); 2314 ut_expected_io_set_iov(expected_io, 1, (void *)(0xF000 + 3 * 512), 512); 2315 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2316 2317 rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 4, io_done, NULL); 2318 CU_ASSERT(rc == 0); 2319 CU_ASSERT(g_io_done == false); 2320 2321 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 2322 stub_complete_io(2); 2323 CU_ASSERT(g_io_done == true); 2324 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 2325 2326 /* Second case optimal_io_boundary > max_segment_size * max_num_segments */ 2327 bdev->max_segment_size = 15 * 512; 2328 bdev->max_num_segments = 1; 2329 g_io_done = false; 2330 2331 /* An IO crossing the IO boundary requires a split. 2332 * The 1st child IO's segment size exceeds max_segment_size, 2333 * so the 1st child IO is split into multiple segment entries 2334 * and then into 2 child IOs because of max_num_segments. 2335 * Total 3 child IOs. 2336 */ 2337 2338 /* The first 2 IOs fall within one IO boundary. 2339 * Because optimal_io_boundary > max_segment_size * max_num_segments, 2340 * the boundary's worth of data is split into these first 2 IOs. 2341 */ 2342 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0, 15, 1); 2343 ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 512 * 15); 2344 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2345 2346 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 15, 1, 1); 2347 ut_expected_io_set_iov(expected_io, 0, (void *)(0xF000 + 512 * 15), 512); 2348 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2349 2350 /* The 3rd child IO exists because of the IO boundary */ 2351 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 16, 2, 1); 2352 ut_expected_io_set_iov(expected_io, 0, (void *)(0xF000 + 512 * 16), 512 * 2); 2353 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2354 2355 rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 0, 18, io_done, NULL); 2356 CU_ASSERT(rc == 0); 2357 CU_ASSERT(g_io_done == false); 2358 2359 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 3); 2360 stub_complete_io(3); 2361 CU_ASSERT(g_io_done == true); 2362 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 2363 2364 /* Third case optimal_io_boundary < max_segment_size * max_num_segments */ 2365 bdev->max_segment_size = 17 * 512; 2366 bdev->max_num_segments = 1; 2367 g_io_done = false; 2368 2369 /* An IO crossing the IO boundary requires a split, 2370 * but the child IOs do not split further. 2371 * Total 2 child IOs.
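* Worked out: the first child's 16 blocks (8192 bytes) fit within max_segment_size (17 * 512 = 8704 bytes), so a single segment entry suffices per child.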
2372 */ 2373 2374 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0, 16, 1); 2375 ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 512 * 16); 2376 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2377 2378 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 16, 2, 1); 2379 ut_expected_io_set_iov(expected_io, 0, (void *)(0xF000 + 512 * 16), 512 * 2); 2380 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2381 2382 rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 0, 18, io_done, NULL); 2383 CU_ASSERT(rc == 0); 2384 CU_ASSERT(g_io_done == false); 2385 2386 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 2387 stub_complete_io(2); 2388 CU_ASSERT(g_io_done == true); 2389 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 2390 2391 /* Now set up a more complex, multi-vector command that needs to be split, 2392 * including splitting iovecs. 2393 * optimal_io_boundary < max_segment_size * max_num_segments 2394 */ 2395 bdev->max_segment_size = 3 * 512; 2396 bdev->max_num_segments = 6; 2397 g_io_done = false; 2398 2399 iov[0].iov_base = (void *)0x10000; 2400 iov[0].iov_len = 4 * 512; 2401 iov[1].iov_base = (void *)0x20000; 2402 iov[1].iov_len = 4 * 512; 2403 iov[2].iov_base = (void *)0x30000; 2404 iov[2].iov_len = 10 * 512; 2405 2406 /* IO crossing the IO boundary requires split. 2407 * The 1st child IO segment size exceeds the max_segment_size and after 2408 * splitting segment_size, the num_segments exceeds max_num_segments. 2409 * So 1st child IO will be splitted to 2 child IOs. 2410 * Total 3 child IOs. 2411 */ 2412 2413 /* The first 2 IOs are in an IO boundary. 2414 * After splitting segmemt size the segment num exceeds. 2415 * So it splits to 2 child IOs. 2416 */ 2417 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 0, 14, 6); 2418 ut_expected_io_set_iov(expected_io, 0, iov[0].iov_base, 512 * 3); 2419 ut_expected_io_set_iov(expected_io, 1, iov[0].iov_base + 512 * 3, 512); 2420 ut_expected_io_set_iov(expected_io, 2, iov[1].iov_base, 512 * 3); 2421 ut_expected_io_set_iov(expected_io, 3, iov[1].iov_base + 512 * 3, 512); 2422 ut_expected_io_set_iov(expected_io, 4, iov[2].iov_base, 512 * 3); 2423 ut_expected_io_set_iov(expected_io, 5, iov[2].iov_base + 512 * 3, 512 * 3); 2424 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2425 2426 /* The 2nd child IO has the left segment entry */ 2427 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 14, 2, 1); 2428 ut_expected_io_set_iov(expected_io, 0, iov[2].iov_base + 512 * 6, 512 * 2); 2429 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2430 2431 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 16, 2, 1); 2432 ut_expected_io_set_iov(expected_io, 0, iov[2].iov_base + 512 * 8, 512 * 2); 2433 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2434 2435 rc = spdk_bdev_writev_blocks(desc, io_ch, iov, 3, 0, 18, io_done, NULL); 2436 CU_ASSERT(rc == 0); 2437 CU_ASSERT(g_io_done == false); 2438 2439 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 3); 2440 stub_complete_io(3); 2441 CU_ASSERT(g_io_done == true); 2442 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 2443 2444 /* A very complicated case. Each sg entry exceeds max_segment_size 2445 * and split on io boundary. 
2446 * optimal_io_boundary < max_segment_size * max_num_segments 2447 */ 2448 bdev->max_segment_size = 3 * 512; 2449 bdev->max_num_segments = BDEV_IO_NUM_CHILD_IOV; 2450 g_io_done = false; 2451 2452 for (i = 0; i < 20; i++) { 2453 iov[i].iov_base = (void *)((i + 1) * 0x10000); 2454 iov[i].iov_len = 512 * 4; 2455 } 2456 2457 /* An IO crossing the IO boundary requires a split. 2458 * The 80-block length splits into 5 child IOs based on the offset and IO boundary. 2459 * Each iov entry needs to be split into 2 entries because of max_segment_size. 2460 * Total 5 child IOs. 2461 */ 2462 2463 /* 4 iov entries fall within one IO boundary and each splits in two, 2464 * so each child IO occupies 8 child iov entries. 2465 */ 2466 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 0, 16, 8); 2467 for (i = 0; i < 4; i++) { 2468 int iovcnt = i * 2; 2469 ut_expected_io_set_iov(expected_io, iovcnt, iov[i].iov_base, 512 * 3); 2470 ut_expected_io_set_iov(expected_io, iovcnt + 1, iov[i].iov_base + 512 * 3, 512); 2471 } 2472 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2473 2474 /* 2nd child IO and total 16 child iov entries of parent IO */ 2475 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 16, 16, 8); 2476 for (i = 4; i < 8; i++) { 2477 int iovcnt = (i - 4) * 2; 2478 ut_expected_io_set_iov(expected_io, iovcnt, iov[i].iov_base, 512 * 3); 2479 ut_expected_io_set_iov(expected_io, iovcnt + 1, iov[i].iov_base + 512 * 3, 512); 2480 } 2481 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2482 2483 /* 3rd child IO and total 24 child iov entries of parent IO */ 2484 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 32, 16, 8); 2485 for (i = 8; i < 12; i++) { 2486 int iovcnt = (i - 8) * 2; 2487 ut_expected_io_set_iov(expected_io, iovcnt, iov[i].iov_base, 512 * 3); 2488 ut_expected_io_set_iov(expected_io, iovcnt + 1, iov[i].iov_base + 512 * 3, 512); 2489 } 2490 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2491 2492 /* 4th child IO and total 32 child iov entries of parent IO */ 2493 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 48, 16, 8); 2494 for (i = 12; i < 16; i++) { 2495 int iovcnt = (i - 12) * 2; 2496 ut_expected_io_set_iov(expected_io, iovcnt, iov[i].iov_base, 512 * 3); 2497 ut_expected_io_set_iov(expected_io, iovcnt + 1, iov[i].iov_base + 512 * 3, 512); 2498 } 2499 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2500 2501 /* The 5th child IO runs out of child iov entries, so it is split off 2502 * into the next round.
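* (The first four children consume 4 * 8 = 32 child iov entries, the entire array.)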
2503 */ 2504 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 64, 16, 8); 2505 for (i = 16; i < 20; i++) { 2506 int iovcnt = (i - 16) * 2; 2507 ut_expected_io_set_iov(expected_io, iovcnt, iov[i].iov_base, 512 * 3); 2508 ut_expected_io_set_iov(expected_io, iovcnt + 1, iov[i].iov_base + 512 * 3, 512); 2509 } 2510 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2511 2512 rc = spdk_bdev_writev_blocks(desc, io_ch, iov, 20, 0, 80, io_done, NULL); 2513 CU_ASSERT(rc == 0); 2514 CU_ASSERT(g_io_done == false); 2515 2516 /* First split round */ 2517 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 4); 2518 stub_complete_io(4); 2519 CU_ASSERT(g_io_done == false); 2520 2521 /* Second split round */ 2522 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 2523 stub_complete_io(1); 2524 CU_ASSERT(g_io_done == true); 2525 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 2526 2527 spdk_put_io_channel(io_ch); 2528 spdk_bdev_close(desc); 2529 free_bdev(bdev); 2530 spdk_bdev_finish(bdev_fini_cb, NULL); 2531 poll_threads(); 2532 } 2533 2534 static void 2535 bdev_io_split_with_io_wait(void) 2536 { 2537 struct spdk_bdev *bdev; 2538 struct spdk_bdev_desc *desc = NULL; 2539 struct spdk_io_channel *io_ch; 2540 struct spdk_bdev_channel *channel; 2541 struct spdk_bdev_mgmt_channel *mgmt_ch; 2542 struct spdk_bdev_opts bdev_opts = {}; 2543 struct iovec iov[3]; 2544 struct ut_expected_io *expected_io; 2545 int rc; 2546 2547 spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts)); 2548 bdev_opts.bdev_io_pool_size = 2; 2549 bdev_opts.bdev_io_cache_size = 1; 2550 2551 rc = spdk_bdev_set_opts(&bdev_opts); 2552 CU_ASSERT(rc == 0); 2553 spdk_bdev_initialize(bdev_init_cb, NULL); 2554 2555 bdev = allocate_bdev("bdev0"); 2556 2557 rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc); 2558 CU_ASSERT(rc == 0); 2559 CU_ASSERT(desc != NULL); 2560 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 2561 io_ch = spdk_bdev_get_io_channel(desc); 2562 CU_ASSERT(io_ch != NULL); 2563 channel = spdk_io_channel_get_ctx(io_ch); 2564 mgmt_ch = channel->shared_resource->mgmt_ch; 2565 2566 bdev->optimal_io_boundary = 16; 2567 bdev->split_on_optimal_io_boundary = true; 2568 2569 rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, NULL); 2570 CU_ASSERT(rc == 0); 2571 2572 /* Now test that a single-vector command is split correctly. 2573 * Offset 14, length 8, payload 0xF000 2574 * Child - Offset 14, length 2, payload 0xF000 2575 * Child - Offset 16, length 6, payload 0xF000 + 2 * 512 2576 * 2577 * Set up the expected values before calling spdk_bdev_read_blocks 2578 */ 2579 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 14, 2, 1); 2580 ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 2 * 512); 2581 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2582 2583 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 16, 6, 1); 2584 ut_expected_io_set_iov(expected_io, 0, (void *)(0xF000 + 2 * 512), 6 * 512); 2585 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2586 2587 /* The following children will be submitted sequentially due to the capacity of 2588 * spdk_bdev_io. 
2589 */ 2590 2591 /* The first child I/O will be queued to wait until an spdk_bdev_io becomes available */ 2592 rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 8, io_done, NULL); 2593 CU_ASSERT(rc == 0); 2594 CU_ASSERT(!TAILQ_EMPTY(&mgmt_ch->io_wait_queue)); 2595 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 2596 2597 /* Completing the first read I/O will submit the first child */ 2598 stub_complete_io(1); 2599 CU_ASSERT(TAILQ_EMPTY(&mgmt_ch->io_wait_queue)); 2600 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 2601 2602 /* Completing the first child will submit the second child */ 2603 stub_complete_io(1); 2604 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 2605 2606 /* Complete the second child I/O. This should result in our callback getting 2607 * invoked since the parent I/O is now complete. 2608 */ 2609 stub_complete_io(1); 2610 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 2611 2612 /* Now set up a more complex, multi-vector command that needs to be split, 2613 * including splitting iovecs. 2614 */ 2615 iov[0].iov_base = (void *)0x10000; 2616 iov[0].iov_len = 512; 2617 iov[1].iov_base = (void *)0x20000; 2618 iov[1].iov_len = 20 * 512; 2619 iov[2].iov_base = (void *)0x30000; 2620 iov[2].iov_len = 11 * 512; 2621 2622 g_io_done = false; 2623 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 14, 2, 2); 2624 ut_expected_io_set_iov(expected_io, 0, (void *)0x10000, 512); 2625 ut_expected_io_set_iov(expected_io, 1, (void *)0x20000, 512); 2626 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2627 2628 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 16, 16, 1); 2629 ut_expected_io_set_iov(expected_io, 0, (void *)(0x20000 + 512), 16 * 512); 2630 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2631 2632 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 32, 14, 2); 2633 ut_expected_io_set_iov(expected_io, 0, (void *)(0x20000 + 17 * 512), 3 * 512); 2634 ut_expected_io_set_iov(expected_io, 1, (void *)0x30000, 11 * 512); 2635 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2636 2637 rc = spdk_bdev_writev_blocks(desc, io_ch, iov, 3, 14, 32, io_done, NULL); 2638 CU_ASSERT(rc == 0); 2639 CU_ASSERT(g_io_done == false); 2640 2641 /* The following children will be submitted sequentially due to the capacity of 2642 * spdk_bdev_io. 2643 */ 2644 2645 /* Completing the first child will submit the second child */ 2646 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 2647 stub_complete_io(1); 2648 CU_ASSERT(g_io_done == false); 2649 2650 /* Completing the second child will submit the third child */ 2651 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 2652 stub_complete_io(1); 2653 CU_ASSERT(g_io_done == false); 2654 2655 /* Completing the third child will result in our callback getting invoked 2656 * since the parent I/O is now complete. 
2657 */ 2658 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 2659 stub_complete_io(1); 2660 CU_ASSERT(g_io_done == true); 2661 2662 CU_ASSERT(TAILQ_EMPTY(&g_bdev_ut_channel->expected_io)); 2663 2664 spdk_put_io_channel(io_ch); 2665 spdk_bdev_close(desc); 2666 free_bdev(bdev); 2667 spdk_bdev_finish(bdev_fini_cb, NULL); 2668 poll_threads(); 2669 } 2670 2671 static void 2672 bdev_io_alignment(void) 2673 { 2674 struct spdk_bdev *bdev; 2675 struct spdk_bdev_desc *desc = NULL; 2676 struct spdk_io_channel *io_ch; 2677 struct spdk_bdev_opts bdev_opts = {}; 2678 int rc; 2679 void *buf = NULL; 2680 struct iovec iovs[2]; 2681 int iovcnt; 2682 uint64_t alignment; 2683 2684 spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts)); 2685 bdev_opts.bdev_io_pool_size = 20; 2686 bdev_opts.bdev_io_cache_size = 2; 2687 2688 rc = spdk_bdev_set_opts(&bdev_opts); 2689 CU_ASSERT(rc == 0); 2690 spdk_bdev_initialize(bdev_init_cb, NULL); 2691 2692 fn_table.submit_request = stub_submit_request_get_buf; 2693 bdev = allocate_bdev("bdev0"); 2694 2695 rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc); 2696 CU_ASSERT(rc == 0); 2697 CU_ASSERT(desc != NULL); 2698 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 2699 io_ch = spdk_bdev_get_io_channel(desc); 2700 CU_ASSERT(io_ch != NULL); 2701 2702 /* Create aligned buffer */ 2703 rc = posix_memalign(&buf, 4096, 8192); 2704 SPDK_CU_ASSERT_FATAL(rc == 0); 2705 2706 /* Pass aligned single buffer with no alignment required */ 2707 alignment = 1; 2708 bdev->required_alignment = spdk_u32log2(alignment); 2709 2710 rc = spdk_bdev_write_blocks(desc, io_ch, buf, 0, 1, io_done, NULL); 2711 CU_ASSERT(rc == 0); 2712 stub_complete_io(1); 2713 CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt, 2714 alignment)); 2715 2716 rc = spdk_bdev_read_blocks(desc, io_ch, buf, 0, 1, io_done, NULL); 2717 CU_ASSERT(rc == 0); 2718 stub_complete_io(1); 2719 CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt, 2720 alignment)); 2721 2722 /* Pass unaligned single buffer with no alignment required */ 2723 alignment = 1; 2724 bdev->required_alignment = spdk_u32log2(alignment); 2725 2726 rc = spdk_bdev_write_blocks(desc, io_ch, buf + 4, 0, 1, io_done, NULL); 2727 CU_ASSERT(rc == 0); 2728 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0); 2729 CU_ASSERT(g_bdev_io->u.bdev.iovs[0].iov_base == buf + 4); 2730 stub_complete_io(1); 2731 2732 rc = spdk_bdev_read_blocks(desc, io_ch, buf + 4, 0, 1, io_done, NULL); 2733 CU_ASSERT(rc == 0); 2734 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0); 2735 CU_ASSERT(g_bdev_io->u.bdev.iovs[0].iov_base == buf + 4); 2736 stub_complete_io(1); 2737 2738 /* Pass unaligned single buffer with 512 alignment required */ 2739 alignment = 512; 2740 bdev->required_alignment = spdk_u32log2(alignment); 2741 2742 rc = spdk_bdev_write_blocks(desc, io_ch, buf + 4, 0, 1, io_done, NULL); 2743 CU_ASSERT(rc == 0); 2744 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 1); 2745 CU_ASSERT(g_bdev_io->u.bdev.iovs == &g_bdev_io->internal.bounce_iov); 2746 CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt, 2747 alignment)); 2748 stub_complete_io(1); 2749 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0); 2750 2751 rc = spdk_bdev_read_blocks(desc, io_ch, buf + 4, 0, 1, io_done, NULL); 2752 CU_ASSERT(rc == 0); 2753 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 1); 2754 CU_ASSERT(g_bdev_io->u.bdev.iovs == &g_bdev_io->internal.bounce_iov); 2755 CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt, 
2756 alignment)); 2757 stub_complete_io(1); 2758 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0); 2759 2760 /* Pass unaligned single buffer with 4096 alignment required */ 2761 alignment = 4096; 2762 bdev->required_alignment = spdk_u32log2(alignment); 2763 2764 rc = spdk_bdev_write_blocks(desc, io_ch, buf + 8, 0, 1, io_done, NULL); 2765 CU_ASSERT(rc == 0); 2766 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 1); 2767 CU_ASSERT(g_bdev_io->u.bdev.iovs == &g_bdev_io->internal.bounce_iov); 2768 CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt, 2769 alignment)); 2770 stub_complete_io(1); 2771 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0); 2772 2773 rc = spdk_bdev_read_blocks(desc, io_ch, buf + 8, 0, 1, io_done, NULL); 2774 CU_ASSERT(rc == 0); 2775 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 1); 2776 CU_ASSERT(g_bdev_io->u.bdev.iovs == &g_bdev_io->internal.bounce_iov); 2777 CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt, 2778 alignment)); 2779 stub_complete_io(1); 2780 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0); 2781 2782 /* Pass aligned iovs with no alignment required */ 2783 alignment = 1; 2784 bdev->required_alignment = spdk_u32log2(alignment); 2785 2786 iovcnt = 1; 2787 iovs[0].iov_base = buf; 2788 iovs[0].iov_len = 512; 2789 2790 rc = spdk_bdev_writev(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL); 2791 CU_ASSERT(rc == 0); 2792 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0); 2793 stub_complete_io(1); 2794 CU_ASSERT(g_bdev_io->u.bdev.iovs[0].iov_base == iovs[0].iov_base); 2795 2796 rc = spdk_bdev_readv(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL); 2797 CU_ASSERT(rc == 0); 2798 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0); 2799 stub_complete_io(1); 2800 CU_ASSERT(g_bdev_io->u.bdev.iovs[0].iov_base == iovs[0].iov_base); 2801 2802 /* Pass unaligned iovs with no alignment required */ 2803 alignment = 1; 2804 bdev->required_alignment = spdk_u32log2(alignment); 2805 2806 iovcnt = 2; 2807 iovs[0].iov_base = buf + 16; 2808 iovs[0].iov_len = 256; 2809 iovs[1].iov_base = buf + 16 + 256 + 32; 2810 iovs[1].iov_len = 256; 2811 2812 rc = spdk_bdev_writev(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL); 2813 CU_ASSERT(rc == 0); 2814 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0); 2815 stub_complete_io(1); 2816 CU_ASSERT(g_bdev_io->u.bdev.iovs[0].iov_base == iovs[0].iov_base); 2817 2818 rc = spdk_bdev_readv(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL); 2819 CU_ASSERT(rc == 0); 2820 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0); 2821 stub_complete_io(1); 2822 CU_ASSERT(g_bdev_io->u.bdev.iovs[0].iov_base == iovs[0].iov_base); 2823 2824 /* Pass unaligned iov with 2048 alignment required */ 2825 alignment = 2048; 2826 bdev->required_alignment = spdk_u32log2(alignment); 2827 2828 iovcnt = 2; 2829 iovs[0].iov_base = buf + 16; 2830 iovs[0].iov_len = 256; 2831 iovs[1].iov_base = buf + 16 + 256 + 32; 2832 iovs[1].iov_len = 256; 2833 2834 rc = spdk_bdev_writev(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL); 2835 CU_ASSERT(rc == 0); 2836 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == iovcnt); 2837 CU_ASSERT(g_bdev_io->u.bdev.iovs == &g_bdev_io->internal.bounce_iov); 2838 CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt, 2839 alignment)); 2840 stub_complete_io(1); 2841 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0); 2842 2843 rc = spdk_bdev_readv(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL); 2844 CU_ASSERT(rc == 0); 2845 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == iovcnt); 2846 
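/* As with the write above, the unaligned readv should have been redirected to the single bounce iov. */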
CU_ASSERT(g_bdev_io->u.bdev.iovs == &g_bdev_io->internal.bounce_iov); 2847 CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt, 2848 alignment)); 2849 stub_complete_io(1); 2850 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0); 2851 2852 /* Pass an iov without an allocated buffer and with no alignment required */ 2853 alignment = 1; 2854 bdev->required_alignment = spdk_u32log2(alignment); 2855 2856 iovcnt = 1; 2857 iovs[0].iov_base = NULL; 2858 iovs[0].iov_len = 0; 2859 2860 rc = spdk_bdev_readv(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL); 2861 CU_ASSERT(rc == 0); 2862 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0); 2863 CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt, 2864 alignment)); 2865 stub_complete_io(1); 2866 2867 /* Pass an iov without an allocated buffer and with 1024 alignment required */ 2868 alignment = 1024; 2869 bdev->required_alignment = spdk_u32log2(alignment); 2870 2871 iovcnt = 1; 2872 iovs[0].iov_base = NULL; 2873 iovs[0].iov_len = 0; 2874 2875 rc = spdk_bdev_readv(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL); 2876 CU_ASSERT(rc == 0); 2877 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0); 2878 CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt, 2879 alignment)); 2880 stub_complete_io(1); 2881 2882 spdk_put_io_channel(io_ch); 2883 spdk_bdev_close(desc); 2884 free_bdev(bdev); 2885 fn_table.submit_request = stub_submit_request; 2886 spdk_bdev_finish(bdev_fini_cb, NULL); 2887 poll_threads(); 2888 2889 free(buf); 2890 } 2891 2892 static void 2893 bdev_io_alignment_with_boundary(void) 2894 { 2895 struct spdk_bdev *bdev; 2896 struct spdk_bdev_desc *desc = NULL; 2897 struct spdk_io_channel *io_ch; 2898 struct spdk_bdev_opts bdev_opts = {}; 2899 int rc; 2900 void *buf = NULL; 2901 struct iovec iovs[2]; 2902 int iovcnt; 2903 uint64_t alignment; 2904 2905 spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts)); 2906 bdev_opts.bdev_io_pool_size = 20; 2907 bdev_opts.bdev_io_cache_size = 2; 2908 2909 bdev_opts.opts_size = sizeof(bdev_opts); 2910 rc = spdk_bdev_set_opts(&bdev_opts); 2911 CU_ASSERT(rc == 0); 2912 spdk_bdev_initialize(bdev_init_cb, NULL); 2913 2914 fn_table.submit_request = stub_submit_request_get_buf; 2915 bdev = allocate_bdev("bdev0"); 2916 2917 rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc); 2918 CU_ASSERT(rc == 0); 2919 CU_ASSERT(desc != NULL); 2920 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 2921 io_ch = spdk_bdev_get_io_channel(desc); 2922 CU_ASSERT(io_ch != NULL); 2923 2924 /* Create aligned buffer */ 2925 rc = posix_memalign(&buf, 4096, 131072); 2926 SPDK_CU_ASSERT_FATAL(rc == 0); 2927 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 2928 2929 /* 512 * 3 with 2 IO boundary, allocate small data buffer from bdev layer */ 2930 alignment = 512; 2931 bdev->required_alignment = spdk_u32log2(alignment); 2932 bdev->optimal_io_boundary = 2; 2933 bdev->split_on_optimal_io_boundary = true; 2934 2935 iovcnt = 1; 2936 iovs[0].iov_base = NULL; 2937 iovs[0].iov_len = 512 * 3; 2938 2939 rc = spdk_bdev_readv_blocks(desc, io_ch, iovs, iovcnt, 1, 3, io_done, NULL); 2940 CU_ASSERT(rc == 0); 2941 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 2942 stub_complete_io(2); 2943 2944 /* 8KiB with 16 IO boundary, allocate large data buffer from bdev layer */ 2945 alignment = 512; 2946 bdev->required_alignment = spdk_u32log2(alignment); 2947 bdev->optimal_io_boundary = 16; 2948 bdev->split_on_optimal_io_boundary = true; 2949 2950 iovcnt = 1; 2951 iovs[0].iov_base = NULL; 2952 iovs[0].iov_len
= 512 * 16; 2953 2954 rc = spdk_bdev_readv_blocks(desc, io_ch, iovs, iovcnt, 1, 16, io_done, NULL); 2955 CU_ASSERT(rc == 0); 2956 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 2957 stub_complete_io(2); 2958 2959 /* 512 * 160 with 128 IO boundary, 63.5KiB + 16.5KiB for the two child requests */ 2960 alignment = 512; 2961 bdev->required_alignment = spdk_u32log2(alignment); 2962 bdev->optimal_io_boundary = 128; 2963 bdev->split_on_optimal_io_boundary = true; 2964 2965 iovcnt = 1; 2966 iovs[0].iov_base = buf + 16; 2967 iovs[0].iov_len = 512 * 160; 2968 rc = spdk_bdev_readv_blocks(desc, io_ch, iovs, iovcnt, 1, 160, io_done, NULL); 2969 CU_ASSERT(rc == 0); 2970 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 2971 stub_complete_io(2); 2972 2973 /* 512 * 3 with 2 IO boundary */ 2974 alignment = 512; 2975 bdev->required_alignment = spdk_u32log2(alignment); 2976 bdev->optimal_io_boundary = 2; 2977 bdev->split_on_optimal_io_boundary = true; 2978 2979 iovcnt = 2; 2980 iovs[0].iov_base = buf + 16; 2981 iovs[0].iov_len = 512; 2982 iovs[1].iov_base = buf + 16 + 512 + 32; 2983 iovs[1].iov_len = 1024; 2984 2985 rc = spdk_bdev_writev_blocks(desc, io_ch, iovs, iovcnt, 1, 3, io_done, NULL); 2986 CU_ASSERT(rc == 0); 2987 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 2988 stub_complete_io(2); 2989 2990 rc = spdk_bdev_readv_blocks(desc, io_ch, iovs, iovcnt, 1, 3, io_done, NULL); 2991 CU_ASSERT(rc == 0); 2992 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 2993 stub_complete_io(2); 2994 2995 /* 512 * 64 with 32 IO boundary */ 2996 bdev->optimal_io_boundary = 32; 2997 iovcnt = 2; 2998 iovs[0].iov_base = buf + 16; 2999 iovs[0].iov_len = 16384; 3000 iovs[1].iov_base = buf + 16 + 16384 + 32; 3001 iovs[1].iov_len = 16384; 3002 3003 rc = spdk_bdev_writev_blocks(desc, io_ch, iovs, iovcnt, 1, 64, io_done, NULL); 3004 CU_ASSERT(rc == 0); 3005 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 3); 3006 stub_complete_io(3); 3007 3008 rc = spdk_bdev_readv_blocks(desc, io_ch, iovs, iovcnt, 1, 64, io_done, NULL); 3009 CU_ASSERT(rc == 0); 3010 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 3); 3011 stub_complete_io(3); 3012 3013 /* 512 * 160 with 32 IO boundary */ 3014 iovcnt = 1; 3015 iovs[0].iov_base = buf + 16; 3016 iovs[0].iov_len = 16384 + 65536; 3017 3018 rc = spdk_bdev_writev_blocks(desc, io_ch, iovs, iovcnt, 1, 160, io_done, NULL); 3019 CU_ASSERT(rc == 0); 3020 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 6); 3021 stub_complete_io(6); 3022 3023 spdk_put_io_channel(io_ch); 3024 spdk_bdev_close(desc); 3025 free_bdev(bdev); 3026 fn_table.submit_request = stub_submit_request; 3027 spdk_bdev_finish(bdev_fini_cb, NULL); 3028 poll_threads(); 3029 3030 free(buf); 3031 } 3032 3033 static void 3034 histogram_status_cb(void *cb_arg, int status) 3035 { 3036 g_status = status; 3037 } 3038 3039 static void 3040 histogram_data_cb(void *cb_arg, int status, struct spdk_histogram_data *histogram) 3041 { 3042 g_status = status; 3043 g_histogram = histogram; 3044 } 3045 3046 static void 3047 histogram_io_count(void *ctx, uint64_t start, uint64_t end, uint64_t count, 3048 uint64_t total, uint64_t so_far) 3049 { 3050 g_count += count; 3051 } 3052 3053 static void 3054 bdev_histograms(void) 3055 { 3056 struct spdk_bdev *bdev; 3057 struct spdk_bdev_desc *desc = NULL; 3058 struct spdk_io_channel *ch; 3059 struct spdk_histogram_data *histogram; 3060 uint8_t buf[4096]; 3061 int rc; 3062 3063 spdk_bdev_initialize(bdev_init_cb, NULL); 3064 3065 bdev = allocate_bdev("bdev"); 3066 3067 rc =
spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc); 3068 CU_ASSERT(rc == 0); 3069 CU_ASSERT(desc != NULL); 3070 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 3071 3072 ch = spdk_bdev_get_io_channel(desc); 3073 CU_ASSERT(ch != NULL); 3074 3075 /* Enable histogram */ 3076 g_status = -1; 3077 spdk_bdev_histogram_enable(bdev, histogram_status_cb, NULL, true); 3078 poll_threads(); 3079 CU_ASSERT(g_status == 0); 3080 CU_ASSERT(bdev->internal.histogram_enabled == true); 3081 3082 /* Allocate histogram */ 3083 histogram = spdk_histogram_data_alloc(); 3084 SPDK_CU_ASSERT_FATAL(histogram != NULL); 3085 3086 /* Check if histogram is zeroed */ 3087 spdk_bdev_histogram_get(bdev, histogram, histogram_data_cb, NULL); 3088 poll_threads(); 3089 CU_ASSERT(g_status == 0); 3090 SPDK_CU_ASSERT_FATAL(g_histogram != NULL); 3091 3092 g_count = 0; 3093 spdk_histogram_data_iterate(g_histogram, histogram_io_count, NULL); 3094 3095 CU_ASSERT(g_count == 0); 3096 3097 rc = spdk_bdev_write_blocks(desc, ch, buf, 0, 1, io_done, NULL); 3098 CU_ASSERT(rc == 0); 3099 3100 spdk_delay_us(10); 3101 stub_complete_io(1); 3102 poll_threads(); 3103 3104 rc = spdk_bdev_read_blocks(desc, ch, buf, 0, 1, io_done, NULL); 3105 CU_ASSERT(rc == 0); 3106 3107 spdk_delay_us(10); 3108 stub_complete_io(1); 3109 poll_threads(); 3110 3111 /* Check if histogram gathered data from all I/O channels */ 3112 g_histogram = NULL; 3113 spdk_bdev_histogram_get(bdev, histogram, histogram_data_cb, NULL); 3114 poll_threads(); 3115 CU_ASSERT(g_status == 0); 3116 CU_ASSERT(bdev->internal.histogram_enabled == true); 3117 SPDK_CU_ASSERT_FATAL(g_histogram != NULL); 3118 3119 g_count = 0; 3120 spdk_histogram_data_iterate(g_histogram, histogram_io_count, NULL); 3121 CU_ASSERT(g_count == 2); 3122 3123 /* Disable histogram */ 3124 spdk_bdev_histogram_enable(bdev, histogram_status_cb, NULL, false); 3125 poll_threads(); 3126 CU_ASSERT(g_status == 0); 3127 CU_ASSERT(bdev->internal.histogram_enabled == false); 3128 3129 /* Try to run histogram commands on disabled bdev */ 3130 spdk_bdev_histogram_get(bdev, histogram, histogram_data_cb, NULL); 3131 poll_threads(); 3132 CU_ASSERT(g_status == -EFAULT); 3133 3134 spdk_histogram_data_free(histogram); 3135 spdk_put_io_channel(ch); 3136 spdk_bdev_close(desc); 3137 free_bdev(bdev); 3138 spdk_bdev_finish(bdev_fini_cb, NULL); 3139 poll_threads(); 3140 } 3141 3142 static void 3143 _bdev_compare(bool emulated) 3144 { 3145 struct spdk_bdev *bdev; 3146 struct spdk_bdev_desc *desc = NULL; 3147 struct spdk_io_channel *ioch; 3148 struct ut_expected_io *expected_io; 3149 uint64_t offset, num_blocks; 3150 uint32_t num_completed; 3151 char aa_buf[512]; 3152 char bb_buf[512]; 3153 struct iovec compare_iov; 3154 uint8_t io_type; 3155 int rc; 3156 3157 if (emulated) { 3158 io_type = SPDK_BDEV_IO_TYPE_READ; 3159 } else { 3160 io_type = SPDK_BDEV_IO_TYPE_COMPARE; 3161 } 3162 3163 memset(aa_buf, 0xaa, sizeof(aa_buf)); 3164 memset(bb_buf, 0xbb, sizeof(bb_buf)); 3165 3166 g_io_types_supported[SPDK_BDEV_IO_TYPE_COMPARE] = !emulated; 3167 3168 spdk_bdev_initialize(bdev_init_cb, NULL); 3169 fn_table.submit_request = stub_submit_request_get_buf; 3170 bdev = allocate_bdev("bdev"); 3171 3172 rc = spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc); 3173 CU_ASSERT_EQUAL(rc, 0); 3174 SPDK_CU_ASSERT_FATAL(desc != NULL); 3175 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 3176 ioch = spdk_bdev_get_io_channel(desc); 3177 SPDK_CU_ASSERT_FATAL(ioch != NULL); 3178 3179 fn_table.submit_request = stub_submit_request_get_buf; 3180 
g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 3181 3182 offset = 50; 3183 num_blocks = 1; 3184 compare_iov.iov_base = aa_buf; 3185 compare_iov.iov_len = sizeof(aa_buf); 3186 3187 expected_io = ut_alloc_expected_io(io_type, offset, num_blocks, 0); 3188 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 3189 3190 g_io_done = false; 3191 g_compare_read_buf = aa_buf; 3192 g_compare_read_buf_len = sizeof(aa_buf); 3193 rc = spdk_bdev_comparev_blocks(desc, ioch, &compare_iov, 1, offset, num_blocks, io_done, NULL); 3194 CU_ASSERT_EQUAL(rc, 0); 3195 num_completed = stub_complete_io(1); 3196 CU_ASSERT_EQUAL(num_completed, 1); 3197 CU_ASSERT(g_io_done == true); 3198 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS); 3199 3200 expected_io = ut_alloc_expected_io(io_type, offset, num_blocks, 0); 3201 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 3202 3203 g_io_done = false; 3204 g_compare_read_buf = bb_buf; 3205 g_compare_read_buf_len = sizeof(bb_buf); 3206 rc = spdk_bdev_comparev_blocks(desc, ioch, &compare_iov, 1, offset, num_blocks, io_done, NULL); 3207 CU_ASSERT_EQUAL(rc, 0); 3208 num_completed = stub_complete_io(1); 3209 CU_ASSERT_EQUAL(num_completed, 1); 3210 CU_ASSERT(g_io_done == true); 3211 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_MISCOMPARE); 3212 3213 spdk_put_io_channel(ioch); 3214 spdk_bdev_close(desc); 3215 free_bdev(bdev); 3216 fn_table.submit_request = stub_submit_request; 3217 spdk_bdev_finish(bdev_fini_cb, NULL); 3218 poll_threads(); 3219 3220 g_io_types_supported[SPDK_BDEV_IO_TYPE_COMPARE] = true; 3221 3222 g_compare_read_buf = NULL; 3223 } 3224 3225 static void 3226 bdev_compare(void) 3227 { 3228 _bdev_compare(true); 3229 _bdev_compare(false); 3230 } 3231 3232 static void 3233 bdev_compare_and_write(void) 3234 { 3235 struct spdk_bdev *bdev; 3236 struct spdk_bdev_desc *desc = NULL; 3237 struct spdk_io_channel *ioch; 3238 struct ut_expected_io *expected_io; 3239 uint64_t offset, num_blocks; 3240 uint32_t num_completed; 3241 char aa_buf[512]; 3242 char bb_buf[512]; 3243 char cc_buf[512]; 3244 char write_buf[512]; 3245 struct iovec compare_iov; 3246 struct iovec write_iov; 3247 int rc; 3248 3249 memset(aa_buf, 0xaa, sizeof(aa_buf)); 3250 memset(bb_buf, 0xbb, sizeof(bb_buf)); 3251 memset(cc_buf, 0xcc, sizeof(cc_buf)); 3252 3253 g_io_types_supported[SPDK_BDEV_IO_TYPE_COMPARE] = false; 3254 3255 spdk_bdev_initialize(bdev_init_cb, NULL); 3256 fn_table.submit_request = stub_submit_request_get_buf; 3257 bdev = allocate_bdev("bdev"); 3258 3259 rc = spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc); 3260 CU_ASSERT_EQUAL(rc, 0); 3261 SPDK_CU_ASSERT_FATAL(desc != NULL); 3262 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 3263 ioch = spdk_bdev_get_io_channel(desc); 3264 SPDK_CU_ASSERT_FATAL(ioch != NULL); 3265 3266 fn_table.submit_request = stub_submit_request_get_buf; 3267 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 3268 3269 offset = 50; 3270 num_blocks = 1; 3271 compare_iov.iov_base = aa_buf; 3272 compare_iov.iov_len = sizeof(aa_buf); 3273 write_iov.iov_base = bb_buf; 3274 write_iov.iov_len = sizeof(bb_buf); 3275 3276 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, offset, num_blocks, 0); 3277 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 3278 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, offset, num_blocks, 0); 3279 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 3280 3281 g_io_done = false; 3282 g_compare_read_buf = aa_buf; 3283 
g_compare_read_buf_len = sizeof(aa_buf); 3284 memset(write_buf, 0, sizeof(write_buf)); 3285 g_compare_write_buf = write_buf; 3286 g_compare_write_buf_len = sizeof(write_buf); 3287 rc = spdk_bdev_comparev_and_writev_blocks(desc, ioch, &compare_iov, 1, &write_iov, 1, 3288 offset, num_blocks, io_done, NULL); 3289 /* Trigger range locking */ 3290 poll_threads(); 3291 CU_ASSERT_EQUAL(rc, 0); 3292 num_completed = stub_complete_io(1); 3293 CU_ASSERT_EQUAL(num_completed, 1); 3294 CU_ASSERT(g_io_done == false); 3295 num_completed = stub_complete_io(1); 3296 /* Trigger range unlocking */ 3297 poll_threads(); 3298 CU_ASSERT_EQUAL(num_completed, 1); 3299 CU_ASSERT(g_io_done == true); 3300 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS); 3301 CU_ASSERT(memcmp(write_buf, bb_buf, sizeof(write_buf)) == 0); 3302 3303 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, offset, num_blocks, 0); 3304 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 3305 3306 g_io_done = false; 3307 g_compare_read_buf = cc_buf; 3308 g_compare_read_buf_len = sizeof(cc_buf); 3309 memset(write_buf, 0, sizeof(write_buf)); 3310 g_compare_write_buf = write_buf; 3311 g_compare_write_buf_len = sizeof(write_buf); 3312 rc = spdk_bdev_comparev_and_writev_blocks(desc, ioch, &compare_iov, 1, &write_iov, 1, 3313 offset, num_blocks, io_done, NULL); 3314 /* Trigger range locking */ 3315 poll_threads(); 3316 CU_ASSERT_EQUAL(rc, 0); 3317 num_completed = stub_complete_io(1); 3318 /* Trigger range unlocking earlier because we expect error here */ 3319 poll_threads(); 3320 CU_ASSERT_EQUAL(num_completed, 1); 3321 CU_ASSERT(g_io_done == true); 3322 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_MISCOMPARE); 3323 num_completed = stub_complete_io(1); 3324 CU_ASSERT_EQUAL(num_completed, 0); 3325 3326 spdk_put_io_channel(ioch); 3327 spdk_bdev_close(desc); 3328 free_bdev(bdev); 3329 fn_table.submit_request = stub_submit_request; 3330 spdk_bdev_finish(bdev_fini_cb, NULL); 3331 poll_threads(); 3332 3333 g_io_types_supported[SPDK_BDEV_IO_TYPE_COMPARE] = true; 3334 3335 g_compare_read_buf = NULL; 3336 g_compare_write_buf = NULL; 3337 } 3338 3339 static void 3340 bdev_write_zeroes(void) 3341 { 3342 struct spdk_bdev *bdev; 3343 struct spdk_bdev_desc *desc = NULL; 3344 struct spdk_io_channel *ioch; 3345 struct ut_expected_io *expected_io; 3346 uint64_t offset, num_io_blocks, num_blocks; 3347 uint32_t num_completed, num_requests; 3348 int rc; 3349 3350 spdk_bdev_initialize(bdev_init_cb, NULL); 3351 bdev = allocate_bdev("bdev"); 3352 3353 rc = spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc); 3354 CU_ASSERT_EQUAL(rc, 0); 3355 SPDK_CU_ASSERT_FATAL(desc != NULL); 3356 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 3357 ioch = spdk_bdev_get_io_channel(desc); 3358 SPDK_CU_ASSERT_FATAL(ioch != NULL); 3359 3360 fn_table.submit_request = stub_submit_request; 3361 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 3362 3363 /* First test that if the bdev supports write_zeroes, the request won't be split */ 3364 bdev->md_len = 0; 3365 bdev->blocklen = 4096; 3366 num_blocks = (ZERO_BUFFER_SIZE / bdev->blocklen) * 2; 3367 3368 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, 0, num_blocks, 0); 3369 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 3370 rc = spdk_bdev_write_zeroes_blocks(desc, ioch, 0, num_blocks, io_done, NULL); 3371 CU_ASSERT_EQUAL(rc, 0); 3372 num_completed = stub_complete_io(1); 3373 CU_ASSERT_EQUAL(num_completed, 1); 3374 3375 /* Check that if write zeroes is not 
supported, it'll be replaced by regular writes */
	ut_enable_io_type(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, false);
	num_io_blocks = ZERO_BUFFER_SIZE / bdev->blocklen;
	num_requests = 2;
	num_blocks = (ZERO_BUFFER_SIZE / bdev->blocklen) * num_requests;

	for (offset = 0; offset < num_requests; ++offset) {
		expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE,
						   offset * num_io_blocks, num_io_blocks, 0);
		TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
	}

	rc = spdk_bdev_write_zeroes_blocks(desc, ioch, 0, num_blocks, io_done, NULL);
	CU_ASSERT_EQUAL(rc, 0);
	num_completed = stub_complete_io(num_requests);
	CU_ASSERT_EQUAL(num_completed, num_requests);

	/* Check that the splitting is correct if bdev has interleaved metadata */
	bdev->md_interleave = true;
	bdev->md_len = 64;
	bdev->blocklen = 4096 + 64;
	num_blocks = (ZERO_BUFFER_SIZE / bdev->blocklen) * 2;

	num_requests = offset = 0;
	while (offset < num_blocks) {
		num_io_blocks = spdk_min(ZERO_BUFFER_SIZE / bdev->blocklen, num_blocks - offset);
		expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE,
						   offset, num_io_blocks, 0);
		TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
		offset += num_io_blocks;
		num_requests++;
	}

	rc = spdk_bdev_write_zeroes_blocks(desc, ioch, 0, num_blocks, io_done, NULL);
	CU_ASSERT_EQUAL(rc, 0);
	num_completed = stub_complete_io(num_requests);
	CU_ASSERT_EQUAL(num_completed, num_requests);
	num_completed = stub_complete_io(num_requests);
	CU_ASSERT_EQUAL(num_completed, 0);

	/* Check the same for a separate metadata buffer: each emulated write is limited
	 * by the data plus metadata that fit in ZERO_BUFFER_SIZE */
	bdev->md_interleave = false;
	bdev->md_len = 64;
	bdev->blocklen = 4096;

	num_requests = offset = 0;
	while (offset < num_blocks) {
		num_io_blocks = spdk_min(ZERO_BUFFER_SIZE / (bdev->blocklen + bdev->md_len), num_blocks);
		expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE,
						   offset, num_io_blocks, 0);
		expected_io->md_buf = (char *)g_bdev_mgr.zero_buffer + num_io_blocks * bdev->blocklen;
		TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
		offset += num_io_blocks;
		num_requests++;
	}

	rc = spdk_bdev_write_zeroes_blocks(desc, ioch, 0, num_blocks, io_done, NULL);
	CU_ASSERT_EQUAL(rc, 0);
	num_completed = stub_complete_io(num_requests);
	CU_ASSERT_EQUAL(num_completed, num_requests);

	ut_enable_io_type(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, true);
	spdk_put_io_channel(ioch);
	spdk_bdev_close(desc);
	free_bdev(bdev);
	spdk_bdev_finish(bdev_fini_cb, NULL);
	poll_threads();
}

static void
bdev_open_while_hotremove(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc[2] = {};
	int rc;

	bdev = allocate_bdev("bdev");

	rc = spdk_bdev_open_ext("bdev", false, bdev_ut_event_cb, NULL, &desc[0]);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(desc[0] != NULL);
	CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc[0]));

	spdk_bdev_unregister(bdev, NULL, NULL);

	rc = spdk_bdev_open_ext("bdev", false, bdev_ut_event_cb, NULL, &desc[1]);
	CU_ASSERT(rc == -ENODEV);
	SPDK_CU_ASSERT_FATAL(desc[1] == NULL);

	spdk_bdev_close(desc[0]);
	free_bdev(bdev);
}

static void
bdev_close_while_hotremove(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc
*desc = NULL; 3473 int rc = 0; 3474 3475 bdev = allocate_bdev("bdev"); 3476 3477 rc = spdk_bdev_open_ext("bdev", true, bdev_open_cb1, &desc, &desc); 3478 CU_ASSERT_EQUAL(rc, 0); 3479 SPDK_CU_ASSERT_FATAL(desc != NULL); 3480 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 3481 3482 /* Simulate hot-unplug by unregistering bdev */ 3483 g_event_type1 = 0xFF; 3484 g_unregister_arg = NULL; 3485 g_unregister_rc = -1; 3486 spdk_bdev_unregister(bdev, bdev_unregister_cb, (void *)0x12345678); 3487 /* Close device while remove event is in flight */ 3488 spdk_bdev_close(desc); 3489 3490 /* Ensure that unregister callback is delayed */ 3491 CU_ASSERT_EQUAL(g_unregister_arg, NULL); 3492 CU_ASSERT_EQUAL(g_unregister_rc, -1); 3493 3494 poll_threads(); 3495 3496 /* Event callback shall not be issued because device was closed */ 3497 CU_ASSERT_EQUAL(g_event_type1, 0xFF); 3498 /* Unregister callback is issued */ 3499 CU_ASSERT_EQUAL(g_unregister_arg, (void *)0x12345678); 3500 CU_ASSERT_EQUAL(g_unregister_rc, 0); 3501 3502 free_bdev(bdev); 3503 } 3504 3505 static void 3506 bdev_open_ext(void) 3507 { 3508 struct spdk_bdev *bdev; 3509 struct spdk_bdev_desc *desc1 = NULL; 3510 struct spdk_bdev_desc *desc2 = NULL; 3511 int rc = 0; 3512 3513 bdev = allocate_bdev("bdev"); 3514 3515 rc = spdk_bdev_open_ext("bdev", true, NULL, NULL, &desc1); 3516 CU_ASSERT_EQUAL(rc, -EINVAL); 3517 3518 rc = spdk_bdev_open_ext("bdev", true, bdev_open_cb1, &desc1, &desc1); 3519 CU_ASSERT_EQUAL(rc, 0); 3520 3521 rc = spdk_bdev_open_ext("bdev", true, bdev_open_cb2, &desc2, &desc2); 3522 CU_ASSERT_EQUAL(rc, 0); 3523 3524 g_event_type1 = 0xFF; 3525 g_event_type2 = 0xFF; 3526 3527 /* Simulate hot-unplug by unregistering bdev */ 3528 spdk_bdev_unregister(bdev, NULL, NULL); 3529 poll_threads(); 3530 3531 /* Check if correct events have been triggered in event callback fn */ 3532 CU_ASSERT_EQUAL(g_event_type1, SPDK_BDEV_EVENT_REMOVE); 3533 CU_ASSERT_EQUAL(g_event_type2, SPDK_BDEV_EVENT_REMOVE); 3534 3535 free_bdev(bdev); 3536 poll_threads(); 3537 } 3538 3539 struct timeout_io_cb_arg { 3540 struct iovec iov; 3541 uint8_t type; 3542 }; 3543 3544 static int 3545 bdev_channel_count_submitted_io(struct spdk_bdev_channel *ch) 3546 { 3547 struct spdk_bdev_io *bdev_io; 3548 int n = 0; 3549 3550 if (!ch) { 3551 return -1; 3552 } 3553 3554 TAILQ_FOREACH(bdev_io, &ch->io_submitted, internal.ch_link) { 3555 n++; 3556 } 3557 3558 return n; 3559 } 3560 3561 static void 3562 bdev_channel_io_timeout_cb(void *cb_arg, struct spdk_bdev_io *bdev_io) 3563 { 3564 struct timeout_io_cb_arg *ctx = cb_arg; 3565 3566 ctx->type = bdev_io->type; 3567 ctx->iov.iov_base = bdev_io->iov.iov_base; 3568 ctx->iov.iov_len = bdev_io->iov.iov_len; 3569 } 3570 3571 static void 3572 bdev_set_io_timeout(void) 3573 { 3574 struct spdk_bdev *bdev; 3575 struct spdk_bdev_desc *desc = NULL; 3576 struct spdk_io_channel *io_ch = NULL; 3577 struct spdk_bdev_channel *bdev_ch = NULL; 3578 struct timeout_io_cb_arg cb_arg; 3579 3580 spdk_bdev_initialize(bdev_init_cb, NULL); 3581 3582 bdev = allocate_bdev("bdev"); 3583 3584 CU_ASSERT(spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc) == 0); 3585 SPDK_CU_ASSERT_FATAL(desc != NULL); 3586 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 3587 3588 io_ch = spdk_bdev_get_io_channel(desc); 3589 CU_ASSERT(io_ch != NULL); 3590 3591 bdev_ch = spdk_io_channel_get_ctx(io_ch); 3592 CU_ASSERT(TAILQ_EMPTY(&bdev_ch->io_submitted)); 3593 3594 /* This is the part1. 
 * We will check the bdev_ch->io_submitted list
 * to make sure that it links only the user-submitted I/Os.
 */
	CU_ASSERT(spdk_bdev_read(desc, io_ch, (void *)0x1000, 0, 4096, io_done, NULL) == 0);
	CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 1);
	CU_ASSERT(spdk_bdev_write(desc, io_ch, (void *)0x2000, 0, 4096, io_done, NULL) == 0);
	CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 2);
	stub_complete_io(1);
	CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 1);
	stub_complete_io(1);
	CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 0);

	/* Split IO */
	bdev->optimal_io_boundary = 16;
	bdev->split_on_optimal_io_boundary = true;

	/* Now test that a single-vector command is split correctly.
	 * Offset 14, length 8, payload 0xF000
	 * Child - Offset 14, length 2, payload 0xF000
	 * Child - Offset 16, length 6, payload 0xF000 + 2 * 512
	 */
	CU_ASSERT(spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 8, io_done, NULL) == 0);
	/* We count all submitted I/Os, including those generated by splitting. */
	CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 3);
	stub_complete_io(1);
	CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 2);
	stub_complete_io(1);
	CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 0);

	/* Also include the reset IO */
	CU_ASSERT(spdk_bdev_reset(desc, io_ch, io_done, NULL) == 0);
	CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 1);
	poll_threads();
	stub_complete_io(1);
	poll_threads();
	CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 0);

	/* This is part 2.
	 * Test the descriptor's timeout poller registration.
	 */

	/* Successfully set the timeout */
	CU_ASSERT(spdk_bdev_set_timeout(desc, 30, bdev_channel_io_timeout_cb, &cb_arg) == 0);
	CU_ASSERT(desc->io_timeout_poller != NULL);
	CU_ASSERT(desc->timeout_in_sec == 30);
	CU_ASSERT(desc->cb_fn == bdev_channel_io_timeout_cb);
	CU_ASSERT(desc->cb_arg == &cb_arg);

	/* Change the timeout limit */
	CU_ASSERT(spdk_bdev_set_timeout(desc, 20, bdev_channel_io_timeout_cb, &cb_arg) == 0);
	CU_ASSERT(desc->io_timeout_poller != NULL);
	CU_ASSERT(desc->timeout_in_sec == 20);
	CU_ASSERT(desc->cb_fn == bdev_channel_io_timeout_cb);
	CU_ASSERT(desc->cb_arg == &cb_arg);

	/* Disable the timeout */
	CU_ASSERT(spdk_bdev_set_timeout(desc, 0, NULL, NULL) == 0);
	CU_ASSERT(desc->io_timeout_poller == NULL);

	/* This is part 3.
	 * We will catch a timed-out I/O and check whether it is
	 * the submitted one.
 */
	memset(&cb_arg, 0, sizeof(cb_arg));
	CU_ASSERT(spdk_bdev_set_timeout(desc, 30, bdev_channel_io_timeout_cb, &cb_arg) == 0);
	CU_ASSERT(spdk_bdev_write_blocks(desc, io_ch, (void *)0x1000, 0, 1, io_done, NULL) == 0);

	/* The limit is not reached yet */
	spdk_delay_us(15 * spdk_get_ticks_hz());
	poll_threads();
	CU_ASSERT(cb_arg.type == 0);
	CU_ASSERT(cb_arg.iov.iov_base == (void *)0x0);
	CU_ASSERT(cb_arg.iov.iov_len == 0);

	/* 15 + 15 = 30 seconds, so the limit is reached */
	spdk_delay_us(15 * spdk_get_ticks_hz());
	poll_threads();
	CU_ASSERT(cb_arg.type == SPDK_BDEV_IO_TYPE_WRITE);
	CU_ASSERT(cb_arg.iov.iov_base == (void *)0x1000);
	CU_ASSERT(cb_arg.iov.iov_len == 1 * bdev->blocklen);
	stub_complete_io(1);

	/* Use the same split IO as above and check the timed-out IO */
	memset(&cb_arg, 0, sizeof(cb_arg));
	CU_ASSERT(spdk_bdev_write_blocks(desc, io_ch, (void *)0xF000, 14, 8, io_done, NULL) == 0);

	/* The first child completes in time */
	spdk_delay_us(15 * spdk_get_ticks_hz());
	poll_threads();
	stub_complete_io(1);
	CU_ASSERT(cb_arg.type == 0);
	CU_ASSERT(cb_arg.iov.iov_base == (void *)0x0);
	CU_ASSERT(cb_arg.iov.iov_len == 0);

	/* The second child reaches the limit */
	spdk_delay_us(15 * spdk_get_ticks_hz());
	poll_threads();
	CU_ASSERT(cb_arg.type == SPDK_BDEV_IO_TYPE_WRITE);
	CU_ASSERT(cb_arg.iov.iov_base == (void *)0xF000);
	CU_ASSERT(cb_arg.iov.iov_len == 8 * bdev->blocklen);
	stub_complete_io(1);

	/* Also include the reset IO */
	memset(&cb_arg, 0, sizeof(cb_arg));
	CU_ASSERT(spdk_bdev_reset(desc, io_ch, io_done, NULL) == 0);
	spdk_delay_us(30 * spdk_get_ticks_hz());
	poll_threads();
	CU_ASSERT(cb_arg.type == SPDK_BDEV_IO_TYPE_RESET);
	stub_complete_io(1);
	poll_threads();

	spdk_put_io_channel(io_ch);
	spdk_bdev_close(desc);
	free_bdev(bdev);
	spdk_bdev_finish(bdev_fini_cb, NULL);
	poll_threads();
}

static void
lba_range_overlap(void)
{
	struct lba_range r1, r2;

	/* r1 covers blocks 100..149 */
	r1.offset = 100;
	r1.length = 50;

	r2.offset = 0;
	r2.length = 1;
	CU_ASSERT(!bdev_lba_range_overlapped(&r1, &r2));

	r2.offset = 0;
	r2.length = 100;
	CU_ASSERT(!bdev_lba_range_overlapped(&r1, &r2));

	r2.offset = 0;
	r2.length = 110;
	CU_ASSERT(bdev_lba_range_overlapped(&r1, &r2));

	r2.offset = 100;
	r2.length = 10;
	CU_ASSERT(bdev_lba_range_overlapped(&r1, &r2));

	r2.offset = 110;
	r2.length = 20;
	CU_ASSERT(bdev_lba_range_overlapped(&r1, &r2));

	r2.offset = 140;
	r2.length = 150;
	CU_ASSERT(bdev_lba_range_overlapped(&r1, &r2));

	r2.offset = 130;
	r2.length = 200;
	CU_ASSERT(bdev_lba_range_overlapped(&r1, &r2));

	r2.offset = 150;
	r2.length = 100;
	CU_ASSERT(!bdev_lba_range_overlapped(&r1, &r2));

	r2.offset = 110;
	r2.length = 0;
	CU_ASSERT(!bdev_lba_range_overlapped(&r1, &r2));
}

static bool g_lock_lba_range_done;
static bool g_unlock_lba_range_done;

static void
lock_lba_range_done(void *ctx, int status)
{
	g_lock_lba_range_done = true;
}

static void
unlock_lba_range_done(void *ctx, int status)
{
	g_unlock_lba_range_done = true;
}

static void
lock_lba_range_check_ranges(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc = NULL;
struct spdk_io_channel *io_ch; 3781 struct spdk_bdev_channel *channel; 3782 struct lba_range *range; 3783 int ctx1; 3784 int rc; 3785 3786 spdk_bdev_initialize(bdev_init_cb, NULL); 3787 3788 bdev = allocate_bdev("bdev0"); 3789 3790 rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc); 3791 CU_ASSERT(rc == 0); 3792 CU_ASSERT(desc != NULL); 3793 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 3794 io_ch = spdk_bdev_get_io_channel(desc); 3795 CU_ASSERT(io_ch != NULL); 3796 channel = spdk_io_channel_get_ctx(io_ch); 3797 3798 g_lock_lba_range_done = false; 3799 rc = bdev_lock_lba_range(desc, io_ch, 20, 10, lock_lba_range_done, &ctx1); 3800 CU_ASSERT(rc == 0); 3801 poll_threads(); 3802 3803 CU_ASSERT(g_lock_lba_range_done == true); 3804 range = TAILQ_FIRST(&channel->locked_ranges); 3805 SPDK_CU_ASSERT_FATAL(range != NULL); 3806 CU_ASSERT(range->offset == 20); 3807 CU_ASSERT(range->length == 10); 3808 CU_ASSERT(range->owner_ch == channel); 3809 3810 /* Unlocks must exactly match a lock. */ 3811 g_unlock_lba_range_done = false; 3812 rc = bdev_unlock_lba_range(desc, io_ch, 20, 1, unlock_lba_range_done, &ctx1); 3813 CU_ASSERT(rc == -EINVAL); 3814 CU_ASSERT(g_unlock_lba_range_done == false); 3815 3816 rc = bdev_unlock_lba_range(desc, io_ch, 20, 10, unlock_lba_range_done, &ctx1); 3817 CU_ASSERT(rc == 0); 3818 spdk_delay_us(100); 3819 poll_threads(); 3820 3821 CU_ASSERT(g_unlock_lba_range_done == true); 3822 CU_ASSERT(TAILQ_EMPTY(&channel->locked_ranges)); 3823 3824 spdk_put_io_channel(io_ch); 3825 spdk_bdev_close(desc); 3826 free_bdev(bdev); 3827 spdk_bdev_finish(bdev_fini_cb, NULL); 3828 poll_threads(); 3829 } 3830 3831 static void 3832 lock_lba_range_with_io_outstanding(void) 3833 { 3834 struct spdk_bdev *bdev; 3835 struct spdk_bdev_desc *desc = NULL; 3836 struct spdk_io_channel *io_ch; 3837 struct spdk_bdev_channel *channel; 3838 struct lba_range *range; 3839 char buf[4096]; 3840 int ctx1; 3841 int rc; 3842 3843 spdk_bdev_initialize(bdev_init_cb, NULL); 3844 3845 bdev = allocate_bdev("bdev0"); 3846 3847 rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc); 3848 CU_ASSERT(rc == 0); 3849 CU_ASSERT(desc != NULL); 3850 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 3851 io_ch = spdk_bdev_get_io_channel(desc); 3852 CU_ASSERT(io_ch != NULL); 3853 channel = spdk_io_channel_get_ctx(io_ch); 3854 3855 g_io_done = false; 3856 rc = spdk_bdev_read_blocks(desc, io_ch, buf, 20, 1, io_done, &ctx1); 3857 CU_ASSERT(rc == 0); 3858 3859 g_lock_lba_range_done = false; 3860 rc = bdev_lock_lba_range(desc, io_ch, 20, 10, lock_lba_range_done, &ctx1); 3861 CU_ASSERT(rc == 0); 3862 poll_threads(); 3863 3864 /* The lock should immediately become valid, since there are no outstanding 3865 * write I/O. 3866 */ 3867 CU_ASSERT(g_io_done == false); 3868 CU_ASSERT(g_lock_lba_range_done == true); 3869 range = TAILQ_FIRST(&channel->locked_ranges); 3870 SPDK_CU_ASSERT_FATAL(range != NULL); 3871 CU_ASSERT(range->offset == 20); 3872 CU_ASSERT(range->length == 10); 3873 CU_ASSERT(range->owner_ch == channel); 3874 CU_ASSERT(range->locked_ctx == &ctx1); 3875 3876 rc = bdev_unlock_lba_range(desc, io_ch, 20, 10, lock_lba_range_done, &ctx1); 3877 CU_ASSERT(rc == 0); 3878 stub_complete_io(1); 3879 spdk_delay_us(100); 3880 poll_threads(); 3881 3882 CU_ASSERT(TAILQ_EMPTY(&channel->locked_ranges)); 3883 3884 /* Now try again, but with a write I/O. 
*/ 3885 g_io_done = false; 3886 rc = spdk_bdev_write_blocks(desc, io_ch, buf, 20, 1, io_done, &ctx1); 3887 CU_ASSERT(rc == 0); 3888 3889 g_lock_lba_range_done = false; 3890 rc = bdev_lock_lba_range(desc, io_ch, 20, 10, lock_lba_range_done, &ctx1); 3891 CU_ASSERT(rc == 0); 3892 poll_threads(); 3893 3894 /* The lock should not be fully valid yet, since a write I/O is outstanding. 3895 * But note that the range should be on the channel's locked_list, to make sure no 3896 * new write I/O are started. 3897 */ 3898 CU_ASSERT(g_io_done == false); 3899 CU_ASSERT(g_lock_lba_range_done == false); 3900 range = TAILQ_FIRST(&channel->locked_ranges); 3901 SPDK_CU_ASSERT_FATAL(range != NULL); 3902 CU_ASSERT(range->offset == 20); 3903 CU_ASSERT(range->length == 10); 3904 3905 /* Complete the write I/O. This should make the lock valid (checked by confirming 3906 * our callback was invoked). 3907 */ 3908 stub_complete_io(1); 3909 spdk_delay_us(100); 3910 poll_threads(); 3911 CU_ASSERT(g_io_done == true); 3912 CU_ASSERT(g_lock_lba_range_done == true); 3913 3914 rc = bdev_unlock_lba_range(desc, io_ch, 20, 10, unlock_lba_range_done, &ctx1); 3915 CU_ASSERT(rc == 0); 3916 poll_threads(); 3917 3918 CU_ASSERT(TAILQ_EMPTY(&channel->locked_ranges)); 3919 3920 spdk_put_io_channel(io_ch); 3921 spdk_bdev_close(desc); 3922 free_bdev(bdev); 3923 spdk_bdev_finish(bdev_fini_cb, NULL); 3924 poll_threads(); 3925 } 3926 3927 static void 3928 lock_lba_range_overlapped(void) 3929 { 3930 struct spdk_bdev *bdev; 3931 struct spdk_bdev_desc *desc = NULL; 3932 struct spdk_io_channel *io_ch; 3933 struct spdk_bdev_channel *channel; 3934 struct lba_range *range; 3935 int ctx1; 3936 int rc; 3937 3938 spdk_bdev_initialize(bdev_init_cb, NULL); 3939 3940 bdev = allocate_bdev("bdev0"); 3941 3942 rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc); 3943 CU_ASSERT(rc == 0); 3944 CU_ASSERT(desc != NULL); 3945 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 3946 io_ch = spdk_bdev_get_io_channel(desc); 3947 CU_ASSERT(io_ch != NULL); 3948 channel = spdk_io_channel_get_ctx(io_ch); 3949 3950 /* Lock range 20-29. */ 3951 g_lock_lba_range_done = false; 3952 rc = bdev_lock_lba_range(desc, io_ch, 20, 10, lock_lba_range_done, &ctx1); 3953 CU_ASSERT(rc == 0); 3954 poll_threads(); 3955 3956 CU_ASSERT(g_lock_lba_range_done == true); 3957 range = TAILQ_FIRST(&channel->locked_ranges); 3958 SPDK_CU_ASSERT_FATAL(range != NULL); 3959 CU_ASSERT(range->offset == 20); 3960 CU_ASSERT(range->length == 10); 3961 3962 /* Try to lock range 25-39. It should not lock immediately, since it overlaps with 3963 * 20-29. 3964 */ 3965 g_lock_lba_range_done = false; 3966 rc = bdev_lock_lba_range(desc, io_ch, 25, 15, lock_lba_range_done, &ctx1); 3967 CU_ASSERT(rc == 0); 3968 poll_threads(); 3969 3970 CU_ASSERT(g_lock_lba_range_done == false); 3971 range = TAILQ_FIRST(&bdev->internal.pending_locked_ranges); 3972 SPDK_CU_ASSERT_FATAL(range != NULL); 3973 CU_ASSERT(range->offset == 25); 3974 CU_ASSERT(range->length == 15); 3975 3976 /* Unlock 20-29. This should result in range 25-39 now getting locked since it 3977 * no longer overlaps with an active lock. 
3978 */ 3979 g_unlock_lba_range_done = false; 3980 rc = bdev_unlock_lba_range(desc, io_ch, 20, 10, unlock_lba_range_done, &ctx1); 3981 CU_ASSERT(rc == 0); 3982 poll_threads(); 3983 3984 CU_ASSERT(g_unlock_lba_range_done == true); 3985 CU_ASSERT(TAILQ_EMPTY(&bdev->internal.pending_locked_ranges)); 3986 range = TAILQ_FIRST(&channel->locked_ranges); 3987 SPDK_CU_ASSERT_FATAL(range != NULL); 3988 CU_ASSERT(range->offset == 25); 3989 CU_ASSERT(range->length == 15); 3990 3991 /* Lock 40-59. This should immediately lock since it does not overlap with the 3992 * currently active 25-39 lock. 3993 */ 3994 g_lock_lba_range_done = false; 3995 rc = bdev_lock_lba_range(desc, io_ch, 40, 20, lock_lba_range_done, &ctx1); 3996 CU_ASSERT(rc == 0); 3997 poll_threads(); 3998 3999 CU_ASSERT(g_lock_lba_range_done == true); 4000 range = TAILQ_FIRST(&bdev->internal.locked_ranges); 4001 SPDK_CU_ASSERT_FATAL(range != NULL); 4002 range = TAILQ_NEXT(range, tailq); 4003 SPDK_CU_ASSERT_FATAL(range != NULL); 4004 CU_ASSERT(range->offset == 40); 4005 CU_ASSERT(range->length == 20); 4006 4007 /* Try to lock 35-44. Note that this overlaps with both 25-39 and 40-59. */ 4008 g_lock_lba_range_done = false; 4009 rc = bdev_lock_lba_range(desc, io_ch, 35, 10, lock_lba_range_done, &ctx1); 4010 CU_ASSERT(rc == 0); 4011 poll_threads(); 4012 4013 CU_ASSERT(g_lock_lba_range_done == false); 4014 range = TAILQ_FIRST(&bdev->internal.pending_locked_ranges); 4015 SPDK_CU_ASSERT_FATAL(range != NULL); 4016 CU_ASSERT(range->offset == 35); 4017 CU_ASSERT(range->length == 10); 4018 4019 /* Unlock 25-39. Make sure that 35-44 is still in the pending list, since 4020 * the 40-59 lock is still active. 4021 */ 4022 g_unlock_lba_range_done = false; 4023 rc = bdev_unlock_lba_range(desc, io_ch, 25, 15, unlock_lba_range_done, &ctx1); 4024 CU_ASSERT(rc == 0); 4025 poll_threads(); 4026 4027 CU_ASSERT(g_unlock_lba_range_done == true); 4028 CU_ASSERT(g_lock_lba_range_done == false); 4029 range = TAILQ_FIRST(&bdev->internal.pending_locked_ranges); 4030 SPDK_CU_ASSERT_FATAL(range != NULL); 4031 CU_ASSERT(range->offset == 35); 4032 CU_ASSERT(range->length == 10); 4033 4034 /* Unlock 40-59. This should result in 35-44 now getting locked, since there are 4035 * no longer any active overlapping locks. 4036 */ 4037 g_unlock_lba_range_done = false; 4038 rc = bdev_unlock_lba_range(desc, io_ch, 40, 20, unlock_lba_range_done, &ctx1); 4039 CU_ASSERT(rc == 0); 4040 poll_threads(); 4041 4042 CU_ASSERT(g_unlock_lba_range_done == true); 4043 CU_ASSERT(g_lock_lba_range_done == true); 4044 CU_ASSERT(TAILQ_EMPTY(&bdev->internal.pending_locked_ranges)); 4045 range = TAILQ_FIRST(&bdev->internal.locked_ranges); 4046 SPDK_CU_ASSERT_FATAL(range != NULL); 4047 CU_ASSERT(range->offset == 35); 4048 CU_ASSERT(range->length == 10); 4049 4050 /* Finally, unlock 35-44. 
*/ 4051 g_unlock_lba_range_done = false; 4052 rc = bdev_unlock_lba_range(desc, io_ch, 35, 10, unlock_lba_range_done, &ctx1); 4053 CU_ASSERT(rc == 0); 4054 poll_threads(); 4055 4056 CU_ASSERT(g_unlock_lba_range_done == true); 4057 CU_ASSERT(TAILQ_EMPTY(&bdev->internal.locked_ranges)); 4058 4059 spdk_put_io_channel(io_ch); 4060 spdk_bdev_close(desc); 4061 free_bdev(bdev); 4062 spdk_bdev_finish(bdev_fini_cb, NULL); 4063 poll_threads(); 4064 } 4065 4066 static void 4067 abort_done(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg) 4068 { 4069 g_abort_done = true; 4070 g_abort_status = bdev_io->internal.status; 4071 spdk_bdev_free_io(bdev_io); 4072 } 4073 4074 static void 4075 bdev_io_abort(void) 4076 { 4077 struct spdk_bdev *bdev; 4078 struct spdk_bdev_desc *desc = NULL; 4079 struct spdk_io_channel *io_ch; 4080 struct spdk_bdev_channel *channel; 4081 struct spdk_bdev_mgmt_channel *mgmt_ch; 4082 struct spdk_bdev_opts bdev_opts = {}; 4083 struct iovec iov[BDEV_IO_NUM_CHILD_IOV * 2]; 4084 uint64_t io_ctx1 = 0, io_ctx2 = 0, i; 4085 int rc; 4086 4087 spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts)); 4088 bdev_opts.bdev_io_pool_size = 7; 4089 bdev_opts.bdev_io_cache_size = 2; 4090 4091 rc = spdk_bdev_set_opts(&bdev_opts); 4092 CU_ASSERT(rc == 0); 4093 spdk_bdev_initialize(bdev_init_cb, NULL); 4094 4095 bdev = allocate_bdev("bdev0"); 4096 4097 rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc); 4098 CU_ASSERT(rc == 0); 4099 CU_ASSERT(desc != NULL); 4100 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 4101 io_ch = spdk_bdev_get_io_channel(desc); 4102 CU_ASSERT(io_ch != NULL); 4103 channel = spdk_io_channel_get_ctx(io_ch); 4104 mgmt_ch = channel->shared_resource->mgmt_ch; 4105 4106 g_abort_done = false; 4107 4108 ut_enable_io_type(SPDK_BDEV_IO_TYPE_ABORT, false); 4109 4110 rc = spdk_bdev_abort(desc, io_ch, &io_ctx1, abort_done, NULL); 4111 CU_ASSERT(rc == -ENOTSUP); 4112 4113 ut_enable_io_type(SPDK_BDEV_IO_TYPE_ABORT, true); 4114 4115 rc = spdk_bdev_abort(desc, io_ch, &io_ctx2, abort_done, NULL); 4116 CU_ASSERT(rc == 0); 4117 CU_ASSERT(g_abort_done == true); 4118 CU_ASSERT(g_abort_status == SPDK_BDEV_IO_STATUS_FAILED); 4119 4120 /* Test the case that the target I/O was successfully aborted. */ 4121 g_io_done = false; 4122 4123 rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, &io_ctx1); 4124 CU_ASSERT(rc == 0); 4125 CU_ASSERT(g_io_done == false); 4126 4127 g_abort_done = false; 4128 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 4129 4130 rc = spdk_bdev_abort(desc, io_ch, &io_ctx1, abort_done, NULL); 4131 CU_ASSERT(rc == 0); 4132 CU_ASSERT(g_io_done == true); 4133 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED); 4134 stub_complete_io(1); 4135 CU_ASSERT(g_abort_done == true); 4136 CU_ASSERT(g_abort_status == SPDK_BDEV_IO_STATUS_SUCCESS); 4137 4138 /* Test the case that the target I/O was not aborted because it completed 4139 * in the middle of execution of the abort. 
 */
	g_io_done = false;

	rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, &io_ctx1);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	g_abort_done = false;
	g_io_exp_status = SPDK_BDEV_IO_STATUS_FAILED;

	rc = spdk_bdev_abort(desc, io_ch, &io_ctx1, abort_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS;
	stub_complete_io(1);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS);

	g_io_exp_status = SPDK_BDEV_IO_STATUS_FAILED;
	stub_complete_io(1);
	CU_ASSERT(g_abort_done == true);
	CU_ASSERT(g_abort_status == SPDK_BDEV_IO_STATUS_SUCCESS);

	g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS;

	bdev->optimal_io_boundary = 16;
	bdev->split_on_optimal_io_boundary = true;

	/* Test that a single-vector command which is split is aborted correctly.
	 * Offset 14, length 8, payload 0xF000
	 * Child - Offset 14, length 2, payload 0xF000
	 * Child - Offset 16, length 6, payload 0xF000 + 2 * 512
	 */
	g_io_done = false;

	rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 8, io_done, &io_ctx1);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2);

	g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS;

	rc = spdk_bdev_abort(desc, io_ch, &io_ctx1, abort_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED);
	stub_complete_io(2);
	CU_ASSERT(g_abort_done == true);
	CU_ASSERT(g_abort_status == SPDK_BDEV_IO_STATUS_SUCCESS);

	/* Test that a multi-vector command that needs to be split by strip, and then
	 * split again, is aborted correctly. The abort is requested before the second
	 * child I/O is submitted; the parent I/O should complete with failure without
	 * submitting the second child I/O.
	 */
	for (i = 0; i < BDEV_IO_NUM_CHILD_IOV * 2; i++) {
		iov[i].iov_base = (void *)((i + 1) * 0x10000);
		iov[i].iov_len = 512;
	}

	bdev->optimal_io_boundary = BDEV_IO_NUM_CHILD_IOV;
	g_io_done = false;
	rc = spdk_bdev_readv_blocks(desc, io_ch, iov, BDEV_IO_NUM_CHILD_IOV * 2, 0,
				    BDEV_IO_NUM_CHILD_IOV * 2, io_done, &io_ctx1);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);

	g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS;

	rc = spdk_bdev_abort(desc, io_ch, &io_ctx1, abort_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED);
	stub_complete_io(1);
	CU_ASSERT(g_abort_done == true);
	CU_ASSERT(g_abort_status == SPDK_BDEV_IO_STATUS_SUCCESS);

	g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS;

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);

	bdev->optimal_io_boundary = 16;
	g_io_done = false;

	/* Test that a single-vector command which is split is aborted correctly.
	 * Unlike the cases above, the child abort requests will be submitted
	 * sequentially due to the capacity of spdk_bdev_io.
 */
	rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 50, io_done, &io_ctx1);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	/* Blocks 14-63 with a 16-block boundary split into 4 children:
	 * 14-15, 16-31, 32-47 and 48-63.
	 */
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 4);

	g_abort_done = false;
	g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS;

	rc = spdk_bdev_abort(desc, io_ch, &io_ctx1, abort_done, NULL);
	CU_ASSERT(rc == 0);
	/* The read, its 4 children and the parent abort consume 6 of the 7 bdev_ios,
	 * so the remaining child aborts must wait on the mgmt channel's io_wait_queue.
	 */
	CU_ASSERT(!TAILQ_EMPTY(&mgmt_ch->io_wait_queue));
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 4);

	stub_complete_io(1);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED);
	stub_complete_io(3);
	CU_ASSERT(g_abort_done == true);
	CU_ASSERT(g_abort_status == SPDK_BDEV_IO_STATUS_SUCCESS);

	g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS;

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);

	spdk_put_io_channel(io_ch);
	spdk_bdev_close(desc);
	free_bdev(bdev);
	spdk_bdev_finish(bdev_fini_cb, NULL);
	poll_threads();
}

static void
bdev_set_options_test(void)
{
	struct spdk_bdev_opts bdev_opts = {};
	int rc;

	/* Case 1: Do not set opts_size */
	rc = spdk_bdev_set_opts(&bdev_opts);
	CU_ASSERT(rc == -1);

	spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts));
	bdev_opts.bdev_io_pool_size = 4;
	bdev_opts.bdev_io_cache_size = 2;
	bdev_opts.small_buf_pool_size = 4;

	/* Case 2: Do not set valid small_buf_pool_size and large_buf_pool_size */
	rc = spdk_bdev_set_opts(&bdev_opts);
	CU_ASSERT(rc == -1);

	/* Case 3: Do not set valid large_buf_pool_size */
	bdev_opts.small_buf_pool_size = BUF_SMALL_POOL_SIZE;
	bdev_opts.large_buf_pool_size = BUF_LARGE_POOL_SIZE - 1;
	rc = spdk_bdev_set_opts(&bdev_opts);
	CU_ASSERT(rc == -1);

	/* Case 4: Set valid large_buf_pool_size */
	bdev_opts.large_buf_pool_size = BUF_LARGE_POOL_SIZE;
	rc = spdk_bdev_set_opts(&bdev_opts);
	CU_ASSERT(rc == 0);

	/* Case 5: Set different valid values for the small and large buf pools */
	bdev_opts.small_buf_pool_size = BUF_SMALL_POOL_SIZE + 3;
	bdev_opts.large_buf_pool_size = BUF_LARGE_POOL_SIZE + 3;
	rc = spdk_bdev_set_opts(&bdev_opts);
	CU_ASSERT(rc == 0);
}

int
main(int argc, char **argv)
{
	CU_pSuite suite = NULL;
	unsigned int num_failures;

	CU_set_error_action(CUEA_ABORT);
	CU_initialize_registry();

	suite = CU_add_suite("bdev", null_init, null_clean);

	CU_ADD_TEST(suite, bytes_to_blocks_test);
	CU_ADD_TEST(suite, num_blocks_test);
	CU_ADD_TEST(suite, io_valid_test);
	CU_ADD_TEST(suite, open_write_test);
	CU_ADD_TEST(suite, alias_add_del_test);
	CU_ADD_TEST(suite, get_device_stat_test);
	CU_ADD_TEST(suite, bdev_io_types_test);
	CU_ADD_TEST(suite, bdev_io_wait_test);
	CU_ADD_TEST(suite, bdev_io_spans_split_test);
	CU_ADD_TEST(suite, bdev_io_boundary_split_test);
	CU_ADD_TEST(suite, bdev_io_max_size_and_segment_split_test);
	CU_ADD_TEST(suite, bdev_io_mix_split_test);
	CU_ADD_TEST(suite, bdev_io_split_with_io_wait);
	CU_ADD_TEST(suite, bdev_io_alignment_with_boundary);
	CU_ADD_TEST(suite, bdev_io_alignment);
	CU_ADD_TEST(suite, bdev_histograms);
	CU_ADD_TEST(suite, bdev_write_zeroes);
	CU_ADD_TEST(suite, bdev_compare_and_write);
	CU_ADD_TEST(suite, bdev_compare);
	CU_ADD_TEST(suite, bdev_open_while_hotremove);
CU_ADD_TEST(suite, bdev_close_while_hotremove); 4333 CU_ADD_TEST(suite, bdev_open_ext); 4334 CU_ADD_TEST(suite, bdev_set_io_timeout); 4335 CU_ADD_TEST(suite, lba_range_overlap); 4336 CU_ADD_TEST(suite, lock_lba_range_check_ranges); 4337 CU_ADD_TEST(suite, lock_lba_range_with_io_outstanding); 4338 CU_ADD_TEST(suite, lock_lba_range_overlapped); 4339 CU_ADD_TEST(suite, bdev_io_abort); 4340 CU_ADD_TEST(suite, bdev_set_options_test); 4341 4342 allocate_cores(1); 4343 allocate_threads(1); 4344 set_thread(0); 4345 4346 CU_basic_set_mode(CU_BRM_VERBOSE); 4347 CU_basic_run_tests(); 4348 num_failures = CU_get_number_of_failures(); 4349 CU_cleanup_registry(); 4350 4351 free_threads(); 4352 free_cores(); 4353 4354 return num_failures; 4355 } 4356