/*-
 *   BSD LICENSE
 *
 *   Copyright (c) Intel Corporation. All rights reserved.
 *   Copyright (c) 2019 Mellanox Technologies LTD. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "spdk_cunit.h"

#include "common/lib/ut_multithread.c"
#include "unit/lib/json_mock.c"

#include "spdk/config.h"
/* HACK: disable VTune integration so the unit test doesn't need VTune headers and libs to build */
#undef SPDK_CONFIG_VTUNE

#include "bdev/bdev.c"

DEFINE_STUB(spdk_conf_find_section, struct spdk_conf_section *, (struct spdk_conf *cp,
		const char *name), NULL);
DEFINE_STUB(spdk_conf_section_get_nmval, char *,
	    (struct spdk_conf_section *sp, const char *key, int idx1, int idx2), NULL);
DEFINE_STUB(spdk_conf_section_get_intval, int, (struct spdk_conf_section *sp, const char *key), -1);

struct spdk_trace_histories *g_trace_histories;
DEFINE_STUB_V(spdk_trace_add_register_fn, (struct spdk_trace_register_fn *reg_fn));
DEFINE_STUB_V(spdk_trace_register_owner, (uint8_t type, char id_prefix));
DEFINE_STUB_V(spdk_trace_register_object, (uint8_t type, char id_prefix));
DEFINE_STUB_V(spdk_trace_register_description, (const char *name,
		uint16_t tpoint_id, uint8_t owner_type,
		uint8_t object_type, uint8_t new_object,
		uint8_t arg1_type, const char *arg1_name));
DEFINE_STUB_V(_spdk_trace_record, (uint64_t tsc, uint16_t tpoint_id, uint16_t poller_id,
				   uint32_t size, uint64_t object_id, uint64_t arg1));
DEFINE_STUB(spdk_notify_send, uint64_t, (const char *type, const char *ctx), 0);
DEFINE_STUB(spdk_notify_type_register, struct spdk_notify_type *, (const char *type), NULL);

int g_status;
int g_count;
enum spdk_bdev_event_type g_event_type1;
enum spdk_bdev_event_type g_event_type2;
struct spdk_histogram_data *g_histogram;
void *g_unregister_arg;
int g_unregister_rc;

void
spdk_scsi_nvme_translate(const struct spdk_bdev_io *bdev_io,
			 int *sc, int *sk, int *asc, int *ascq)
{
}
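/*
 * Suite init/clean hooks for CUnit follow; they intentionally do nothing.
 * Tests that need the full bdev layer set it up and tear it down themselves
 * via spdk_bdev_initialize()/spdk_bdev_finish().
 */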
static int
null_init(void)
{
	return 0;
}

static int
null_clean(void)
{
	return 0;
}

static int
stub_destruct(void *ctx)
{
	return 0;
}

struct ut_expected_io {
	uint8_t				type;
	uint64_t			offset;
	uint64_t			length;
	int				iovcnt;
	struct iovec			iov[BDEV_IO_NUM_CHILD_IOV];
	void				*md_buf;
	TAILQ_ENTRY(ut_expected_io)	link;
};

struct bdev_ut_channel {
	TAILQ_HEAD(, spdk_bdev_io)	outstanding_io;
	uint32_t			outstanding_io_count;
	TAILQ_HEAD(, ut_expected_io)	expected_io;
};

static bool g_io_done;
static struct spdk_bdev_io *g_bdev_io;
static enum spdk_bdev_io_status g_io_status;
static enum spdk_bdev_io_status g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS;
static uint32_t g_bdev_ut_io_device;
static struct bdev_ut_channel *g_bdev_ut_channel;

static struct ut_expected_io *
ut_alloc_expected_io(uint8_t type, uint64_t offset, uint64_t length, int iovcnt)
{
	struct ut_expected_io *expected_io;

	expected_io = calloc(1, sizeof(*expected_io));
	SPDK_CU_ASSERT_FATAL(expected_io != NULL);

	expected_io->type = type;
	expected_io->offset = offset;
	expected_io->length = length;
	expected_io->iovcnt = iovcnt;

	return expected_io;
}

static void
ut_expected_io_set_iov(struct ut_expected_io *expected_io, int pos, void *base, size_t len)
{
	expected_io->iov[pos].iov_base = base;
	expected_io->iov[pos].iov_len = len;
}

static void
stub_submit_request(struct spdk_io_channel *_ch, struct spdk_bdev_io *bdev_io)
{
	struct bdev_ut_channel *ch = spdk_io_channel_get_ctx(_ch);
	struct ut_expected_io *expected_io;
	struct iovec *iov, *expected_iov;
	int i;

	g_bdev_io = bdev_io;

	TAILQ_INSERT_TAIL(&ch->outstanding_io, bdev_io, module_link);
	ch->outstanding_io_count++;

	expected_io = TAILQ_FIRST(&ch->expected_io);
	if (expected_io == NULL) {
		return;
	}
	TAILQ_REMOVE(&ch->expected_io, expected_io, link);

	if (expected_io->type != SPDK_BDEV_IO_TYPE_INVALID) {
		CU_ASSERT(bdev_io->type == expected_io->type);
	}

	if (expected_io->md_buf != NULL) {
		CU_ASSERT(expected_io->md_buf == bdev_io->u.bdev.md_buf);
	}

	if (expected_io->length == 0) {
		free(expected_io);
		return;
	}

	CU_ASSERT(expected_io->offset == bdev_io->u.bdev.offset_blocks);
	CU_ASSERT(expected_io->length == bdev_io->u.bdev.num_blocks);

	if (expected_io->iovcnt == 0) {
		free(expected_io);
		/* UNMAP, WRITE_ZEROES and FLUSH don't have iovs, so we can just return now. */
		return;
	}
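	/* Compare each submitted iovec against the expected values. */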
	CU_ASSERT(expected_io->iovcnt == bdev_io->u.bdev.iovcnt);
	for (i = 0; i < expected_io->iovcnt; i++) {
		iov = &bdev_io->u.bdev.iovs[i];
		expected_iov = &expected_io->iov[i];
		CU_ASSERT(iov->iov_len == expected_iov->iov_len);
		CU_ASSERT(iov->iov_base == expected_iov->iov_base);
	}

	free(expected_io);
}

static void
stub_submit_request_aligned_buffer_cb(struct spdk_io_channel *_ch,
				      struct spdk_bdev_io *bdev_io, bool success)
{
	CU_ASSERT(success == true);

	stub_submit_request(_ch, bdev_io);
}

static void
stub_submit_request_aligned_buffer(struct spdk_io_channel *_ch, struct spdk_bdev_io *bdev_io)
{
	spdk_bdev_io_get_buf(bdev_io, stub_submit_request_aligned_buffer_cb,
			     bdev_io->u.bdev.num_blocks * bdev_io->bdev->blocklen);
}

static uint32_t
stub_complete_io(uint32_t num_to_complete)
{
	struct bdev_ut_channel *ch = g_bdev_ut_channel;
	struct spdk_bdev_io *bdev_io;
	enum spdk_bdev_io_status io_status;
	uint32_t num_completed = 0;

	while (num_completed < num_to_complete) {
		if (TAILQ_EMPTY(&ch->outstanding_io)) {
			break;
		}
		bdev_io = TAILQ_FIRST(&ch->outstanding_io);
		TAILQ_REMOVE(&ch->outstanding_io, bdev_io, module_link);
		ch->outstanding_io_count--;
		io_status = g_io_exp_status;
		spdk_bdev_io_complete(bdev_io, io_status);
		num_completed++;
	}

	return num_completed;
}

static struct spdk_io_channel *
bdev_ut_get_io_channel(void *ctx)
{
	return spdk_get_io_channel(&g_bdev_ut_io_device);
}

static bool g_io_types_supported[SPDK_BDEV_NUM_IO_TYPES] = {
	[SPDK_BDEV_IO_TYPE_READ]		= true,
	[SPDK_BDEV_IO_TYPE_WRITE]		= true,
	[SPDK_BDEV_IO_TYPE_UNMAP]		= true,
	[SPDK_BDEV_IO_TYPE_FLUSH]		= true,
	[SPDK_BDEV_IO_TYPE_RESET]		= true,
	[SPDK_BDEV_IO_TYPE_NVME_ADMIN]		= true,
	[SPDK_BDEV_IO_TYPE_NVME_IO]		= true,
	[SPDK_BDEV_IO_TYPE_NVME_IO_MD]		= true,
	[SPDK_BDEV_IO_TYPE_WRITE_ZEROES]	= true,
	[SPDK_BDEV_IO_TYPE_ZCOPY]		= true,
};

static void
ut_enable_io_type(enum spdk_bdev_io_type io_type, bool enable)
{
	g_io_types_supported[io_type] = enable;
}

static bool
stub_io_type_supported(void *_bdev, enum spdk_bdev_io_type io_type)
{
	return g_io_types_supported[io_type];
}

static struct spdk_bdev_fn_table fn_table = {
	.destruct = stub_destruct,
	.submit_request = stub_submit_request,
	.get_io_channel = bdev_ut_get_io_channel,
	.io_type_supported = stub_io_type_supported,
};

static int
bdev_ut_create_ch(void *io_device, void *ctx_buf)
{
	struct bdev_ut_channel *ch = ctx_buf;

	CU_ASSERT(g_bdev_ut_channel == NULL);
	g_bdev_ut_channel = ch;

	TAILQ_INIT(&ch->outstanding_io);
	ch->outstanding_io_count = 0;
	TAILQ_INIT(&ch->expected_io);
	return 0;
}

static void
bdev_ut_destroy_ch(void *io_device, void *ctx_buf)
{
	CU_ASSERT(g_bdev_ut_channel != NULL);
	g_bdev_ut_channel = NULL;
}

struct spdk_bdev_module bdev_ut_if;

static int
bdev_ut_module_init(void)
{
	spdk_io_device_register(&g_bdev_ut_io_device, bdev_ut_create_ch, bdev_ut_destroy_ch,
				sizeof(struct bdev_ut_channel), NULL);
	spdk_bdev_module_init_done(&bdev_ut_if);
	return 0;
}
static void
bdev_ut_module_fini(void)
{
	spdk_io_device_unregister(&g_bdev_ut_io_device, NULL);
}

struct spdk_bdev_module bdev_ut_if = {
	.name = "bdev_ut",
	.module_init = bdev_ut_module_init,
	.module_fini = bdev_ut_module_fini,
	.async_init = true,
};

static void vbdev_ut_examine(struct spdk_bdev *bdev);

static int
vbdev_ut_module_init(void)
{
	return 0;
}

static void
vbdev_ut_module_fini(void)
{
}

struct spdk_bdev_module vbdev_ut_if = {
	.name = "vbdev_ut",
	.module_init = vbdev_ut_module_init,
	.module_fini = vbdev_ut_module_fini,
	.examine_config = vbdev_ut_examine,
};

SPDK_BDEV_MODULE_REGISTER(bdev_ut, &bdev_ut_if)
SPDK_BDEV_MODULE_REGISTER(vbdev_ut, &vbdev_ut_if)

static void
vbdev_ut_examine(struct spdk_bdev *bdev)
{
	spdk_bdev_module_examine_done(&vbdev_ut_if);
}

static struct spdk_bdev *
allocate_bdev(char *name)
{
	struct spdk_bdev *bdev;
	int rc;

	bdev = calloc(1, sizeof(*bdev));
	SPDK_CU_ASSERT_FATAL(bdev != NULL);

	bdev->name = name;
	bdev->fn_table = &fn_table;
	bdev->module = &bdev_ut_if;
	bdev->blockcnt = 1024;
	bdev->blocklen = 512;

	rc = spdk_bdev_register(bdev);
	CU_ASSERT(rc == 0);

	return bdev;
}

static struct spdk_bdev *
allocate_vbdev(char *name)
{
	struct spdk_bdev *bdev;
	int rc;

	bdev = calloc(1, sizeof(*bdev));
	SPDK_CU_ASSERT_FATAL(bdev != NULL);

	bdev->name = name;
	bdev->fn_table = &fn_table;
	bdev->module = &vbdev_ut_if;

	rc = spdk_bdev_register(bdev);
	CU_ASSERT(rc == 0);

	return bdev;
}

static void
free_bdev(struct spdk_bdev *bdev)
{
	spdk_bdev_unregister(bdev, NULL, NULL);
	poll_threads();
	memset(bdev, 0xFF, sizeof(*bdev));
	free(bdev);
}

static void
free_vbdev(struct spdk_bdev *bdev)
{
	spdk_bdev_unregister(bdev, NULL, NULL);
	poll_threads();
	memset(bdev, 0xFF, sizeof(*bdev));
	free(bdev);
}

static void
get_device_stat_cb(struct spdk_bdev *bdev, struct spdk_bdev_io_stat *stat, void *cb_arg, int rc)
{
	const char *bdev_name;

	CU_ASSERT(bdev != NULL);
	CU_ASSERT(rc == 0);
	bdev_name = spdk_bdev_get_name(bdev);
	CU_ASSERT_STRING_EQUAL(bdev_name, "bdev0");

	free(stat);
	free_bdev(bdev);

	*(bool *)cb_arg = true;
}

static void
bdev_unregister_cb(void *cb_arg, int rc)
{
	g_unregister_arg = cb_arg;
	g_unregister_rc = rc;
}

static void
bdev_open_cb1(enum spdk_bdev_event_type type, struct spdk_bdev *bdev, void *event_ctx)
{
	struct spdk_bdev_desc *desc = *(struct spdk_bdev_desc **)event_ctx;

	g_event_type1 = type;
	if (SPDK_BDEV_EVENT_REMOVE == type) {
		spdk_bdev_close(desc);
	}
}

static void
bdev_open_cb2(enum spdk_bdev_event_type type, struct spdk_bdev *bdev, void *event_ctx)
{
	struct spdk_bdev_desc *desc = *(struct spdk_bdev_desc **)event_ctx;

	g_event_type2 = type;
	if (SPDK_BDEV_EVENT_REMOVE == type) {
		spdk_bdev_close(desc);
	}
}

static void
get_device_stat_test(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_io_stat *stat;
	bool done;

	bdev = allocate_bdev("bdev0");
	stat = calloc(1, sizeof(struct spdk_bdev_io_stat));
	if (stat == NULL) {
		free_bdev(bdev);
		return;
	}

	done = false;
	spdk_bdev_get_device_stat(bdev, stat, get_device_stat_cb, &done);
	while (!done) {
		poll_threads();
	}
}
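/*
 * Note: get_device_stat_cb() frees both the stat structure and the bdev
 * itself, so the test above only needs to poll until the callback flips
 * `done`.
 */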
static void
open_write_test(void)
{
	struct spdk_bdev *bdev[9];
	struct spdk_bdev_desc *desc[9] = {};
	int rc;

	/*
	 * Create a tree of bdevs to test various open w/ write cases.
	 *
	 * bdev0 through bdev3 are physical block devices, such as NVMe
	 * namespaces or Ceph block devices.
	 *
	 * bdev4 is a virtual bdev with multiple base bdevs.  This models
	 * caching or RAID use cases.
	 *
	 * bdev5 and bdev6 are virtual bdevs with the same base bdev (bdev2).
	 * This models partitioning or logical volume use cases.
	 *
	 * bdev7 is a virtual bdev with multiple base bdevs.  One of its base
	 * bdevs (bdev2) is shared with other virtual bdevs: bdev5 and bdev6.
	 * This models caching, RAID, partitioning or logical volume use cases.
	 *
	 * bdev8 is a virtual bdev with multiple base bdevs, but these
	 * base bdevs are themselves virtual bdevs.
	 *
	 *                bdev8
	 *                  |
	 *            +----------+
	 *            |          |
	 *          bdev4      bdev5   bdev6   bdev7
	 *            |          |       |       |
	 *        +---+---+      +---+   +   +---+---+
	 *        |       |           \  |  /         \
	 *      bdev0   bdev1          bdev2          bdev3
	 */

	bdev[0] = allocate_bdev("bdev0");
	rc = spdk_bdev_module_claim_bdev(bdev[0], NULL, &bdev_ut_if);
	CU_ASSERT(rc == 0);

	bdev[1] = allocate_bdev("bdev1");
	rc = spdk_bdev_module_claim_bdev(bdev[1], NULL, &bdev_ut_if);
	CU_ASSERT(rc == 0);

	bdev[2] = allocate_bdev("bdev2");
	rc = spdk_bdev_module_claim_bdev(bdev[2], NULL, &bdev_ut_if);
	CU_ASSERT(rc == 0);

	bdev[3] = allocate_bdev("bdev3");
	rc = spdk_bdev_module_claim_bdev(bdev[3], NULL, &bdev_ut_if);
	CU_ASSERT(rc == 0);

	bdev[4] = allocate_vbdev("bdev4");
	rc = spdk_bdev_module_claim_bdev(bdev[4], NULL, &bdev_ut_if);
	CU_ASSERT(rc == 0);

	bdev[5] = allocate_vbdev("bdev5");
	rc = spdk_bdev_module_claim_bdev(bdev[5], NULL, &bdev_ut_if);
	CU_ASSERT(rc == 0);

	bdev[6] = allocate_vbdev("bdev6");

	bdev[7] = allocate_vbdev("bdev7");

	bdev[8] = allocate_vbdev("bdev8");

	/* Open bdev0 read-only.  This should succeed. */
	rc = spdk_bdev_open(bdev[0], false, NULL, NULL, &desc[0]);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(desc[0] != NULL);
	spdk_bdev_close(desc[0]);

	/*
	 * Open bdev1 read/write.  This should fail since bdev1 has been claimed
	 * by a vbdev module.
	 */
	rc = spdk_bdev_open(bdev[1], true, NULL, NULL, &desc[1]);
	CU_ASSERT(rc == -EPERM);

	/*
	 * Open bdev4 read/write.  This should fail since bdev4 has been claimed
	 * by a vbdev module.
	 */
	rc = spdk_bdev_open(bdev[4], true, NULL, NULL, &desc[4]);
	CU_ASSERT(rc == -EPERM);

	/* Open bdev4 read-only.  This should succeed. */
	rc = spdk_bdev_open(bdev[4], false, NULL, NULL, &desc[4]);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(desc[4] != NULL);
	spdk_bdev_close(desc[4]);

	/*
	 * Open bdev8 read/write.  This should succeed since it is a leaf
	 * bdev.
	 */
	rc = spdk_bdev_open(bdev[8], true, NULL, NULL, &desc[8]);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(desc[8] != NULL);
	spdk_bdev_close(desc[8]);

	/*
	 * Open bdev5 read/write.  This should fail since bdev5 has been claimed
	 * by a vbdev module.
	 */
	rc = spdk_bdev_open(bdev[5], true, NULL, NULL, &desc[5]);
	CU_ASSERT(rc == -EPERM);
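	/*
	 * Pattern exercised above and below: a bdev claimed via
	 * spdk_bdev_module_claim_bdev() rejects writable opens with -EPERM but
	 * still allows read-only descriptors.
	 */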
	/* Open bdev5 read-only.  This should succeed. */
	rc = spdk_bdev_open(bdev[5], false, NULL, NULL, &desc[5]);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(desc[5] != NULL);
	spdk_bdev_close(desc[5]);

	free_vbdev(bdev[8]);

	free_vbdev(bdev[5]);
	free_vbdev(bdev[6]);
	free_vbdev(bdev[7]);

	free_vbdev(bdev[4]);

	free_bdev(bdev[0]);
	free_bdev(bdev[1]);
	free_bdev(bdev[2]);
	free_bdev(bdev[3]);
}

static void
bytes_to_blocks_test(void)
{
	struct spdk_bdev bdev;
	uint64_t offset_blocks, num_blocks;

	memset(&bdev, 0, sizeof(bdev));

	bdev.blocklen = 512;

	/* All parameters valid */
	offset_blocks = 0;
	num_blocks = 0;
	CU_ASSERT(spdk_bdev_bytes_to_blocks(&bdev, 512, &offset_blocks, 1024, &num_blocks) == 0);
	CU_ASSERT(offset_blocks == 1);
	CU_ASSERT(num_blocks == 2);

	/* Offset not a block multiple */
	CU_ASSERT(spdk_bdev_bytes_to_blocks(&bdev, 3, &offset_blocks, 512, &num_blocks) != 0);

	/* Length not a block multiple */
	CU_ASSERT(spdk_bdev_bytes_to_blocks(&bdev, 512, &offset_blocks, 3, &num_blocks) != 0);

	/* In case blocklen is not a power of two */
	bdev.blocklen = 100;
	CU_ASSERT(spdk_bdev_bytes_to_blocks(&bdev, 100, &offset_blocks, 200, &num_blocks) == 0);
	CU_ASSERT(offset_blocks == 1);
	CU_ASSERT(num_blocks == 2);

	/* Offset not a block multiple */
	CU_ASSERT(spdk_bdev_bytes_to_blocks(&bdev, 3, &offset_blocks, 100, &num_blocks) != 0);

	/* Length not a block multiple */
	CU_ASSERT(spdk_bdev_bytes_to_blocks(&bdev, 100, &offset_blocks, 3, &num_blocks) != 0);
}
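/*
 * The conversion above is plain division with a remainder check: with
 * blocklen = 512, byte offset 512 maps to block 1 and byte length 1024 to
 * 2 blocks; any remainder in either division makes the call fail, including
 * for the non-power-of-two blocklen of 100.
 */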
static void
num_blocks_test(void)
{
	struct spdk_bdev bdev;
	struct spdk_bdev_desc *desc = NULL;
	struct spdk_bdev_desc *desc_ext = NULL;
	int rc;

	memset(&bdev, 0, sizeof(bdev));
	bdev.name = "num_blocks";
	bdev.fn_table = &fn_table;
	bdev.module = &bdev_ut_if;
	spdk_bdev_register(&bdev);
	spdk_bdev_notify_blockcnt_change(&bdev, 50);

	/* Growing block number */
	CU_ASSERT(spdk_bdev_notify_blockcnt_change(&bdev, 70) == 0);
	/* Shrinking block number */
	CU_ASSERT(spdk_bdev_notify_blockcnt_change(&bdev, 30) == 0);

	/* In case the bdev is opened */
	rc = spdk_bdev_open(&bdev, false, NULL, NULL, &desc);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(desc != NULL);

	/* Growing block number */
	CU_ASSERT(spdk_bdev_notify_blockcnt_change(&bdev, 80) == 0);
	/* Shrinking block number */
	CU_ASSERT(spdk_bdev_notify_blockcnt_change(&bdev, 20) != 0);

	/* In case the bdev is opened with the ext API */
	rc = spdk_bdev_open_ext("num_blocks", false, bdev_open_cb1, &desc_ext, &desc_ext);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(desc_ext != NULL);

	g_event_type1 = 0xFF;
	/* Growing block number */
	CU_ASSERT(spdk_bdev_notify_blockcnt_change(&bdev, 90) == 0);

	poll_threads();
	CU_ASSERT_EQUAL(g_event_type1, SPDK_BDEV_EVENT_RESIZE);

	g_event_type1 = 0xFF;
	/* Growing block number and closing */
	CU_ASSERT(spdk_bdev_notify_blockcnt_change(&bdev, 100) == 0);

	spdk_bdev_close(desc);
	spdk_bdev_close(desc_ext);
	spdk_bdev_unregister(&bdev, NULL, NULL);

	poll_threads();

	/* Callback is not called for closed device */
	CU_ASSERT_EQUAL(g_event_type1, 0xFF);
}

static void
io_valid_test(void)
{
	struct spdk_bdev bdev;

	memset(&bdev, 0, sizeof(bdev));

	bdev.blocklen = 512;
	spdk_bdev_notify_blockcnt_change(&bdev, 100);

	/* All parameters valid */
	CU_ASSERT(spdk_bdev_io_valid_blocks(&bdev, 1, 2) == true);

	/* Last valid block */
	CU_ASSERT(spdk_bdev_io_valid_blocks(&bdev, 99, 1) == true);

	/* Offset past end of bdev */
	CU_ASSERT(spdk_bdev_io_valid_blocks(&bdev, 100, 1) == false);

	/* Offset + length past end of bdev */
	CU_ASSERT(spdk_bdev_io_valid_blocks(&bdev, 99, 2) == false);

	/* Offset near end of uint64_t range (2^64 - 1) */
	CU_ASSERT(spdk_bdev_io_valid_blocks(&bdev, 18446744073709551615ULL, 1) == false);
}

static void
alias_add_del_test(void)
{
	struct spdk_bdev *bdev[3];
	int rc;

	/* Creating and registering bdevs */
	bdev[0] = allocate_bdev("bdev0");
	SPDK_CU_ASSERT_FATAL(bdev[0] != 0);

	bdev[1] = allocate_bdev("bdev1");
	SPDK_CU_ASSERT_FATAL(bdev[1] != 0);

	bdev[2] = allocate_bdev("bdev2");
	SPDK_CU_ASSERT_FATAL(bdev[2] != 0);

	poll_threads();

	/*
	 * Try adding an alias identical to the bdev name.  The alias matches
	 * the name, so it cannot be added to the alias list.
	 */
	rc = spdk_bdev_alias_add(bdev[0], bdev[0]->name);
	CU_ASSERT(rc == -EEXIST);

	/* Trying to add an empty alias should fail */
	rc = spdk_bdev_alias_add(bdev[0], NULL);
	CU_ASSERT(rc == -EINVAL);

	/* Trying to add the same alias to two different registered bdevs */

	/* Alias is used for the first time, so this one should pass */
	rc = spdk_bdev_alias_add(bdev[0], "proper alias 0");
	CU_ASSERT(rc == 0);

	/* Alias was already added to another bdev, so this one should fail */
	rc = spdk_bdev_alias_add(bdev[1], "proper alias 0");
	CU_ASSERT(rc == -EEXIST);

	/* Alias is used for the first time, so this one should pass */
	rc = spdk_bdev_alias_add(bdev[1], "proper alias 1");
	CU_ASSERT(rc == 0);

	/* Trying to remove aliases from registered bdevs */

	/* Alias is not on the bdev's alias list, so this one should fail */
	rc = spdk_bdev_alias_del(bdev[0], "not existing");
	CU_ASSERT(rc == -ENOENT);

	/* Alias is present on the bdev's alias list, so this one should pass */
	rc = spdk_bdev_alias_del(bdev[0], "proper alias 0");
	CU_ASSERT(rc == 0);

	/* Alias is present on the bdev's alias list, so this one should pass */
	rc = spdk_bdev_alias_del(bdev[1], "proper alias 1");
	CU_ASSERT(rc == 0);

	/*
	 * Trying to remove the name instead of an alias should fail; the name
	 * cannot be changed or removed.
	 */
	rc = spdk_bdev_alias_del(bdev[0], bdev[0]->name);
	CU_ASSERT(rc != 0);

	/* Trying to delete all aliases from an empty alias list */
	spdk_bdev_alias_del_all(bdev[2]);
	SPDK_CU_ASSERT_FATAL(TAILQ_EMPTY(&bdev[2]->aliases));

	/* Trying to delete all aliases from a non-empty alias list */
	rc = spdk_bdev_alias_add(bdev[2], "alias0");
	CU_ASSERT(rc == 0);
	rc = spdk_bdev_alias_add(bdev[2], "alias1");
	CU_ASSERT(rc == 0);
	spdk_bdev_alias_del_all(bdev[2]);
	CU_ASSERT(TAILQ_EMPTY(&bdev[2]->aliases));

	/* Unregister and free bdevs */
	spdk_bdev_unregister(bdev[0], NULL, NULL);
	spdk_bdev_unregister(bdev[1], NULL, NULL);
	spdk_bdev_unregister(bdev[2], NULL, NULL);

	poll_threads();

	free(bdev[0]);
	free(bdev[1]);
	free(bdev[2]);
}
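/*
 * io_done(), bdev_init_cb() and bdev_fini_cb() below are the shared
 * completion callbacks for the I/O path tests; io_done() records the status
 * in globals so tests can assert on it after stub completions.
 */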
static void
io_done(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
{
	g_io_done = true;
	g_io_status = bdev_io->internal.status;
	spdk_bdev_free_io(bdev_io);
}

static void
bdev_init_cb(void *arg, int rc)
{
	CU_ASSERT(rc == 0);
}

static void
bdev_fini_cb(void *arg)
{
}

struct bdev_ut_io_wait_entry {
	struct spdk_bdev_io_wait_entry	entry;
	struct spdk_io_channel		*io_ch;
	struct spdk_bdev_desc		*desc;
	bool				submitted;
};

static void
io_wait_cb(void *arg)
{
	struct bdev_ut_io_wait_entry *entry = arg;
	int rc;

	rc = spdk_bdev_read_blocks(entry->desc, entry->io_ch, NULL, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	entry->submitted = true;
}

static void
bdev_io_types_test(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc = NULL;
	struct spdk_io_channel *io_ch;
	struct spdk_bdev_opts bdev_opts = {
		.bdev_io_pool_size = 4,
		.bdev_io_cache_size = 2,
	};
	int rc;

	rc = spdk_bdev_set_opts(&bdev_opts);
	CU_ASSERT(rc == 0);
	spdk_bdev_initialize(bdev_init_cb, NULL);
	poll_threads();

	bdev = allocate_bdev("bdev0");

	rc = spdk_bdev_open(bdev, true, NULL, NULL, &desc);
	CU_ASSERT(rc == 0);
	poll_threads();
	SPDK_CU_ASSERT_FATAL(desc != NULL);
	io_ch = spdk_bdev_get_io_channel(desc);
	CU_ASSERT(io_ch != NULL);

	/* WRITE and WRITE ZEROES are not supported */
	ut_enable_io_type(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, false);
	ut_enable_io_type(SPDK_BDEV_IO_TYPE_WRITE, false);
	rc = spdk_bdev_write_zeroes_blocks(desc, io_ch, 0, 128, io_done, NULL);
	CU_ASSERT(rc == -ENOTSUP);
	ut_enable_io_type(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, true);
	ut_enable_io_type(SPDK_BDEV_IO_TYPE_WRITE, true);

	spdk_put_io_channel(io_ch);
	spdk_bdev_close(desc);
	free_bdev(bdev);
	spdk_bdev_finish(bdev_fini_cb, NULL);
	poll_threads();
}

static void
bdev_io_wait_test(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc = NULL;
	struct spdk_io_channel *io_ch;
	struct spdk_bdev_opts bdev_opts = {
		.bdev_io_pool_size = 4,
		.bdev_io_cache_size = 2,
	};
	struct bdev_ut_io_wait_entry io_wait_entry;
	struct bdev_ut_io_wait_entry io_wait_entry2;
	int rc;

	rc = spdk_bdev_set_opts(&bdev_opts);
	CU_ASSERT(rc == 0);
	spdk_bdev_initialize(bdev_init_cb, NULL);
	poll_threads();

	bdev = allocate_bdev("bdev0");

	rc = spdk_bdev_open(bdev, true, NULL, NULL, &desc);
	CU_ASSERT(rc == 0);
	poll_threads();
	SPDK_CU_ASSERT_FATAL(desc != NULL);
	io_ch = spdk_bdev_get_io_channel(desc);
	CU_ASSERT(io_ch != NULL);

	rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 4);

	rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, NULL);
	CU_ASSERT(rc == -ENOMEM);

	io_wait_entry.entry.bdev = bdev;
	io_wait_entry.entry.cb_fn = io_wait_cb;
	io_wait_entry.entry.cb_arg = &io_wait_entry;
	io_wait_entry.io_ch = io_ch;
	io_wait_entry.desc = desc;
	io_wait_entry.submitted = false;
	/* Cannot use the same io_wait_entry for two different calls. */
	memcpy(&io_wait_entry2, &io_wait_entry, sizeof(io_wait_entry));
	io_wait_entry2.entry.cb_arg = &io_wait_entry2;
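	/*
	 * With bdev_io_pool_size = 4 the fifth read above failed with -ENOMEM.
	 * spdk_bdev_queue_io_wait() registers callbacks that fire as pool
	 * entries are returned, which the stub completions below exercise.
	 */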
	/* Queue two I/O waits. */
	rc = spdk_bdev_queue_io_wait(bdev, io_ch, &io_wait_entry.entry);
	CU_ASSERT(rc == 0);
	CU_ASSERT(io_wait_entry.submitted == false);
	rc = spdk_bdev_queue_io_wait(bdev, io_ch, &io_wait_entry2.entry);
	CU_ASSERT(rc == 0);
	CU_ASSERT(io_wait_entry2.submitted == false);

	stub_complete_io(1);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 4);
	CU_ASSERT(io_wait_entry.submitted == true);
	CU_ASSERT(io_wait_entry2.submitted == false);

	stub_complete_io(1);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 4);
	CU_ASSERT(io_wait_entry2.submitted == true);

	stub_complete_io(4);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);

	spdk_put_io_channel(io_ch);
	spdk_bdev_close(desc);
	free_bdev(bdev);
	spdk_bdev_finish(bdev_fini_cb, NULL);
	poll_threads();
}

static void
bdev_io_spans_boundary_test(void)
{
	struct spdk_bdev bdev;
	struct spdk_bdev_io bdev_io;

	memset(&bdev, 0, sizeof(bdev));

	bdev.optimal_io_boundary = 0;
	bdev_io.bdev = &bdev;

	/* bdev has no optimal_io_boundary set - so this should return false. */
	CU_ASSERT(_spdk_bdev_io_should_split(&bdev_io) == false);

	bdev.optimal_io_boundary = 32;
	bdev_io.type = SPDK_BDEV_IO_TYPE_RESET;

	/* RESETs are not based on LBAs - so this should return false. */
	CU_ASSERT(_spdk_bdev_io_should_split(&bdev_io) == false);

	bdev_io.type = SPDK_BDEV_IO_TYPE_READ;
	bdev_io.u.bdev.offset_blocks = 0;
	bdev_io.u.bdev.num_blocks = 32;

	/* This I/O runs right up to, but does not cross, the boundary - so this should return false. */
	CU_ASSERT(_spdk_bdev_io_should_split(&bdev_io) == false);

	bdev_io.u.bdev.num_blocks = 33;

	/* This I/O spans a boundary. */
	CU_ASSERT(_spdk_bdev_io_should_split(&bdev_io) == true);
}

static void
bdev_io_split(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc = NULL;
	struct spdk_io_channel *io_ch;
	struct spdk_bdev_opts bdev_opts = {
		.bdev_io_pool_size = 512,
		.bdev_io_cache_size = 64,
	};
	struct iovec iov[BDEV_IO_NUM_CHILD_IOV * 2];
	struct ut_expected_io *expected_io;
	uint64_t i;
	int rc;

	rc = spdk_bdev_set_opts(&bdev_opts);
	CU_ASSERT(rc == 0);
	spdk_bdev_initialize(bdev_init_cb, NULL);

	bdev = allocate_bdev("bdev0");

	rc = spdk_bdev_open(bdev, true, NULL, NULL, &desc);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(desc != NULL);
	io_ch = spdk_bdev_get_io_channel(desc);
	CU_ASSERT(io_ch != NULL);

	bdev->optimal_io_boundary = 16;
	bdev->split_on_optimal_io_boundary = false;

	g_io_done = false;

	/* First test that the I/O does not get split if split_on_optimal_io_boundary == false. */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 14, 8, 1);
	ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 8 * 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 8, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);

	bdev->split_on_optimal_io_boundary = true;
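	/*
	 * From here on the boundary is enforced: a request crossing a 16-block
	 * stripe must be split into children that each stay within one stripe.
	 */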
	/* Now test that a single-vector command is split correctly.
	 * Offset 14, length 8, payload 0xF000
	 *  Child - Offset 14, length 2, payload 0xF000
	 *  Child - Offset 16, length 6, payload 0xF000 + 2 * 512
	 *
	 * Set up the expected values before calling spdk_bdev_read_blocks
	 */
	g_io_done = false;
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 14, 2, 1);
	ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 2 * 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 16, 6, 1);
	ut_expected_io_set_iov(expected_io, 0, (void *)(0xF000 + 2 * 512), 6 * 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	/* spdk_bdev_read_blocks will submit the first child immediately. */
	rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 8, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2);
	stub_complete_io(2);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);

	/* Now set up a more complex, multi-vector command that needs to be split,
	 * including splitting iovecs.
	 */
	iov[0].iov_base = (void *)0x10000;
	iov[0].iov_len = 512;
	iov[1].iov_base = (void *)0x20000;
	iov[1].iov_len = 20 * 512;
	iov[2].iov_base = (void *)0x30000;
	iov[2].iov_len = 11 * 512;

	g_io_done = false;
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 14, 2, 2);
	ut_expected_io_set_iov(expected_io, 0, (void *)0x10000, 512);
	ut_expected_io_set_iov(expected_io, 1, (void *)0x20000, 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 16, 16, 1);
	ut_expected_io_set_iov(expected_io, 0, (void *)(0x20000 + 512), 16 * 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 32, 14, 2);
	ut_expected_io_set_iov(expected_io, 0, (void *)(0x20000 + 17 * 512), 3 * 512);
	ut_expected_io_set_iov(expected_io, 1, (void *)0x30000, 11 * 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	rc = spdk_bdev_writev_blocks(desc, io_ch, iov, 3, 14, 32, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 3);
	stub_complete_io(3);
	CU_ASSERT(g_io_done == true);
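	/*
	 * Note how the 20-block iovec above was carved three ways: one block
	 * in the first child, 16 blocks as the entire second child, and
	 * 3 blocks in the third, purely to respect the 16-block boundary.
	 */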
	/* Test a multi-vector command that needs to be split by strip and then
	 * needs to be split further due to the capacity of child iovs.
	 */
	for (i = 0; i < BDEV_IO_NUM_CHILD_IOV * 2; i++) {
		iov[i].iov_base = (void *)((i + 1) * 0x10000);
		iov[i].iov_len = 512;
	}

	bdev->optimal_io_boundary = BDEV_IO_NUM_CHILD_IOV;
	g_io_done = false;
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0, BDEV_IO_NUM_CHILD_IOV,
					   BDEV_IO_NUM_CHILD_IOV);
	for (i = 0; i < BDEV_IO_NUM_CHILD_IOV; i++) {
		ut_expected_io_set_iov(expected_io, i, (void *)((i + 1) * 0x10000), 512);
	}
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, BDEV_IO_NUM_CHILD_IOV,
					   BDEV_IO_NUM_CHILD_IOV, BDEV_IO_NUM_CHILD_IOV);
	for (i = 0; i < BDEV_IO_NUM_CHILD_IOV; i++) {
		ut_expected_io_set_iov(expected_io, i,
				       (void *)((i + 1 + BDEV_IO_NUM_CHILD_IOV) * 0x10000), 512);
	}
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	rc = spdk_bdev_readv_blocks(desc, io_ch, iov, BDEV_IO_NUM_CHILD_IOV * 2, 0,
				    BDEV_IO_NUM_CHILD_IOV * 2, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);

	/* Test a multi-vector command that needs to be split by strip and then
	 * needs to be split further due to the capacity of child iovs.  In this
	 * case, the length of the rest of the iovec array within an I/O boundary
	 * is a multiple of the block size.
	 */

	/* Fill the iovec array for exactly one boundary.  The iovec count for
	 * this boundary is BDEV_IO_NUM_CHILD_IOV + 1, which exceeds the
	 * capacity of child iovs.
	 */
	for (i = 0; i < BDEV_IO_NUM_CHILD_IOV - 2; i++) {
		iov[i].iov_base = (void *)((i + 1) * 0x10000);
		iov[i].iov_len = 512;
	}
	for (i = BDEV_IO_NUM_CHILD_IOV - 2; i < BDEV_IO_NUM_CHILD_IOV; i++) {
		iov[i].iov_base = (void *)((i + 1) * 0x10000);
		iov[i].iov_len = 256;
	}
	iov[BDEV_IO_NUM_CHILD_IOV].iov_base = (void *)((BDEV_IO_NUM_CHILD_IOV + 1) * 0x10000);
	iov[BDEV_IO_NUM_CHILD_IOV].iov_len = 512;

	/* Add an extra iovec to trigger split */
	iov[BDEV_IO_NUM_CHILD_IOV + 1].iov_base = (void *)((BDEV_IO_NUM_CHILD_IOV + 2) * 0x10000);
	iov[BDEV_IO_NUM_CHILD_IOV + 1].iov_len = 512;

	bdev->optimal_io_boundary = BDEV_IO_NUM_CHILD_IOV;
	g_io_done = false;
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0,
					   BDEV_IO_NUM_CHILD_IOV - 1, BDEV_IO_NUM_CHILD_IOV);
	for (i = 0; i < BDEV_IO_NUM_CHILD_IOV - 2; i++) {
		ut_expected_io_set_iov(expected_io, i,
				       (void *)((i + 1) * 0x10000), 512);
	}
	for (i = BDEV_IO_NUM_CHILD_IOV - 2; i < BDEV_IO_NUM_CHILD_IOV; i++) {
		ut_expected_io_set_iov(expected_io, i,
				       (void *)((i + 1) * 0x10000), 256);
	}
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, BDEV_IO_NUM_CHILD_IOV - 1,
					   1, 1);
	ut_expected_io_set_iov(expected_io, 0,
			       (void *)((BDEV_IO_NUM_CHILD_IOV + 1) * 0x10000), 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, BDEV_IO_NUM_CHILD_IOV,
					   1, 1);
	ut_expected_io_set_iov(expected_io, 0,
			       (void *)((BDEV_IO_NUM_CHILD_IOV + 2) * 0x10000), 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	rc = spdk_bdev_readv_blocks(desc, io_ch, iov, BDEV_IO_NUM_CHILD_IOV + 2, 0,
				    BDEV_IO_NUM_CHILD_IOV + 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2);
	stub_complete_io(2);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
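	/*
	 * The parent submitted the first child (which consumed the full child
	 * iov array), waited for it to complete, then submitted the remaining
	 * two children together, matching the outstanding_io_count checks
	 * above.
	 */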
	/* Test a multi-vector command that needs to be split by strip and then
	 * needs to be split further due to the capacity of child iovs.  The
	 * child request offset should be rewound to the last aligned offset
	 * and the request should succeed without error.
	 */
	for (i = 0; i < BDEV_IO_NUM_CHILD_IOV - 1; i++) {
		iov[i].iov_base = (void *)((i + 1) * 0x10000);
		iov[i].iov_len = 512;
	}
	iov[BDEV_IO_NUM_CHILD_IOV - 1].iov_base = (void *)(BDEV_IO_NUM_CHILD_IOV * 0x10000);
	iov[BDEV_IO_NUM_CHILD_IOV - 1].iov_len = 256;

	iov[BDEV_IO_NUM_CHILD_IOV].iov_base = (void *)((BDEV_IO_NUM_CHILD_IOV + 1) * 0x10000);
	iov[BDEV_IO_NUM_CHILD_IOV].iov_len = 256;

	iov[BDEV_IO_NUM_CHILD_IOV + 1].iov_base = (void *)((BDEV_IO_NUM_CHILD_IOV + 2) * 0x10000);
	iov[BDEV_IO_NUM_CHILD_IOV + 1].iov_len = 512;

	bdev->optimal_io_boundary = BDEV_IO_NUM_CHILD_IOV;
	g_io_done = false;
	g_io_status = 0;
	/* The first expected I/O spans offset 0 to BDEV_IO_NUM_CHILD_IOV - 1 */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0,
					   BDEV_IO_NUM_CHILD_IOV - 1, BDEV_IO_NUM_CHILD_IOV - 1);
	for (i = 0; i < BDEV_IO_NUM_CHILD_IOV - 1; i++) {
		ut_expected_io_set_iov(expected_io, i,
				       (void *)((i + 1) * 0x10000), 512);
	}
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
	/* The second expected I/O spans offset BDEV_IO_NUM_CHILD_IOV - 1 to BDEV_IO_NUM_CHILD_IOV */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, BDEV_IO_NUM_CHILD_IOV - 1,
					   1, 2);
	ut_expected_io_set_iov(expected_io, 0,
			       (void *)(BDEV_IO_NUM_CHILD_IOV * 0x10000), 256);
	ut_expected_io_set_iov(expected_io, 1,
			       (void *)((BDEV_IO_NUM_CHILD_IOV + 1) * 0x10000), 256);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
	/* The third expected I/O spans offset BDEV_IO_NUM_CHILD_IOV to BDEV_IO_NUM_CHILD_IOV + 1 */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, BDEV_IO_NUM_CHILD_IOV,
					   1, 1);
	ut_expected_io_set_iov(expected_io, 0,
			       (void *)((BDEV_IO_NUM_CHILD_IOV + 2) * 0x10000), 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	rc = spdk_bdev_readv_blocks(desc, io_ch, iov, BDEV_IO_NUM_CHILD_IOV * 2, 0,
				    BDEV_IO_NUM_CHILD_IOV + 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2);
	stub_complete_io(2);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);

	/* Test a multi-vector command that needs to be split due to the I/O
	 * boundary and the capacity of child iovs.  In particular, test the
	 * case where the command is split due to the capacity of child iovs
	 * while the tail address is not aligned to the block size and must be
	 * rewound to the aligned address.
	 *
	 * The iovecs used in the read request are complex but are based on the
	 * data collected in a real issue.  We change the base addresses but
	 * keep the lengths so as not to lose the credibility of the test.
	 */
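	/*
	 * Boundary = 128 blocks (64 KiB at 512-byte blocks).  The 61 iovecs
	 * below total 543 blocks, so the request splits on both the child iov
	 * capacity and the 64 KiB boundaries, producing the 7 children
	 * expected further down (126 + 2 + 128 + 128 + 128 + 30 + 1 = 543).
	 */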
	bdev->optimal_io_boundary = 128;
	g_io_done = false;
	g_io_status = 0;

	for (i = 0; i < 31; i++) {
		iov[i].iov_base = (void *)(0xFEED0000000 + (i << 20));
		iov[i].iov_len = 1024;
	}
	iov[31].iov_base = (void *)0xFEED1F00000;
	iov[31].iov_len = 32768;
	iov[32].iov_base = (void *)0xFEED2000000;
	iov[32].iov_len = 160;
	iov[33].iov_base = (void *)0xFEED2100000;
	iov[33].iov_len = 4096;
	iov[34].iov_base = (void *)0xFEED2200000;
	iov[34].iov_len = 4096;
	iov[35].iov_base = (void *)0xFEED2300000;
	iov[35].iov_len = 4096;
	iov[36].iov_base = (void *)0xFEED2400000;
	iov[36].iov_len = 4096;
	iov[37].iov_base = (void *)0xFEED2500000;
	iov[37].iov_len = 4096;
	iov[38].iov_base = (void *)0xFEED2600000;
	iov[38].iov_len = 4096;
	iov[39].iov_base = (void *)0xFEED2700000;
	iov[39].iov_len = 4096;
	iov[40].iov_base = (void *)0xFEED2800000;
	iov[40].iov_len = 4096;
	iov[41].iov_base = (void *)0xFEED2900000;
	iov[41].iov_len = 4096;
	iov[42].iov_base = (void *)0xFEED2A00000;
	iov[42].iov_len = 4096;
	iov[43].iov_base = (void *)0xFEED2B00000;
	iov[43].iov_len = 12288;
	iov[44].iov_base = (void *)0xFEED2C00000;
	iov[44].iov_len = 8192;
	iov[45].iov_base = (void *)0xFEED2F00000;
	iov[45].iov_len = 4096;
	iov[46].iov_base = (void *)0xFEED3000000;
	iov[46].iov_len = 4096;
	iov[47].iov_base = (void *)0xFEED3100000;
	iov[47].iov_len = 4096;
	iov[48].iov_base = (void *)0xFEED3200000;
	iov[48].iov_len = 24576;
	iov[49].iov_base = (void *)0xFEED3300000;
	iov[49].iov_len = 16384;
	iov[50].iov_base = (void *)0xFEED3400000;
	iov[50].iov_len = 12288;
	iov[51].iov_base = (void *)0xFEED3500000;
	iov[51].iov_len = 4096;
	iov[52].iov_base = (void *)0xFEED3600000;
	iov[52].iov_len = 4096;
	iov[53].iov_base = (void *)0xFEED3700000;
	iov[53].iov_len = 4096;
	iov[54].iov_base = (void *)0xFEED3800000;
	iov[54].iov_len = 28672;
	iov[55].iov_base = (void *)0xFEED3900000;
	iov[55].iov_len = 20480;
	iov[56].iov_base = (void *)0xFEED3A00000;
	iov[56].iov_len = 4096;
	iov[57].iov_base = (void *)0xFEED3B00000;
	iov[57].iov_len = 12288;
	iov[58].iov_base = (void *)0xFEED3C00000;
	iov[58].iov_len = 4096;
	iov[59].iov_base = (void *)0xFEED3D00000;
	iov[59].iov_len = 4096;
	iov[60].iov_base = (void *)0xFEED3E00000;
	iov[60].iov_len = 352;

	/* The 1st child I/O must be from iov[0] to iov[31], split by the
	 * capacity of child iovs.
	 */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0, 126, 32);
	for (i = 0; i < 32; i++) {
		ut_expected_io_set_iov(expected_io, i, iov[i].iov_base, iov[i].iov_len);
	}
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	/* The 2nd child I/O must be from iov[32] to the first 864 bytes of
	 * iov[33], split by the I/O boundary requirement.
	 */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 126, 2, 2);
	ut_expected_io_set_iov(expected_io, 0, iov[32].iov_base, iov[32].iov_len);
	ut_expected_io_set_iov(expected_io, 1, iov[33].iov_base, 864);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
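	/*
	 * Arithmetic check for the 2nd child: iov[32] is 160 bytes, and
	 * 160 + 864 = 1024 bytes = 2 blocks, ending exactly on the 128-block
	 * boundary at LBA 128.
	 */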
	/* The 3rd child I/O must be from the remaining 3232 bytes of iov[33] to
	 * the first 864 bytes of iov[46], split by the I/O boundary requirement.
	 */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 128, 128, 14);
	ut_expected_io_set_iov(expected_io, 0, (void *)((uintptr_t)iov[33].iov_base + 864),
			       iov[33].iov_len - 864);
	ut_expected_io_set_iov(expected_io, 1, iov[34].iov_base, iov[34].iov_len);
	ut_expected_io_set_iov(expected_io, 2, iov[35].iov_base, iov[35].iov_len);
	ut_expected_io_set_iov(expected_io, 3, iov[36].iov_base, iov[36].iov_len);
	ut_expected_io_set_iov(expected_io, 4, iov[37].iov_base, iov[37].iov_len);
	ut_expected_io_set_iov(expected_io, 5, iov[38].iov_base, iov[38].iov_len);
	ut_expected_io_set_iov(expected_io, 6, iov[39].iov_base, iov[39].iov_len);
	ut_expected_io_set_iov(expected_io, 7, iov[40].iov_base, iov[40].iov_len);
	ut_expected_io_set_iov(expected_io, 8, iov[41].iov_base, iov[41].iov_len);
	ut_expected_io_set_iov(expected_io, 9, iov[42].iov_base, iov[42].iov_len);
	ut_expected_io_set_iov(expected_io, 10, iov[43].iov_base, iov[43].iov_len);
	ut_expected_io_set_iov(expected_io, 11, iov[44].iov_base, iov[44].iov_len);
	ut_expected_io_set_iov(expected_io, 12, iov[45].iov_base, iov[45].iov_len);
	ut_expected_io_set_iov(expected_io, 13, iov[46].iov_base, 864);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	/* The 4th child I/O must be from the remaining 3232 bytes of iov[46] to
	 * the first 864 bytes of iov[52], split by the I/O boundary requirement.
	 */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 256, 128, 7);
	ut_expected_io_set_iov(expected_io, 0, (void *)((uintptr_t)iov[46].iov_base + 864),
			       iov[46].iov_len - 864);
	ut_expected_io_set_iov(expected_io, 1, iov[47].iov_base, iov[47].iov_len);
	ut_expected_io_set_iov(expected_io, 2, iov[48].iov_base, iov[48].iov_len);
	ut_expected_io_set_iov(expected_io, 3, iov[49].iov_base, iov[49].iov_len);
	ut_expected_io_set_iov(expected_io, 4, iov[50].iov_base, iov[50].iov_len);
	ut_expected_io_set_iov(expected_io, 5, iov[51].iov_base, iov[51].iov_len);
	ut_expected_io_set_iov(expected_io, 6, iov[52].iov_base, 864);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	/* The 5th child I/O must be from the remaining 3232 bytes of iov[52] to
	 * the first 4960 bytes of iov[57], split by the I/O boundary requirement.
	 */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 384, 128, 6);
	ut_expected_io_set_iov(expected_io, 0, (void *)((uintptr_t)iov[52].iov_base + 864),
			       iov[52].iov_len - 864);
	ut_expected_io_set_iov(expected_io, 1, iov[53].iov_base, iov[53].iov_len);
	ut_expected_io_set_iov(expected_io, 2, iov[54].iov_base, iov[54].iov_len);
	ut_expected_io_set_iov(expected_io, 3, iov[55].iov_base, iov[55].iov_len);
	ut_expected_io_set_iov(expected_io, 4, iov[56].iov_base, iov[56].iov_len);
	ut_expected_io_set_iov(expected_io, 5, iov[57].iov_base, 4960);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
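	/*
	 * 5th child sanity check: 3232 + 4096 + 28672 + 20480 + 4096 + 4960 =
	 * 65536 bytes = 128 blocks, again ending on a 128-block boundary.
	 */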
	/* The 6th child I/O must be from the remaining 7328 bytes of iov[57],
	 * through all of iov[58], to the first 3936 bytes of iov[59], split by
	 * the capacity of child iovs.
	 */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 512, 30, 3);
	ut_expected_io_set_iov(expected_io, 0, (void *)((uintptr_t)iov[57].iov_base + 4960),
			       iov[57].iov_len - 4960);
	ut_expected_io_set_iov(expected_io, 1, iov[58].iov_base, iov[58].iov_len);
	ut_expected_io_set_iov(expected_io, 2, iov[59].iov_base, 3936);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	/* The 7th child I/O is from the remaining 160 bytes of iov[59] and iov[60]. */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 542, 1, 2);
	ut_expected_io_set_iov(expected_io, 0, (void *)((uintptr_t)iov[59].iov_base + 3936),
			       iov[59].iov_len - 3936);
	ut_expected_io_set_iov(expected_io, 1, iov[60].iov_base, iov[60].iov_len);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	rc = spdk_bdev_readv_blocks(desc, io_ch, iov, 61, 0, 543, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 5);
	stub_complete_io(5);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
	CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS);

	/* Test a WRITE_ZEROES that would span an I/O boundary.  WRITE_ZEROES should not be
	 * split, so test that.
	 */
	bdev->optimal_io_boundary = 15;
	g_io_done = false;
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, 9, 36, 0);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	rc = spdk_bdev_write_zeroes_blocks(desc, io_ch, 9, 36, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == true);

	/* Test an UNMAP.  This should also not be split. */
	bdev->optimal_io_boundary = 16;
	g_io_done = false;
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_UNMAP, 15, 2, 0);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	rc = spdk_bdev_unmap_blocks(desc, io_ch, 15, 2, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == true);

	/* Test a FLUSH.  This should also not be split. */
	bdev->optimal_io_boundary = 16;
	g_io_done = false;
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_FLUSH, 15, 2, 0);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	rc = spdk_bdev_flush_blocks(desc, io_ch, 15, 2, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == true);

	CU_ASSERT(TAILQ_EMPTY(&g_bdev_ut_channel->expected_io));
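	/*
	 * Summary of the three cases above: WRITE_ZEROES, UNMAP and FLUSH pass
	 * through whole even when they span the optimal_io_boundary; only
	 * LBA-based READ/WRITE payloads have been split in this test.
	 */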
	/* Child requests return an error status */
	bdev->optimal_io_boundary = 16;
	iov[0].iov_base = (void *)0x10000;
	iov[0].iov_len = 512 * 64;
	g_io_exp_status = SPDK_BDEV_IO_STATUS_FAILED;
	g_io_done = false;
	g_io_status = SPDK_BDEV_IO_STATUS_SUCCESS;

	rc = spdk_bdev_readv_blocks(desc, io_ch, iov, 1, 1, 64, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 5);
	stub_complete_io(4);
	CU_ASSERT(g_io_done == false);
	CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED);

	/* For this test we will create the following conditions to hit the code
	 * path where we are trying to send an I/O following a split that has no
	 * iovs because we had to trim them for alignment reasons.
	 *
	 * - 16K boundary, our I/O will start at offset 0 with a length of 0x4200
	 * - Our iovs are 0x212 in size so that we run into the 16K boundary at
	 *   child iov position 30 and overshoot by 0x2e.
	 * - That means we'll send the I/O and loop back to pick up the remaining
	 *   bytes at child iov index 31.  When we do, we find that we have to
	 *   shorten index 31 by 0x2e, which eliminates that vector, so we just
	 *   send the first split I/O with 30 vectors and let the completion pick
	 *   up the last 2 vectors.
	 */
	bdev->optimal_io_boundary = 32;
	bdev->split_on_optimal_io_boundary = true;
	g_io_done = false;

	/* Init all parent iovs to 0x212 */
	for (i = 0; i < BDEV_IO_NUM_CHILD_IOV + 2; i++) {
		iov[i].iov_base = (void *)((i + 1) * 0x10000);
		iov[i].iov_len = 0x212;
	}

	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0, BDEV_IO_NUM_CHILD_IOV,
					   BDEV_IO_NUM_CHILD_IOV - 1);
	/* expect 0-29 to be 1:1 with the parent iov */
	for (i = 0; i < BDEV_IO_NUM_CHILD_IOV - 2; i++) {
		ut_expected_io_set_iov(expected_io, i, iov[i].iov_base, iov[i].iov_len);
	}

	/* expect index 30 to be shortened to 0x1e4 (0x212 - 0x2e) because of the
	 * alignment, where 0x2e is the amount we overshot the 16K boundary
	 */
	ut_expected_io_set_iov(expected_io, BDEV_IO_NUM_CHILD_IOV - 2,
			       (void *)(iov[BDEV_IO_NUM_CHILD_IOV - 2].iov_base), 0x1e4);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
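	/*
	 * First child math: 30 full 0x212 iovecs plus the shortened 0x1e4 tail
	 * add up to exactly 0x4000 bytes (32 blocks), so the child ends
	 * precisely on the 16K boundary.
	 */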
	/* 2nd child IO will have 2 remaining vectors, one to pick up from the
	 * one that was shortened and takes it to the next boundary, and then a
	 * final one to get us to 0x4200 bytes for the I/O.
	 */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, BDEV_IO_NUM_CHILD_IOV,
					   BDEV_IO_NUM_CHILD_IOV, 2);
	/* position 30 picked up the remaining bytes to the next boundary */
	ut_expected_io_set_iov(expected_io, 0,
			       (void *)(iov[BDEV_IO_NUM_CHILD_IOV - 2].iov_base + 0x1e4), 0x2e);

	/* position 31 picked up the rest of the transfer to get us to 0x4200 */
	ut_expected_io_set_iov(expected_io, 1,
			       (void *)(iov[BDEV_IO_NUM_CHILD_IOV - 1].iov_base), 0x1d2);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	rc = spdk_bdev_readv_blocks(desc, io_ch, iov, BDEV_IO_NUM_CHILD_IOV + 1, 0,
				    BDEV_IO_NUM_CHILD_IOV + 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);

	spdk_put_io_channel(io_ch);
	spdk_bdev_close(desc);
	free_bdev(bdev);
	spdk_bdev_finish(bdev_fini_cb, NULL);
	poll_threads();
}

static void
bdev_io_split_with_io_wait(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc = NULL;
	struct spdk_io_channel *io_ch;
	struct spdk_bdev_channel *channel;
	struct spdk_bdev_mgmt_channel *mgmt_ch;
	struct spdk_bdev_opts bdev_opts = {
		.bdev_io_pool_size = 2,
		.bdev_io_cache_size = 1,
	};
	struct iovec iov[3];
	struct ut_expected_io *expected_io;
	int rc;

	rc = spdk_bdev_set_opts(&bdev_opts);
	CU_ASSERT(rc == 0);
	spdk_bdev_initialize(bdev_init_cb, NULL);

	bdev = allocate_bdev("bdev0");

	rc = spdk_bdev_open(bdev, true, NULL, NULL, &desc);
	CU_ASSERT(rc == 0);
	CU_ASSERT(desc != NULL);
	io_ch = spdk_bdev_get_io_channel(desc);
	CU_ASSERT(io_ch != NULL);
	channel = spdk_io_channel_get_ctx(io_ch);
	mgmt_ch = channel->shared_resource->mgmt_ch;

	bdev->optimal_io_boundary = 16;
	bdev->split_on_optimal_io_boundary = true;

	rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);

	/* Now test that a single-vector command is split correctly.
	 * Offset 14, length 8, payload 0xF000
	 *  Child - Offset 14, length 2, payload 0xF000
	 *  Child - Offset 16, length 6, payload 0xF000 + 2 * 512
	 *
	 * Set up the expected values before calling spdk_bdev_read_blocks
	 */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 14, 2, 1);
	ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 2 * 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 16, 6, 1);
	ut_expected_io_set_iov(expected_io, 0, (void *)(0xF000 + 2 * 512), 6 * 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	/* The following children will be submitted sequentially due to the
	 * capacity of spdk_bdev_io.
	 */
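	/*
	 * With bdev_io_pool_size = 2 and one spdk_bdev_io already held by the
	 * outstanding read above, only a single child I/O can be allocated at
	 * a time, which forces the split path through the io_wait machinery.
	 */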

	/* The first child I/O will be queued to wait until an spdk_bdev_io becomes available */
	rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 8, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(!TAILQ_EMPTY(&mgmt_ch->io_wait_queue));
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);

	/* Completing the first read I/O will submit the first child */
	stub_complete_io(1);
	CU_ASSERT(TAILQ_EMPTY(&mgmt_ch->io_wait_queue));
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);

	/* Completing the first child will submit the second child */
	stub_complete_io(1);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);

	/* Complete the second child I/O.  This should result in our callback getting
	 * invoked since the parent I/O is now complete.
	 */
	stub_complete_io(1);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);

	/* Now set up a more complex, multi-vector command that needs to be split,
	 * including splitting iovecs.
	 */
	iov[0].iov_base = (void *)0x10000;
	iov[0].iov_len = 512;
	iov[1].iov_base = (void *)0x20000;
	iov[1].iov_len = 20 * 512;
	iov[2].iov_base = (void *)0x30000;
	iov[2].iov_len = 11 * 512;
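
	/* With a 16-block boundary, a 32-block write at offset 14 splits into children of
	 * 2 blocks (14-15), 16 blocks (16-31) and 14 blocks (32-45).  iov[1] gets carved up
	 * across all three children: 512 bytes in the first, 16 * 512 in the second, and the
	 * remaining 3 * 512 (plus all of iov[2]) in the third, which is what the expected
	 * I/Os below encode.
	 */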

	g_io_done = false;
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 14, 2, 2);
	ut_expected_io_set_iov(expected_io, 0, (void *)0x10000, 512);
	ut_expected_io_set_iov(expected_io, 1, (void *)0x20000, 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 16, 16, 1);
	ut_expected_io_set_iov(expected_io, 0, (void *)(0x20000 + 512), 16 * 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 32, 14, 2);
	ut_expected_io_set_iov(expected_io, 0, (void *)(0x20000 + 17 * 512), 3 * 512);
	ut_expected_io_set_iov(expected_io, 1, (void *)0x30000, 11 * 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	rc = spdk_bdev_writev_blocks(desc, io_ch, iov, 3, 14, 32, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	/* The following children will be submitted sequentially due to the capacity of
	 * spdk_bdev_io.
	 */

	/* Completing the first child will submit the second child */
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == false);

	/* Completing the second child will submit the third child */
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == false);

	/* Completing the third child will result in our callback getting invoked
	 * since the parent I/O is now complete.
	 */
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == true);

	CU_ASSERT(TAILQ_EMPTY(&g_bdev_ut_channel->expected_io));

	spdk_put_io_channel(io_ch);
	spdk_bdev_close(desc);
	free_bdev(bdev);
	spdk_bdev_finish(bdev_fini_cb, NULL);
	poll_threads();
}

static void
bdev_io_alignment(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc = NULL;
	struct spdk_io_channel *io_ch;
	struct spdk_bdev_opts bdev_opts = {
		.bdev_io_pool_size = 20,
		.bdev_io_cache_size = 2,
	};
	int rc;
	void *buf;
	struct iovec iovs[2];
	int iovcnt;
	uint64_t alignment;

	rc = spdk_bdev_set_opts(&bdev_opts);
	CU_ASSERT(rc == 0);
	spdk_bdev_initialize(bdev_init_cb, NULL);

	fn_table.submit_request = stub_submit_request_aligned_buffer;
	bdev = allocate_bdev("bdev0");

	rc = spdk_bdev_open(bdev, true, NULL, NULL, &desc);
	CU_ASSERT(rc == 0);
	CU_ASSERT(desc != NULL);
	io_ch = spdk_bdev_get_io_channel(desc);
	CU_ASSERT(io_ch != NULL);

	/* Create aligned buffer */
	rc = posix_memalign(&buf, 4096, 8192);
	SPDK_CU_ASSERT_FATAL(rc == 0);

	/* Pass aligned single buffer with no alignment required */
	alignment = 1;
	bdev->required_alignment = spdk_u32log2(alignment);

	rc = spdk_bdev_write_blocks(desc, io_ch, buf, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	stub_complete_io(1);
	CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt,
				    alignment));

	rc = spdk_bdev_read_blocks(desc, io_ch, buf, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	stub_complete_io(1);
	CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt,
				    alignment));

	/* Pass unaligned single buffer with no alignment required */
	alignment = 1;
	bdev->required_alignment = spdk_u32log2(alignment);

	rc = spdk_bdev_write_blocks(desc, io_ch, buf + 4, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);
	CU_ASSERT(g_bdev_io->u.bdev.iovs[0].iov_base == buf + 4);
	stub_complete_io(1);

	rc = spdk_bdev_read_blocks(desc, io_ch, buf + 4, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);
	CU_ASSERT(g_bdev_io->u.bdev.iovs[0].iov_base == buf + 4);
	stub_complete_io(1);
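
	/* For the bounce cases below the payload violates required_alignment, so the bdev
	 * layer copies it through an aligned bounce buffer: the caller's iovs are stashed
	 * (internal.orig_iovcnt != 0 while the I/O is in flight), u.bdev.iovs is pointed at
	 * internal.bounce_iov, and the original iovs are restored on completion, which is
	 * what the asserts below verify.
	 */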

	/* Pass unaligned single buffer with 512 alignment required */
	alignment = 512;
	bdev->required_alignment = spdk_u32log2(alignment);

	rc = spdk_bdev_write_blocks(desc, io_ch, buf + 4, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 1);
	CU_ASSERT(g_bdev_io->u.bdev.iovs == &g_bdev_io->internal.bounce_iov);
	CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt,
				    alignment));
	stub_complete_io(1);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);

	rc = spdk_bdev_read_blocks(desc, io_ch, buf + 4, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 1);
	CU_ASSERT(g_bdev_io->u.bdev.iovs == &g_bdev_io->internal.bounce_iov);
	CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt,
				    alignment));
	stub_complete_io(1);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);

	/* Pass unaligned single buffer with 4096 alignment required */
	alignment = 4096;
	bdev->required_alignment = spdk_u32log2(alignment);

	rc = spdk_bdev_write_blocks(desc, io_ch, buf + 8, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 1);
	CU_ASSERT(g_bdev_io->u.bdev.iovs == &g_bdev_io->internal.bounce_iov);
	CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt,
				    alignment));
	stub_complete_io(1);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);

	rc = spdk_bdev_read_blocks(desc, io_ch, buf + 8, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 1);
	CU_ASSERT(g_bdev_io->u.bdev.iovs == &g_bdev_io->internal.bounce_iov);
	CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt,
				    alignment));
	stub_complete_io(1);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);

	/* Pass aligned iovs with no alignment required */
	alignment = 1;
	bdev->required_alignment = spdk_u32log2(alignment);

	iovcnt = 1;
	iovs[0].iov_base = buf;
	iovs[0].iov_len = 512;

	rc = spdk_bdev_writev(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);
	stub_complete_io(1);
	CU_ASSERT(g_bdev_io->u.bdev.iovs[0].iov_base == iovs[0].iov_base);

	rc = spdk_bdev_readv(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);
	stub_complete_io(1);
	CU_ASSERT(g_bdev_io->u.bdev.iovs[0].iov_base == iovs[0].iov_base);

	/* Pass unaligned iovs with no alignment required */
	alignment = 1;
	bdev->required_alignment = spdk_u32log2(alignment);

	iovcnt = 2;
	iovs[0].iov_base = buf + 16;
	iovs[0].iov_len = 256;
	iovs[1].iov_base = buf + 16 + 256 + 32;
	iovs[1].iov_len = 256;

	rc = spdk_bdev_writev(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);
	stub_complete_io(1);
	CU_ASSERT(g_bdev_io->u.bdev.iovs[0].iov_base == iovs[0].iov_base);

	rc = spdk_bdev_readv(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);
	stub_complete_io(1);
	CU_ASSERT(g_bdev_io->u.bdev.iovs[0].iov_base == iovs[0].iov_base);

	/* Pass unaligned iovs with 2048 alignment required */
	alignment = 2048;
	bdev->required_alignment = spdk_u32log2(alignment);

	iovcnt = 2;
	iovs[0].iov_base = buf + 16;
	iovs[0].iov_len = 256;
	iovs[1].iov_base = buf + 16 + 256 + 32;
	iovs[1].iov_len = 256;

	rc = spdk_bdev_writev(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == iovcnt);
	CU_ASSERT(g_bdev_io->u.bdev.iovs == &g_bdev_io->internal.bounce_iov);
	CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt,
				    alignment));
	stub_complete_io(1);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);

	rc = spdk_bdev_readv(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == iovcnt);
	CU_ASSERT(g_bdev_io->u.bdev.iovs == &g_bdev_io->internal.bounce_iov);
	CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt,
				    alignment));
	stub_complete_io(1);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);
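
	/* A NULL iov_base asks the bdev layer to allocate the data buffer itself; buffers
	 * obtained that way are allocated to satisfy required_alignment, so no bounce is
	 * needed and orig_iovcnt stays 0 in the two cases below.
	 */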

	/* Pass iov without allocated buffer and no alignment required */
	alignment = 1;
	bdev->required_alignment = spdk_u32log2(alignment);

	iovcnt = 1;
	iovs[0].iov_base = NULL;
	iovs[0].iov_len = 0;

	rc = spdk_bdev_readv(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);
	CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt,
				    alignment));
	stub_complete_io(1);

	/* Pass iov without allocated buffer with 1024 alignment required */
	alignment = 1024;
	bdev->required_alignment = spdk_u32log2(alignment);

	iovcnt = 1;
	iovs[0].iov_base = NULL;
	iovs[0].iov_len = 0;

	rc = spdk_bdev_readv(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);
	CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt,
				    alignment));
	stub_complete_io(1);

	spdk_put_io_channel(io_ch);
	spdk_bdev_close(desc);
	free_bdev(bdev);
	spdk_bdev_finish(bdev_fini_cb, NULL);
	poll_threads();

	free(buf);
}

static void
bdev_io_alignment_with_boundary(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc = NULL;
	struct spdk_io_channel *io_ch;
	struct spdk_bdev_opts bdev_opts = {
		.bdev_io_pool_size = 20,
		.bdev_io_cache_size = 2,
	};
	int rc;
	void *buf;
	struct iovec iovs[2];
	int iovcnt;
	uint64_t alignment;

	rc = spdk_bdev_set_opts(&bdev_opts);
	CU_ASSERT(rc == 0);
	spdk_bdev_initialize(bdev_init_cb, NULL);

	fn_table.submit_request = stub_submit_request_aligned_buffer;
	bdev = allocate_bdev("bdev0");

	rc = spdk_bdev_open(bdev, true, NULL, NULL, &desc);
	CU_ASSERT(rc == 0);
	CU_ASSERT(desc != NULL);
	io_ch = spdk_bdev_get_io_channel(desc);
	CU_ASSERT(io_ch != NULL);

	/* Create aligned buffer */
	rc = posix_memalign(&buf, 4096, 131072);
	SPDK_CU_ASSERT_FATAL(rc == 0);
	g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS;

	/* 512 * 3 with 2 IO boundary, allocate small data buffer from bdev layer */
	alignment = 512;
	bdev->required_alignment = spdk_u32log2(alignment);
	bdev->optimal_io_boundary = 2;
	bdev->split_on_optimal_io_boundary = true;

	iovcnt = 1;
	iovs[0].iov_base = NULL;
	iovs[0].iov_len = 512 * 3;

	rc = spdk_bdev_readv_blocks(desc, io_ch, iovs, iovcnt, 1, 3, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2);
	stub_complete_io(2);

	/* 8KiB with 16 IO boundary, allocate large data buffer from bdev layer */
	alignment = 512;
	bdev->required_alignment = spdk_u32log2(alignment);
	bdev->optimal_io_boundary = 16;
	bdev->split_on_optimal_io_boundary = true;

	iovcnt = 1;
	iovs[0].iov_base = NULL;
	iovs[0].iov_len = 512 * 16;

	rc = spdk_bdev_readv_blocks(desc, io_ch, iovs, iovcnt, 1, 16, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2);
	stub_complete_io(2);
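
	/* The "small"/"large" wording above refers to the bdev layer's two internal data
	 * buffer pools; which pool serves a request depends on its length plus alignment
	 * padding, so the 1.5KiB read fits a small-pool buffer while the 8KiB read needs a
	 * large-pool one.
	 */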

	/* 512 * 160 with 128 IO boundary, 63.5KiB + 16.5KiB for the two child requests */
	alignment = 512;
	bdev->required_alignment = spdk_u32log2(alignment);
	bdev->optimal_io_boundary = 128;
	bdev->split_on_optimal_io_boundary = true;

	iovcnt = 1;
	iovs[0].iov_base = buf + 16;
	iovs[0].iov_len = 512 * 160;
	rc = spdk_bdev_readv_blocks(desc, io_ch, iovs, iovcnt, 1, 160, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2);
	stub_complete_io(2);

	/* 512 * 3 with 2 IO boundary */
	alignment = 512;
	bdev->required_alignment = spdk_u32log2(alignment);
	bdev->optimal_io_boundary = 2;
	bdev->split_on_optimal_io_boundary = true;

	iovcnt = 2;
	iovs[0].iov_base = buf + 16;
	iovs[0].iov_len = 512;
	iovs[1].iov_base = buf + 16 + 512 + 32;
	iovs[1].iov_len = 1024;

	rc = spdk_bdev_writev_blocks(desc, io_ch, iovs, iovcnt, 1, 3, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2);
	stub_complete_io(2);

	rc = spdk_bdev_readv_blocks(desc, io_ch, iovs, iovcnt, 1, 3, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2);
	stub_complete_io(2);

	/* 512 * 64 with 32 IO boundary */
	bdev->optimal_io_boundary = 32;
	iovcnt = 2;
	iovs[0].iov_base = buf + 16;
	iovs[0].iov_len = 16384;
	iovs[1].iov_base = buf + 16 + 16384 + 32;
	iovs[1].iov_len = 16384;

	rc = spdk_bdev_writev_blocks(desc, io_ch, iovs, iovcnt, 1, 64, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 3);
	stub_complete_io(3);

	rc = spdk_bdev_readv_blocks(desc, io_ch, iovs, iovcnt, 1, 64, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 3);
	stub_complete_io(3);

	/* 512 * 160 with 32 IO boundary */
	iovcnt = 1;
	iovs[0].iov_base = buf + 16;
	iovs[0].iov_len = 16384 + 65536;

	rc = spdk_bdev_writev_blocks(desc, io_ch, iovs, iovcnt, 1, 160, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 6);
	stub_complete_io(6);

	spdk_put_io_channel(io_ch);
	spdk_bdev_close(desc);
	free_bdev(bdev);
	spdk_bdev_finish(bdev_fini_cb, NULL);
	poll_threads();

	free(buf);
}

static void
histogram_status_cb(void *cb_arg, int status)
{
	g_status = status;
}

static void
histogram_data_cb(void *cb_arg, int status, struct spdk_histogram_data *histogram)
{
	g_status = status;
	g_histogram = histogram;
}

static void
histogram_io_count(void *ctx, uint64_t start, uint64_t end, uint64_t count,
		   uint64_t total, uint64_t so_far)
{
	g_count += count;
}

static void
bdev_histograms(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc = NULL;
	struct spdk_io_channel *ch;
	struct spdk_histogram_data *histogram;
	uint8_t buf[4096];
	int rc;

	spdk_bdev_initialize(bdev_init_cb, NULL);

	bdev = allocate_bdev("bdev");

	rc = spdk_bdev_open(bdev, true, NULL, NULL, &desc);
	CU_ASSERT(rc == 0);
	CU_ASSERT(desc != NULL);

	ch = spdk_bdev_get_io_channel(desc);
	CU_ASSERT(ch != NULL);

	/* Enable histogram */
	g_status = -1;
	spdk_bdev_histogram_enable(bdev, histogram_status_cb, NULL, true);
	poll_threads();
	CU_ASSERT(g_status == 0);
	CU_ASSERT(bdev->internal.histogram_enabled == true);
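
	/* Enabling the histogram is asynchronous: the bdev layer messages every I/O channel
	 * before invoking the status callback, so the poll_threads() above is what drives
	 * g_status and histogram_enabled to their final values.
	 */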

	/* Allocate histogram */
	histogram = spdk_histogram_data_alloc();
	SPDK_CU_ASSERT_FATAL(histogram != NULL);

	/* Check if histogram is zeroed */
	spdk_bdev_histogram_get(bdev, histogram, histogram_data_cb, NULL);
	poll_threads();
	CU_ASSERT(g_status == 0);
	SPDK_CU_ASSERT_FATAL(g_histogram != NULL);

	g_count = 0;
	spdk_histogram_data_iterate(g_histogram, histogram_io_count, NULL);

	CU_ASSERT(g_count == 0);

	rc = spdk_bdev_write_blocks(desc, ch, &buf, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);

	spdk_delay_us(10);
	stub_complete_io(1);
	poll_threads();

	rc = spdk_bdev_read_blocks(desc, ch, &buf, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);

	spdk_delay_us(10);
	stub_complete_io(1);
	poll_threads();

	/* Check if histogram gathered data from all I/O channels */
	g_histogram = NULL;
	spdk_bdev_histogram_get(bdev, histogram, histogram_data_cb, NULL);
	poll_threads();
	CU_ASSERT(g_status == 0);
	CU_ASSERT(bdev->internal.histogram_enabled == true);
	SPDK_CU_ASSERT_FATAL(g_histogram != NULL);

	g_count = 0;
	spdk_histogram_data_iterate(g_histogram, histogram_io_count, NULL);
	CU_ASSERT(g_count == 2);

	/* Disable histogram */
	spdk_bdev_histogram_enable(bdev, histogram_status_cb, NULL, false);
	poll_threads();
	CU_ASSERT(g_status == 0);
	CU_ASSERT(bdev->internal.histogram_enabled == false);

	/* Try to run histogram commands on disabled bdev */
	spdk_bdev_histogram_get(bdev, histogram, histogram_data_cb, NULL);
	poll_threads();
	CU_ASSERT(g_status == -EFAULT);

	spdk_histogram_data_free(histogram);
	spdk_put_io_channel(ch);
	spdk_bdev_close(desc);
	free_bdev(bdev);
	spdk_bdev_finish(bdev_fini_cb, NULL);
	poll_threads();
}

static void
bdev_write_zeroes(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc = NULL;
	struct spdk_io_channel *ioch;
	struct ut_expected_io *expected_io;
	uint64_t offset, num_io_blocks, num_blocks;
	uint32_t num_completed, num_requests;
	int rc;

	spdk_bdev_initialize(bdev_init_cb, NULL);
	bdev = allocate_bdev("bdev");

	rc = spdk_bdev_open(bdev, true, NULL, NULL, &desc);
	CU_ASSERT_EQUAL(rc, 0);
	SPDK_CU_ASSERT_FATAL(desc != NULL);
	ioch = spdk_bdev_get_io_channel(desc);
	SPDK_CU_ASSERT_FATAL(ioch != NULL);

	fn_table.submit_request = stub_submit_request;
	g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS;

	/* First test that if the bdev supports write_zeroes, the request won't be split */
	bdev->md_len = 0;
	bdev->blocklen = 4096;
	num_blocks = (ZERO_BUFFER_SIZE / bdev->blocklen) * 2;

	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, 0, num_blocks, 0);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
	rc = spdk_bdev_write_zeroes_blocks(desc, ioch, 0, num_blocks, io_done, NULL);
	CU_ASSERT_EQUAL(rc, 0);
	num_completed = stub_complete_io(1);
	CU_ASSERT_EQUAL(num_completed, 1);

	/* Check that if write zeroes is not supported it'll be replaced by regular writes */
	ut_enable_io_type(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, false);
	num_io_blocks = ZERO_BUFFER_SIZE / bdev->blocklen;
	num_requests = 2;
	num_blocks = (ZERO_BUFFER_SIZE / bdev->blocklen) * num_requests;
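
	/* The emulated write-zeroes path writes out of a ZERO_BUFFER_SIZE zero buffer, so
	 * each replacement write covers at most ZERO_BUFFER_SIZE / blocklen blocks;
	 * num_blocks above spans exactly two such writes.
	 */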

	for (offset = 0; offset < num_requests; ++offset) {
		expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE,
						   offset * num_io_blocks, num_io_blocks, 0);
		TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
	}

	rc = spdk_bdev_write_zeroes_blocks(desc, ioch, 0, num_blocks, io_done, NULL);
	CU_ASSERT_EQUAL(rc, 0);
	num_completed = stub_complete_io(num_requests);
	CU_ASSERT_EQUAL(num_completed, num_requests);

	/* Check that the splitting is correct if bdev has interleaved metadata */
	bdev->md_interleave = true;
	bdev->md_len = 64;
	bdev->blocklen = 4096 + 64;
	num_blocks = (ZERO_BUFFER_SIZE / bdev->blocklen) * 2;

	num_requests = offset = 0;
	while (offset < num_blocks) {
		num_io_blocks = spdk_min(ZERO_BUFFER_SIZE / bdev->blocklen, num_blocks - offset);
		expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE,
						   offset, num_io_blocks, 0);
		TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
		offset += num_io_blocks;
		num_requests++;
	}

	rc = spdk_bdev_write_zeroes_blocks(desc, ioch, 0, num_blocks, io_done, NULL);
	CU_ASSERT_EQUAL(rc, 0);
	num_completed = stub_complete_io(num_requests);
	CU_ASSERT_EQUAL(num_completed, num_requests);
	num_completed = stub_complete_io(num_requests);
	CU_ASSERT_EQUAL(num_completed, 0);

	/* Check that the same is true for a separate metadata buffer */
	bdev->md_interleave = false;
	bdev->md_len = 64;
	bdev->blocklen = 4096;

	num_requests = offset = 0;
	while (offset < num_blocks) {
		num_io_blocks = spdk_min(ZERO_BUFFER_SIZE / (bdev->blocklen + bdev->md_len), num_blocks);
		expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE,
						   offset, num_io_blocks, 0);
		expected_io->md_buf = (char *)g_bdev_mgr.zero_buffer + num_io_blocks * bdev->blocklen;
		TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
		offset += num_io_blocks;
		num_requests++;
	}

	rc = spdk_bdev_write_zeroes_blocks(desc, ioch, 0, num_blocks, io_done, NULL);
	CU_ASSERT_EQUAL(rc, 0);
	num_completed = stub_complete_io(num_requests);
	CU_ASSERT_EQUAL(num_completed, num_requests);

	ut_enable_io_type(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, true);
	spdk_put_io_channel(ioch);
	spdk_bdev_close(desc);
	free_bdev(bdev);
	spdk_bdev_finish(bdev_fini_cb, NULL);
	poll_threads();
}

static void
bdev_open_while_hotremove(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc[2] = {};
	int rc;

	bdev = allocate_bdev("bdev");

	rc = spdk_bdev_open(bdev, false, NULL, NULL, &desc[0]);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(desc[0] != NULL);

	spdk_bdev_unregister(bdev, NULL, NULL);

	rc = spdk_bdev_open(bdev, false, NULL, NULL, &desc[1]);
	CU_ASSERT(rc == -ENODEV);
	SPDK_CU_ASSERT_FATAL(desc[1] == NULL);

	spdk_bdev_close(desc[0]);
	free_bdev(bdev);
}

static void
bdev_close_while_hotremove(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc = NULL;
	int rc = 0;

	bdev = allocate_bdev("bdev");

	rc = spdk_bdev_open_ext("bdev", true, bdev_open_cb1, &desc, &desc);
	CU_ASSERT_EQUAL(rc, 0);

	/* Simulate hot-unplug by unregistering bdev */
	g_event_type1 = 0xFF;
	g_unregister_arg = NULL;
	g_unregister_rc = -1;
	spdk_bdev_unregister(bdev, bdev_unregister_cb, (void *)0x12345678);
	/* Close the device while the remove event is in flight */
	spdk_bdev_close(desc);
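
	/* The unregister cannot finish while the descriptor is open, and both the remove
	 * event and the unregister callback are delivered as thread messages, so neither
	 * fires until poll_threads() runs below.
	 */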

	/* Ensure that the unregister callback is delayed */
	CU_ASSERT_EQUAL(g_unregister_arg, NULL);
	CU_ASSERT_EQUAL(g_unregister_rc, -1);

	poll_threads();

	/* The event callback shall not be issued because the device was closed */
	CU_ASSERT_EQUAL(g_event_type1, 0xFF);
	/* The unregister callback is issued */
	CU_ASSERT_EQUAL(g_unregister_arg, (void *)0x12345678);
	CU_ASSERT_EQUAL(g_unregister_rc, 0);

	free_bdev(bdev);
}

static void
bdev_open_ext(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc1 = NULL;
	struct spdk_bdev_desc *desc2 = NULL;
	int rc = 0;

	bdev = allocate_bdev("bdev");

	/* Opening without an event callback is invalid for the _ext API */
	rc = spdk_bdev_open_ext("bdev", true, NULL, NULL, &desc1);
	CU_ASSERT_EQUAL(rc, -EINVAL);

	rc = spdk_bdev_open_ext("bdev", true, bdev_open_cb1, &desc1, &desc1);
	CU_ASSERT_EQUAL(rc, 0);

	rc = spdk_bdev_open_ext("bdev", true, bdev_open_cb2, &desc2, &desc2);
	CU_ASSERT_EQUAL(rc, 0);

	g_event_type1 = 0xFF;
	g_event_type2 = 0xFF;

	/* Simulate hot-unplug by unregistering bdev */
	spdk_bdev_unregister(bdev, NULL, NULL);
	poll_threads();

	/* Check if correct events have been triggered in event callback fn */
	CU_ASSERT_EQUAL(g_event_type1, SPDK_BDEV_EVENT_REMOVE);
	CU_ASSERT_EQUAL(g_event_type2, SPDK_BDEV_EVENT_REMOVE);

	free_bdev(bdev);
	poll_threads();
}

int
main(int argc, char **argv)
{
	CU_pSuite suite = NULL;
	unsigned int num_failures;

	if (CU_initialize_registry() != CUE_SUCCESS) {
		return CU_get_error();
	}

	suite = CU_add_suite("bdev", null_init, null_clean);
	if (suite == NULL) {
		CU_cleanup_registry();
		return CU_get_error();
	}

	if (
		CU_add_test(suite, "bytes_to_blocks_test", bytes_to_blocks_test) == NULL ||
		CU_add_test(suite, "num_blocks_test", num_blocks_test) == NULL ||
		CU_add_test(suite, "io_valid", io_valid_test) == NULL ||
		CU_add_test(suite, "open_write", open_write_test) == NULL ||
		CU_add_test(suite, "alias_add_del", alias_add_del_test) == NULL ||
		CU_add_test(suite, "get_device_stat", get_device_stat_test) == NULL ||
		CU_add_test(suite, "bdev_io_types", bdev_io_types_test) == NULL ||
		CU_add_test(suite, "bdev_io_wait", bdev_io_wait_test) == NULL ||
		CU_add_test(suite, "bdev_io_spans_boundary", bdev_io_spans_boundary_test) == NULL ||
		CU_add_test(suite, "bdev_io_split", bdev_io_split) == NULL ||
		CU_add_test(suite, "bdev_io_split_with_io_wait", bdev_io_split_with_io_wait) == NULL ||
		CU_add_test(suite, "bdev_io_alignment_with_boundary", bdev_io_alignment_with_boundary) == NULL ||
		CU_add_test(suite, "bdev_io_alignment", bdev_io_alignment) == NULL ||
		CU_add_test(suite, "bdev_histograms", bdev_histograms) == NULL ||
		CU_add_test(suite, "bdev_write_zeroes", bdev_write_zeroes) == NULL ||
		CU_add_test(suite, "bdev_open_while_hotremove", bdev_open_while_hotremove) == NULL ||
		CU_add_test(suite, "bdev_close_while_hotremove", bdev_close_while_hotremove) == NULL ||
		CU_add_test(suite, "bdev_open_ext", bdev_open_ext) == NULL
	) {
		CU_cleanup_registry();
		return CU_get_error();
	}

	allocate_threads(1);
	set_thread(0);

	CU_basic_set_mode(CU_BRM_VERBOSE);
	CU_basic_run_tests();
	num_failures = CU_get_number_of_failures();
	CU_cleanup_registry();

	free_threads();

	return num_failures;
}