/*-
 *   BSD LICENSE
 *
 *   Copyright (c) Intel Corporation. All rights reserved.
 *   Copyright (c) 2019 Mellanox Technologies LTD. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "spdk_cunit.h"

#include "common/lib/ut_multithread.c"
#include "unit/lib/json_mock.c"

#include "spdk/config.h"
/* HACK: disable VTune integration so the unit test doesn't need VTune headers and libs to build */
#undef SPDK_CONFIG_VTUNE

#include "bdev/bdev.c"

struct spdk_trace_histories *g_trace_histories;
DEFINE_STUB_V(spdk_trace_add_register_fn, (struct spdk_trace_register_fn *reg_fn));
DEFINE_STUB_V(spdk_trace_register_owner, (uint8_t type, char id_prefix));
DEFINE_STUB_V(spdk_trace_register_object, (uint8_t type, char id_prefix));
DEFINE_STUB_V(spdk_trace_register_description, (const char *name,
		uint16_t tpoint_id, uint8_t owner_type,
		uint8_t object_type, uint8_t new_object,
		uint8_t arg1_type, const char *arg1_name));
DEFINE_STUB_V(_spdk_trace_record, (uint64_t tsc, uint16_t tpoint_id, uint16_t poller_id,
				   uint32_t size, uint64_t object_id, uint64_t arg1));
DEFINE_STUB(spdk_notify_send, uint64_t, (const char *type, const char *ctx), 0);
DEFINE_STUB(spdk_notify_type_register, struct spdk_notify_type *, (const char *type), NULL);

int g_status;
int g_count;
enum spdk_bdev_event_type g_event_type1;
enum spdk_bdev_event_type g_event_type2;
struct spdk_histogram_data *g_histogram;
void *g_unregister_arg;
int g_unregister_rc;

void
spdk_scsi_nvme_translate(const struct spdk_bdev_io *bdev_io,
			 int *sc, int *sk, int *asc, int *ascq)
{
}

static int
null_init(void)
{
	return 0;
}

static int
null_clean(void)
{
	return 0;
}

static int
stub_destruct(void *ctx)
{
	return 0;
}

struct ut_expected_io {
	uint8_t type;
	uint64_t offset;
	uint64_t length;
	int iovcnt;
	struct iovec iov[BDEV_IO_NUM_CHILD_IOV];
	void *md_buf;
	TAILQ_ENTRY(ut_expected_io) link;
};

struct bdev_ut_channel {
	TAILQ_HEAD(, spdk_bdev_io) outstanding_io;
	uint32_t outstanding_io_count;
	TAILQ_HEAD(, ut_expected_io) expected_io;
};

static bool g_io_done;
static struct spdk_bdev_io *g_bdev_io;
static enum spdk_bdev_io_status g_io_status;
static enum spdk_bdev_io_status g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS;
static uint32_t g_bdev_ut_io_device;
static struct bdev_ut_channel *g_bdev_ut_channel;
static void *g_compare_read_buf;
static uint32_t g_compare_read_buf_len;
static void *g_compare_write_buf;
static uint32_t g_compare_write_buf_len;
static bool g_abort_done;
static enum spdk_bdev_io_status g_abort_status;

static struct ut_expected_io *
ut_alloc_expected_io(uint8_t type, uint64_t offset, uint64_t length, int iovcnt)
{
	struct ut_expected_io *expected_io;

	expected_io = calloc(1, sizeof(*expected_io));
	SPDK_CU_ASSERT_FATAL(expected_io != NULL);

	expected_io->type = type;
	expected_io->offset = offset;
	expected_io->length = length;
	expected_io->iovcnt = iovcnt;

	return expected_io;
}

static void
ut_expected_io_set_iov(struct ut_expected_io *expected_io, int pos, void *base, size_t len)
{
	expected_io->iov[pos].iov_base = base;
	expected_io->iov[pos].iov_len = len;
}

static void
stub_submit_request(struct spdk_io_channel *_ch, struct spdk_bdev_io *bdev_io)
{
	struct bdev_ut_channel *ch = spdk_io_channel_get_ctx(_ch);
	struct ut_expected_io *expected_io;
	struct iovec *iov, *expected_iov;
	struct spdk_bdev_io *bio_to_abort;
	int i;

	g_bdev_io = bdev_io;

	if (g_compare_read_buf && bdev_io->type == SPDK_BDEV_IO_TYPE_READ) {
		uint32_t len = bdev_io->u.bdev.iovs[0].iov_len;

		CU_ASSERT(bdev_io->u.bdev.iovcnt == 1);
		CU_ASSERT(g_compare_read_buf_len == len);
		memcpy(bdev_io->u.bdev.iovs[0].iov_base, g_compare_read_buf, len);
	}

	if (g_compare_write_buf && bdev_io->type == SPDK_BDEV_IO_TYPE_WRITE) {
		uint32_t len = bdev_io->u.bdev.iovs[0].iov_len;

		CU_ASSERT(bdev_io->u.bdev.iovcnt == 1);
		CU_ASSERT(g_compare_write_buf_len == len);
		memcpy(g_compare_write_buf, bdev_io->u.bdev.iovs[0].iov_base, len);
	}

	if (g_compare_read_buf && bdev_io->type == SPDK_BDEV_IO_TYPE_COMPARE) {
		uint32_t len = bdev_io->u.bdev.iovs[0].iov_len;

		CU_ASSERT(bdev_io->u.bdev.iovcnt == 1);
		CU_ASSERT(g_compare_read_buf_len == len);
		if (memcmp(bdev_io->u.bdev.iovs[0].iov_base, g_compare_read_buf, len)) {
			g_io_exp_status = SPDK_BDEV_IO_STATUS_MISCOMPARE;
		}
	}

	if (bdev_io->type == SPDK_BDEV_IO_TYPE_ABORT) {
		if (g_io_exp_status == SPDK_BDEV_IO_STATUS_SUCCESS) {
			TAILQ_FOREACH(bio_to_abort, &ch->outstanding_io, module_link) {
				if (bio_to_abort == bdev_io->u.abort.bio_to_abort) {
					TAILQ_REMOVE(&ch->outstanding_io, bio_to_abort, module_link);
					ch->outstanding_io_count--;
					spdk_bdev_io_complete(bio_to_abort, SPDK_BDEV_IO_STATUS_FAILED);
					break;
				}
			}
		}
	}

	TAILQ_INSERT_TAIL(&ch->outstanding_io, bdev_io, module_link);
	ch->outstanding_io_count++;

	expected_io = TAILQ_FIRST(&ch->expected_io);
	if (expected_io == NULL) {
		return;
	}
	TAILQ_REMOVE(&ch->expected_io, expected_io, link);

	if (expected_io->type != SPDK_BDEV_IO_TYPE_INVALID) {
		CU_ASSERT(bdev_io->type == expected_io->type);
	}

	if (expected_io->md_buf != NULL) {
		CU_ASSERT(expected_io->md_buf == bdev_io->u.bdev.md_buf);
	}

	if (expected_io->length == 0) {
		free(expected_io);
		return;
	}

	CU_ASSERT(expected_io->offset == bdev_io->u.bdev.offset_blocks);
	CU_ASSERT(expected_io->length == bdev_io->u.bdev.num_blocks);

	if (expected_io->iovcnt == 0) {
		free(expected_io);
		/* UNMAP, WRITE_ZEROES and FLUSH don't have iovs, so we can just return now. */
		return;
	}

	CU_ASSERT(expected_io->iovcnt == bdev_io->u.bdev.iovcnt);
	for (i = 0; i < expected_io->iovcnt; i++) {
		iov = &bdev_io->u.bdev.iovs[i];
		expected_iov = &expected_io->iov[i];
		CU_ASSERT(iov->iov_len == expected_iov->iov_len);
		CU_ASSERT(iov->iov_base == expected_iov->iov_base);
	}

	free(expected_io);
}

static void
stub_submit_request_get_buf_cb(struct spdk_io_channel *_ch,
			       struct spdk_bdev_io *bdev_io, bool success)
{
	CU_ASSERT(success == true);

	stub_submit_request(_ch, bdev_io);
}

static void
stub_submit_request_get_buf(struct spdk_io_channel *_ch, struct spdk_bdev_io *bdev_io)
{
	spdk_bdev_io_get_buf(bdev_io, stub_submit_request_get_buf_cb,
			     bdev_io->u.bdev.num_blocks * bdev_io->bdev->blocklen);
}

static uint32_t
stub_complete_io(uint32_t num_to_complete)
{
	struct bdev_ut_channel *ch = g_bdev_ut_channel;
	struct spdk_bdev_io *bdev_io;
	static enum spdk_bdev_io_status io_status;
	uint32_t num_completed = 0;

	while (num_completed < num_to_complete) {
		if (TAILQ_EMPTY(&ch->outstanding_io)) {
			break;
		}
		bdev_io = TAILQ_FIRST(&ch->outstanding_io);
		TAILQ_REMOVE(&ch->outstanding_io, bdev_io, module_link);
		ch->outstanding_io_count--;
		io_status = g_io_exp_status == SPDK_BDEV_IO_STATUS_SUCCESS ? SPDK_BDEV_IO_STATUS_SUCCESS :
			    g_io_exp_status;
		spdk_bdev_io_complete(bdev_io, io_status);
		num_completed++;
	}

	return num_completed;
}

static struct spdk_io_channel *
bdev_ut_get_io_channel(void *ctx)
{
	return spdk_get_io_channel(&g_bdev_ut_io_device);
}

static bool g_io_types_supported[SPDK_BDEV_NUM_IO_TYPES] = {
	[SPDK_BDEV_IO_TYPE_READ] = true,
	[SPDK_BDEV_IO_TYPE_WRITE] = true,
	[SPDK_BDEV_IO_TYPE_COMPARE] = true,
	[SPDK_BDEV_IO_TYPE_UNMAP] = true,
	[SPDK_BDEV_IO_TYPE_FLUSH] = true,
	[SPDK_BDEV_IO_TYPE_RESET] = true,
	[SPDK_BDEV_IO_TYPE_NVME_ADMIN] = true,
	[SPDK_BDEV_IO_TYPE_NVME_IO] = true,
	[SPDK_BDEV_IO_TYPE_NVME_IO_MD] = true,
	[SPDK_BDEV_IO_TYPE_WRITE_ZEROES] = true,
	[SPDK_BDEV_IO_TYPE_ZCOPY] = true,
	[SPDK_BDEV_IO_TYPE_ABORT] = true,
};

static void
ut_enable_io_type(enum spdk_bdev_io_type io_type, bool enable)
{
	g_io_types_supported[io_type] = enable;
}

static bool
stub_io_type_supported(void *_bdev, enum spdk_bdev_io_type io_type)
{
	return g_io_types_supported[io_type];
}

static struct spdk_bdev_fn_table fn_table = {
	.destruct = stub_destruct,
	.submit_request = stub_submit_request,
	.get_io_channel = bdev_ut_get_io_channel,
	.io_type_supported = stub_io_type_supported,
};

static int
bdev_ut_create_ch(void *io_device, void *ctx_buf)
{
	struct bdev_ut_channel *ch = ctx_buf;

	CU_ASSERT(g_bdev_ut_channel == NULL);
	g_bdev_ut_channel = ch;

	TAILQ_INIT(&ch->outstanding_io);
	ch->outstanding_io_count = 0;
	TAILQ_INIT(&ch->expected_io);
	return 0;
}

static void
bdev_ut_destroy_ch(void *io_device, void *ctx_buf)
{
	CU_ASSERT(g_bdev_ut_channel != NULL);
	g_bdev_ut_channel = NULL;
}

struct spdk_bdev_module bdev_ut_if;

static int
bdev_ut_module_init(void)
{
	spdk_io_device_register(&g_bdev_ut_io_device, bdev_ut_create_ch, bdev_ut_destroy_ch,
				sizeof(struct bdev_ut_channel), NULL);
	spdk_bdev_module_init_done(&bdev_ut_if);
	return 0;
}

static void
bdev_ut_module_fini(void)
{
	spdk_io_device_unregister(&g_bdev_ut_io_device, NULL);
}

struct spdk_bdev_module bdev_ut_if = {
	.name = "bdev_ut",
	.module_init = bdev_ut_module_init,
	.module_fini = bdev_ut_module_fini,
	.async_init = true,
};

static void vbdev_ut_examine(struct spdk_bdev *bdev);

static int
vbdev_ut_module_init(void)
{
	return 0;
}

static void
vbdev_ut_module_fini(void)
{
}

struct spdk_bdev_module vbdev_ut_if = {
	.name = "vbdev_ut",
	.module_init = vbdev_ut_module_init,
	.module_fini = vbdev_ut_module_fini,
	.examine_config = vbdev_ut_examine,
};

SPDK_BDEV_MODULE_REGISTER(bdev_ut, &bdev_ut_if)
SPDK_BDEV_MODULE_REGISTER(vbdev_ut, &vbdev_ut_if)

static void
vbdev_ut_examine(struct spdk_bdev *bdev)
{
	spdk_bdev_module_examine_done(&vbdev_ut_if);
}

static struct spdk_bdev *
allocate_bdev(char *name)
{
	struct spdk_bdev *bdev;
	int rc;

	bdev = calloc(1, sizeof(*bdev));
	SPDK_CU_ASSERT_FATAL(bdev != NULL);

	bdev->name = name;
	bdev->fn_table = &fn_table;
	bdev->module = &bdev_ut_if;
	bdev->blockcnt = 1024;
	bdev->blocklen = 512;

	rc = spdk_bdev_register(bdev);
	CU_ASSERT(rc == 0);

	return bdev;
}

static struct spdk_bdev *
allocate_vbdev(char *name)
{
	struct spdk_bdev *bdev;
	int rc;

	bdev = calloc(1, sizeof(*bdev));
	SPDK_CU_ASSERT_FATAL(bdev != NULL);

	bdev->name = name;
	bdev->fn_table = &fn_table;
	bdev->module = &vbdev_ut_if;

	rc = spdk_bdev_register(bdev);
	CU_ASSERT(rc == 0);

	return bdev;
}

static void
free_bdev(struct spdk_bdev *bdev)
{
	spdk_bdev_unregister(bdev, NULL, NULL);
	poll_threads();
	memset(bdev, 0xFF, sizeof(*bdev));
	free(bdev);
}

static void
free_vbdev(struct spdk_bdev *bdev)
{
	spdk_bdev_unregister(bdev, NULL, NULL);
	poll_threads();
	memset(bdev, 0xFF, sizeof(*bdev));
	free(bdev);
}

static void
get_device_stat_cb(struct spdk_bdev *bdev, struct spdk_bdev_io_stat *stat, void *cb_arg, int rc)
{
	const char *bdev_name;

	CU_ASSERT(bdev != NULL);
	CU_ASSERT(rc == 0);
	bdev_name = spdk_bdev_get_name(bdev);
	CU_ASSERT_STRING_EQUAL(bdev_name, "bdev0");

	free(stat);

	*(bool *)cb_arg = true;
}

static void
bdev_unregister_cb(void *cb_arg, int rc)
{
	g_unregister_arg = cb_arg;
	g_unregister_rc = rc;
}

static void
bdev_ut_event_cb(enum spdk_bdev_event_type type, struct spdk_bdev *bdev, void *event_ctx)
{
}

static void
bdev_open_cb1(enum spdk_bdev_event_type type, struct spdk_bdev *bdev, void *event_ctx)
{
	struct spdk_bdev_desc *desc = *(struct spdk_bdev_desc **)event_ctx;

	g_event_type1 = type;
	if (SPDK_BDEV_EVENT_REMOVE == type) {
		spdk_bdev_close(desc);
	}
}

static void
bdev_open_cb2(enum spdk_bdev_event_type type, struct spdk_bdev *bdev, void *event_ctx)
{
	struct spdk_bdev_desc *desc = *(struct spdk_bdev_desc **)event_ctx;

	g_event_type2 = type;
	if (SPDK_BDEV_EVENT_REMOVE == type) {
		spdk_bdev_close(desc);
	}
}

static void
get_device_stat_test(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_io_stat *stat;
	bool done;

	bdev = allocate_bdev("bdev0");
	stat = calloc(1, sizeof(struct spdk_bdev_io_stat));
	if (stat == NULL) {
		free_bdev(bdev);
		return;
	}

	done = false;
	spdk_bdev_get_device_stat(bdev, stat, get_device_stat_cb, &done);
	while (!done) {
		poll_threads();
	}

	free_bdev(bdev);
}

static void
open_write_test(void)
{
	struct spdk_bdev *bdev[9];
	struct spdk_bdev_desc *desc[9] = {};
	int rc;

	/*
	 * Create a tree of bdevs to test various open w/ write cases.
	 *
	 * bdev0 through bdev3 are physical block devices, such as NVMe
	 * namespaces or Ceph block devices.
	 *
	 * bdev4 is a virtual bdev with multiple base bdevs. This models
	 * caching or RAID use cases.
	 *
	 * bdev5 and bdev6 are virtual bdevs built on the same single base
	 * bdev (bdev2). This models partitioning or logical volume use cases.
	 *
	 * bdev7 is a virtual bdev with multiple base bdevs. One of its base
	 * bdevs (bdev2) is shared with the other virtual bdevs bdev5 and
	 * bdev6. This models caching, RAID, partitioning or logical volume
	 * use cases.
	 *
	 * bdev8 is a virtual bdev with multiple base bdevs, but these
	 * base bdevs are themselves virtual bdevs.
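	 *
	 * The rule exercised throughout this test: a bdev that has been
	 * claimed by a module may only be opened read-only, while an
	 * unclaimed leaf bdev may also be opened for writing.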
	 *
	 *                bdev8
	 *                  |
	 *            +----------+
	 *            |          |
	 *          bdev4      bdev5   bdev6   bdev7
	 *            |          |       |       |
	 *        +---+---+      +---+   +   +---+---+
	 *        |       |          \   |  /         \
	 *      bdev0   bdev1         bdev2          bdev3
	 */

	bdev[0] = allocate_bdev("bdev0");
	rc = spdk_bdev_module_claim_bdev(bdev[0], NULL, &bdev_ut_if);
	CU_ASSERT(rc == 0);

	bdev[1] = allocate_bdev("bdev1");
	rc = spdk_bdev_module_claim_bdev(bdev[1], NULL, &bdev_ut_if);
	CU_ASSERT(rc == 0);

	bdev[2] = allocate_bdev("bdev2");
	rc = spdk_bdev_module_claim_bdev(bdev[2], NULL, &bdev_ut_if);
	CU_ASSERT(rc == 0);

	bdev[3] = allocate_bdev("bdev3");
	rc = spdk_bdev_module_claim_bdev(bdev[3], NULL, &bdev_ut_if);
	CU_ASSERT(rc == 0);

	bdev[4] = allocate_vbdev("bdev4");
	rc = spdk_bdev_module_claim_bdev(bdev[4], NULL, &bdev_ut_if);
	CU_ASSERT(rc == 0);

	bdev[5] = allocate_vbdev("bdev5");
	rc = spdk_bdev_module_claim_bdev(bdev[5], NULL, &bdev_ut_if);
	CU_ASSERT(rc == 0);

	bdev[6] = allocate_vbdev("bdev6");

	bdev[7] = allocate_vbdev("bdev7");

	bdev[8] = allocate_vbdev("bdev8");

	/* Open bdev0 read-only. This should succeed. */
	rc = spdk_bdev_open_ext("bdev0", false, bdev_ut_event_cb, NULL, &desc[0]);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(desc[0] != NULL);
	CU_ASSERT(bdev[0] == spdk_bdev_desc_get_bdev(desc[0]));
	spdk_bdev_close(desc[0]);

	/*
	 * Open bdev1 read/write. This should fail since bdev1 has been claimed
	 * by a vbdev module.
	 */
	rc = spdk_bdev_open_ext("bdev1", true, bdev_ut_event_cb, NULL, &desc[1]);
	CU_ASSERT(rc == -EPERM);

	/*
	 * Open bdev4 read/write. This should fail since bdev4 has been claimed
	 * by a vbdev module.
	 */
	rc = spdk_bdev_open_ext("bdev4", true, bdev_ut_event_cb, NULL, &desc[4]);
	CU_ASSERT(rc == -EPERM);

	/* Open bdev4 read-only. This should succeed. */
	rc = spdk_bdev_open_ext("bdev4", false, bdev_ut_event_cb, NULL, &desc[4]);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(desc[4] != NULL);
	CU_ASSERT(bdev[4] == spdk_bdev_desc_get_bdev(desc[4]));
	spdk_bdev_close(desc[4]);

	/*
	 * Open bdev8 read/write. This should succeed since it is a leaf
	 * bdev.
	 */
	rc = spdk_bdev_open_ext("bdev8", true, bdev_ut_event_cb, NULL, &desc[8]);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(desc[8] != NULL);
	CU_ASSERT(bdev[8] == spdk_bdev_desc_get_bdev(desc[8]));
	spdk_bdev_close(desc[8]);

	/*
	 * Open bdev5 read/write. This should fail since bdev5 has been claimed
	 * by a vbdev module.
	 */
	rc = spdk_bdev_open_ext("bdev5", true, bdev_ut_event_cb, NULL, &desc[5]);
	CU_ASSERT(rc == -EPERM);

	/* Open bdev5 read-only. This should succeed. */
	rc = spdk_bdev_open_ext("bdev5", false, bdev_ut_event_cb, NULL, &desc[5]);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(desc[5] != NULL);
	CU_ASSERT(bdev[5] == spdk_bdev_desc_get_bdev(desc[5]));
	spdk_bdev_close(desc[5]);

	free_vbdev(bdev[8]);

	free_vbdev(bdev[5]);
	free_vbdev(bdev[6]);
	free_vbdev(bdev[7]);

	free_vbdev(bdev[4]);

	free_bdev(bdev[0]);
	free_bdev(bdev[1]);
	free_bdev(bdev[2]);
	free_bdev(bdev[3]);
}

static void
bytes_to_blocks_test(void)
{
	struct spdk_bdev bdev;
	uint64_t offset_blocks, num_blocks;

	memset(&bdev, 0, sizeof(bdev));

	bdev.blocklen = 512;

	/* All parameters valid */
	offset_blocks = 0;
	num_blocks = 0;
	CU_ASSERT(bdev_bytes_to_blocks(&bdev, 512, &offset_blocks, 1024, &num_blocks) == 0);
	CU_ASSERT(offset_blocks == 1);
	CU_ASSERT(num_blocks == 2);

	/* Offset not a block multiple */
	CU_ASSERT(bdev_bytes_to_blocks(&bdev, 3, &offset_blocks, 512, &num_blocks) != 0);

	/* Length not a block multiple */
	CU_ASSERT(bdev_bytes_to_blocks(&bdev, 512, &offset_blocks, 3, &num_blocks) != 0);

	/* In case blocklen is not a power of two */
	bdev.blocklen = 100;
	CU_ASSERT(bdev_bytes_to_blocks(&bdev, 100, &offset_blocks, 200, &num_blocks) == 0);
	CU_ASSERT(offset_blocks == 1);
	CU_ASSERT(num_blocks == 2);

	/* Offset not a block multiple */
	CU_ASSERT(bdev_bytes_to_blocks(&bdev, 3, &offset_blocks, 100, &num_blocks) != 0);

	/* Length not a block multiple */
	CU_ASSERT(bdev_bytes_to_blocks(&bdev, 100, &offset_blocks, 3, &num_blocks) != 0);
}

static void
num_blocks_test(void)
{
	struct spdk_bdev bdev;
	struct spdk_bdev_desc *desc = NULL;
	struct spdk_bdev_desc *desc_ext = NULL;
	int rc;

	memset(&bdev, 0, sizeof(bdev));
	bdev.name = "num_blocks";
	bdev.fn_table = &fn_table;
	bdev.module = &bdev_ut_if;
	spdk_bdev_register(&bdev);
	spdk_bdev_notify_blockcnt_change(&bdev, 50);

	/* Growing block number */
	CU_ASSERT(spdk_bdev_notify_blockcnt_change(&bdev, 70) == 0);
	/* Shrinking block number */
	CU_ASSERT(spdk_bdev_notify_blockcnt_change(&bdev, 30) == 0);

	/* In case the bdev is opened */
	rc = spdk_bdev_open(&bdev, false, NULL, NULL, &desc);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(desc != NULL);

	/* Growing block number */
	CU_ASSERT(spdk_bdev_notify_blockcnt_change(&bdev, 80) == 0);
	/* Shrinking block number */
	CU_ASSERT(spdk_bdev_notify_blockcnt_change(&bdev, 20) != 0);

	/* In case the bdev is opened with the ext API */
	rc = spdk_bdev_open_ext("num_blocks", false, bdev_open_cb1, &desc_ext, &desc_ext);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(desc_ext != NULL);
	CU_ASSERT(&bdev == spdk_bdev_desc_get_bdev(desc_ext));

	g_event_type1 = 0xFF;
	/* Growing block number */
	CU_ASSERT(spdk_bdev_notify_blockcnt_change(&bdev, 90) == 0);

	poll_threads();
	CU_ASSERT_EQUAL(g_event_type1, SPDK_BDEV_EVENT_RESIZE);

	g_event_type1 = 0xFF;
	/* Growing block number and closing */
	CU_ASSERT(spdk_bdev_notify_blockcnt_change(&bdev, 100) == 0);

	spdk_bdev_close(desc);
	spdk_bdev_close(desc_ext);
	spdk_bdev_unregister(&bdev, NULL, NULL);

	poll_threads();

	/* The callback is not invoked for a closed device */
	CU_ASSERT_EQUAL(g_event_type1, 0xFF);
}

static void
io_valid_test(void)
{
	struct spdk_bdev bdev;

	memset(&bdev, 0, sizeof(bdev));

	bdev.blocklen = 512;
	spdk_bdev_notify_blockcnt_change(&bdev, 100);

	/* All parameters valid */
	CU_ASSERT(bdev_io_valid_blocks(&bdev, 1, 2) == true);

	/* Last valid block */
	CU_ASSERT(bdev_io_valid_blocks(&bdev, 99, 1) == true);

	/* Offset past end of bdev */
	CU_ASSERT(bdev_io_valid_blocks(&bdev, 100, 1) == false);

	/* Offset + length past end of bdev */
	CU_ASSERT(bdev_io_valid_blocks(&bdev, 99, 2) == false);

	/* Offset near end of uint64_t range (2^64 - 1) */
	CU_ASSERT(bdev_io_valid_blocks(&bdev, 18446744073709551615ULL, 1) == false);
}

static void
alias_add_del_test(void)
{
	struct spdk_bdev *bdev[3];
	int rc;

	/* Create and register bdevs */
	bdev[0] = allocate_bdev("bdev0");
	SPDK_CU_ASSERT_FATAL(bdev[0] != NULL);

	bdev[1] = allocate_bdev("bdev1");
	SPDK_CU_ASSERT_FATAL(bdev[1] != NULL);

	bdev[2] = allocate_bdev("bdev2");
	SPDK_CU_ASSERT_FATAL(bdev[2] != NULL);

	poll_threads();

	/*
	 * Try adding an alias identical to the name. Since the alias is
	 * identical to the name, it cannot be added to the aliases list.
	 */
	rc = spdk_bdev_alias_add(bdev[0], bdev[0]->name);
	CU_ASSERT(rc == -EEXIST);

	/* Trying to add an empty alias should fail */
	rc = spdk_bdev_alias_add(bdev[0], NULL);
	CU_ASSERT(rc == -EINVAL);

	/* Try adding the same alias to two different registered bdevs */

	/* The alias is used for the first time, so this one should pass */
	rc = spdk_bdev_alias_add(bdev[0], "proper alias 0");
	CU_ASSERT(rc == 0);

	/* The alias was already added to another bdev, so this one should fail */
	rc = spdk_bdev_alias_add(bdev[1], "proper alias 0");
	CU_ASSERT(rc == -EEXIST);

	/* The alias is used for the first time, so this one should pass */
	rc = spdk_bdev_alias_add(bdev[1], "proper alias 1");
	CU_ASSERT(rc == 0);

	/* Try removing aliases from registered bdevs */

	/* The alias is not on the bdev's aliases list, so this one should fail */
	rc = spdk_bdev_alias_del(bdev[0], "not existing");
	CU_ASSERT(rc == -ENOENT);

	/* The alias is present on the bdev's aliases list, so this one should pass */
	rc = spdk_bdev_alias_del(bdev[0], "proper alias 0");
	CU_ASSERT(rc == 0);

	/* The alias is present on the bdev's aliases list, so this one should pass */
	rc = spdk_bdev_alias_del(bdev[1], "proper alias 1");
	CU_ASSERT(rc == 0);

	/* Trying to remove the name instead of an alias should fail; the name cannot be changed or removed */
	rc = spdk_bdev_alias_del(bdev[0], bdev[0]->name);
	CU_ASSERT(rc != 0);

	/* Try deleting all aliases from an empty alias list */
	spdk_bdev_alias_del_all(bdev[2]);
	SPDK_CU_ASSERT_FATAL(TAILQ_EMPTY(&bdev[2]->aliases));

	/* Try deleting all aliases from a non-empty alias list */
	rc = spdk_bdev_alias_add(bdev[2], "alias0");
	CU_ASSERT(rc == 0);
	rc = spdk_bdev_alias_add(bdev[2], "alias1");
	CU_ASSERT(rc == 0);
	spdk_bdev_alias_del_all(bdev[2]);
	CU_ASSERT(TAILQ_EMPTY(&bdev[2]->aliases));

	/* Unregister and free bdevs */
	spdk_bdev_unregister(bdev[0], NULL, NULL);
	spdk_bdev_unregister(bdev[1], NULL, NULL);
	spdk_bdev_unregister(bdev[2], NULL, NULL);

	poll_threads();

	free(bdev[0]);
	free(bdev[1]);
	free(bdev[2]);
}

static void
io_done(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
{
	g_io_done = true;
	g_io_status = bdev_io->internal.status;
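	/* The completion callback owns the bdev_io; release it back to the bdev layer. */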
	spdk_bdev_free_io(bdev_io);
}

static void
bdev_init_cb(void *arg, int rc)
{
	CU_ASSERT(rc == 0);
}

static void
bdev_fini_cb(void *arg)
{
}

struct bdev_ut_io_wait_entry {
	struct spdk_bdev_io_wait_entry entry;
	struct spdk_io_channel *io_ch;
	struct spdk_bdev_desc *desc;
	bool submitted;
};

static void
io_wait_cb(void *arg)
{
	struct bdev_ut_io_wait_entry *entry = arg;
	int rc;

	rc = spdk_bdev_read_blocks(entry->desc, entry->io_ch, NULL, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	entry->submitted = true;
}

static void
bdev_io_types_test(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc = NULL;
	struct spdk_io_channel *io_ch;
	struct spdk_bdev_opts bdev_opts = {
		.bdev_io_pool_size = 4,
		.bdev_io_cache_size = 2,
	};
	int rc;

	rc = spdk_bdev_set_opts(&bdev_opts);
	CU_ASSERT(rc == 0);
	spdk_bdev_initialize(bdev_init_cb, NULL);
	poll_threads();

	bdev = allocate_bdev("bdev0");

	rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc);
	CU_ASSERT(rc == 0);
	poll_threads();
	SPDK_CU_ASSERT_FATAL(desc != NULL);
	CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc));
	io_ch = spdk_bdev_get_io_channel(desc);
	CU_ASSERT(io_ch != NULL);

	/* WRITE and WRITE ZEROES are not supported */
	ut_enable_io_type(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, false);
	ut_enable_io_type(SPDK_BDEV_IO_TYPE_WRITE, false);
	rc = spdk_bdev_write_zeroes_blocks(desc, io_ch, 0, 128, io_done, NULL);
	CU_ASSERT(rc == -ENOTSUP);
	ut_enable_io_type(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, true);
	ut_enable_io_type(SPDK_BDEV_IO_TYPE_WRITE, true);

	spdk_put_io_channel(io_ch);
	spdk_bdev_close(desc);
	free_bdev(bdev);
	spdk_bdev_finish(bdev_fini_cb, NULL);
	poll_threads();
}

static void
bdev_io_wait_test(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc = NULL;
	struct spdk_io_channel *io_ch;
	struct spdk_bdev_opts bdev_opts = {
		.bdev_io_pool_size = 4,
		.bdev_io_cache_size = 2,
	};
	struct bdev_ut_io_wait_entry io_wait_entry;
	struct bdev_ut_io_wait_entry io_wait_entry2;
	int rc;

	rc = spdk_bdev_set_opts(&bdev_opts);
	CU_ASSERT(rc == 0);
	spdk_bdev_initialize(bdev_init_cb, NULL);
	poll_threads();

	bdev = allocate_bdev("bdev0");

	rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc);
	CU_ASSERT(rc == 0);
	poll_threads();
	SPDK_CU_ASSERT_FATAL(desc != NULL);
	CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc));
	io_ch = spdk_bdev_get_io_channel(desc);
	CU_ASSERT(io_ch != NULL);

	rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 4);

	rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, NULL);
	CU_ASSERT(rc == -ENOMEM);

	io_wait_entry.entry.bdev = bdev;
	io_wait_entry.entry.cb_fn = io_wait_cb;
	io_wait_entry.entry.cb_arg = &io_wait_entry;
	io_wait_entry.io_ch = io_ch;
	io_wait_entry.desc = desc;
	io_wait_entry.submitted = false;
	/* Cannot use the same io_wait_entry for two different calls. */
	memcpy(&io_wait_entry2, &io_wait_entry, sizeof(io_wait_entry));
	io_wait_entry2.entry.cb_arg = &io_wait_entry2;

	/* Queue two I/O waits. */
	rc = spdk_bdev_queue_io_wait(bdev, io_ch, &io_wait_entry.entry);
	CU_ASSERT(rc == 0);
	CU_ASSERT(io_wait_entry.submitted == false);
	rc = spdk_bdev_queue_io_wait(bdev, io_ch, &io_wait_entry2.entry);
	CU_ASSERT(rc == 0);
	CU_ASSERT(io_wait_entry2.submitted == false);

	stub_complete_io(1);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 4);
	CU_ASSERT(io_wait_entry.submitted == true);
	CU_ASSERT(io_wait_entry2.submitted == false);

	stub_complete_io(1);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 4);
	CU_ASSERT(io_wait_entry2.submitted == true);

	stub_complete_io(4);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);

	spdk_put_io_channel(io_ch);
	spdk_bdev_close(desc);
	free_bdev(bdev);
	spdk_bdev_finish(bdev_fini_cb, NULL);
	poll_threads();
}

static void
bdev_io_spans_boundary_test(void)
{
	struct spdk_bdev bdev;
	struct spdk_bdev_io bdev_io;

	memset(&bdev, 0, sizeof(bdev));

	bdev.optimal_io_boundary = 0;
	bdev_io.bdev = &bdev;

	/* bdev has no optimal_io_boundary set, so this should return false. */
	CU_ASSERT(bdev_io_should_split(&bdev_io) == false);

	bdev.optimal_io_boundary = 32;
	bdev_io.type = SPDK_BDEV_IO_TYPE_RESET;

	/* RESETs are not based on LBAs, so this should return false. */
	CU_ASSERT(bdev_io_should_split(&bdev_io) == false);

	bdev_io.type = SPDK_BDEV_IO_TYPE_READ;
	bdev_io.u.bdev.offset_blocks = 0;
	bdev_io.u.bdev.num_blocks = 32;

	/* This I/O runs right up to, but does not cross, the boundary, so this should return false. */
	CU_ASSERT(bdev_io_should_split(&bdev_io) == false);

	bdev_io.u.bdev.num_blocks = 33;

	/* This I/O spans a boundary. */
	CU_ASSERT(bdev_io_should_split(&bdev_io) == true);
}

static void
bdev_io_split_test(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc = NULL;
	struct spdk_io_channel *io_ch;
	struct spdk_bdev_opts bdev_opts = {
		.bdev_io_pool_size = 512,
		.bdev_io_cache_size = 64,
	};
	struct iovec iov[BDEV_IO_NUM_CHILD_IOV * 2];
	struct ut_expected_io *expected_io;
	void *md_buf = (void *)0xFF000000;
	uint64_t i;
	int rc;

	rc = spdk_bdev_set_opts(&bdev_opts);
	CU_ASSERT(rc == 0);
	spdk_bdev_initialize(bdev_init_cb, NULL);

	bdev = allocate_bdev("bdev0");

	rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(desc != NULL);
	CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc));
	io_ch = spdk_bdev_get_io_channel(desc);
	CU_ASSERT(io_ch != NULL);

	bdev->optimal_io_boundary = 16;
	bdev->split_on_optimal_io_boundary = false;

	g_io_done = false;

	/* First test that the I/O does not get split if split_on_optimal_io_boundary == false.
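	 * The stub should then see a single child I/O that mirrors the parent
	 * exactly: one READ at offset 14 for 8 blocks, carried in a single
	 * 8 * 512-byte iovec at 0xF000 (worked out from the 512-byte blocklen
	 * set by allocate_bdev()).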
	 */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 14, 8, 1);
	ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 8 * 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 8, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);

	bdev->split_on_optimal_io_boundary = true;
	bdev->md_interleave = false;
	bdev->md_len = 8;

	/* Now test that a single-vector command is split correctly.
	 * Offset 14, length 8, payload 0xF000
	 *  Child - Offset 14, length 2, payload 0xF000
	 *  Child - Offset 16, length 6, payload 0xF000 + 2 * 512
	 *
	 * Set up the expected values before calling spdk_bdev_read_blocks
	 */
	g_io_done = false;
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 14, 2, 1);
	expected_io->md_buf = md_buf;
	ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 2 * 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 16, 6, 1);
	expected_io->md_buf = md_buf + 2 * 8;
	ut_expected_io_set_iov(expected_io, 0, (void *)(0xF000 + 2 * 512), 6 * 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	/* spdk_bdev_read_blocks will submit the first child immediately. */
	rc = spdk_bdev_read_blocks_with_md(desc, io_ch, (void *)0xF000, md_buf,
					   14, 8, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2);
	stub_complete_io(2);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);

	/* Now set up a more complex, multi-vector command that needs to be split,
	 * including splitting iovecs.
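	 * A worked layout (512-byte blocks, 16-block boundary): the three iovecs
	 * below cover 1 + 20 + 11 = 32 blocks starting at offset 14, so the
	 * children land at offset 14 (2 blocks), offset 16 (16 blocks) and
	 * offset 32 (14 blocks), and iov[1] is split across all three children.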
	 */
	iov[0].iov_base = (void *)0x10000;
	iov[0].iov_len = 512;
	iov[1].iov_base = (void *)0x20000;
	iov[1].iov_len = 20 * 512;
	iov[2].iov_base = (void *)0x30000;
	iov[2].iov_len = 11 * 512;

	g_io_done = false;
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 14, 2, 2);
	expected_io->md_buf = md_buf;
	ut_expected_io_set_iov(expected_io, 0, (void *)0x10000, 512);
	ut_expected_io_set_iov(expected_io, 1, (void *)0x20000, 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 16, 16, 1);
	expected_io->md_buf = md_buf + 2 * 8;
	ut_expected_io_set_iov(expected_io, 0, (void *)(0x20000 + 512), 16 * 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 32, 14, 2);
	expected_io->md_buf = md_buf + 18 * 8;
	ut_expected_io_set_iov(expected_io, 0, (void *)(0x20000 + 17 * 512), 3 * 512);
	ut_expected_io_set_iov(expected_io, 1, (void *)0x30000, 11 * 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	rc = spdk_bdev_writev_blocks_with_md(desc, io_ch, iov, 3, md_buf,
					     14, 32, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 3);
	stub_complete_io(3);
	CU_ASSERT(g_io_done == true);

	/* Test a multi-vector command that needs to be split on the stripe boundary
	 * and then split further due to the capacity of child iovs.
	 */
	for (i = 0; i < BDEV_IO_NUM_CHILD_IOV * 2; i++) {
		iov[i].iov_base = (void *)((i + 1) * 0x10000);
		iov[i].iov_len = 512;
	}

	bdev->optimal_io_boundary = BDEV_IO_NUM_CHILD_IOV;
	g_io_done = false;
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0, BDEV_IO_NUM_CHILD_IOV,
					   BDEV_IO_NUM_CHILD_IOV);
	expected_io->md_buf = md_buf;
	for (i = 0; i < BDEV_IO_NUM_CHILD_IOV; i++) {
		ut_expected_io_set_iov(expected_io, i, (void *)((i + 1) * 0x10000), 512);
	}
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, BDEV_IO_NUM_CHILD_IOV,
					   BDEV_IO_NUM_CHILD_IOV, BDEV_IO_NUM_CHILD_IOV);
	expected_io->md_buf = md_buf + BDEV_IO_NUM_CHILD_IOV * 8;
	for (i = 0; i < BDEV_IO_NUM_CHILD_IOV; i++) {
		ut_expected_io_set_iov(expected_io, i,
				       (void *)((i + 1 + BDEV_IO_NUM_CHILD_IOV) * 0x10000), 512);
	}
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	rc = spdk_bdev_readv_blocks_with_md(desc, io_ch, iov, BDEV_IO_NUM_CHILD_IOV * 2, md_buf,
					    0, BDEV_IO_NUM_CHILD_IOV * 2, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);

	/* Test a multi-vector command that needs to be split on the stripe boundary
	 * and then split further due to the capacity of child iovs. In this case,
	 * the length of the rest of the iovec array within an I/O boundary is a
	 * multiple of the block size.
	 */

	/* Fill the iovec array for exactly one boundary.
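	 * (Byte math, assuming the in-tree value BDEV_IO_NUM_CHILD_IOV = 32 and
	 * 512-byte blocks: 30 * 512 + 2 * 256 + 512 = 16384 bytes, one full
	 * 32-block boundary.)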
	 * The iovec count within this boundary is BDEV_IO_NUM_CHILD_IOV + 1, which
	 * exceeds the capacity of child iovs.
	 */
	for (i = 0; i < BDEV_IO_NUM_CHILD_IOV - 2; i++) {
		iov[i].iov_base = (void *)((i + 1) * 0x10000);
		iov[i].iov_len = 512;
	}
	for (i = BDEV_IO_NUM_CHILD_IOV - 2; i < BDEV_IO_NUM_CHILD_IOV; i++) {
		iov[i].iov_base = (void *)((i + 1) * 0x10000);
		iov[i].iov_len = 256;
	}
	iov[BDEV_IO_NUM_CHILD_IOV].iov_base = (void *)((BDEV_IO_NUM_CHILD_IOV + 1) * 0x10000);
	iov[BDEV_IO_NUM_CHILD_IOV].iov_len = 512;

	/* Add an extra iovec to trigger split */
	iov[BDEV_IO_NUM_CHILD_IOV + 1].iov_base = (void *)((BDEV_IO_NUM_CHILD_IOV + 2) * 0x10000);
	iov[BDEV_IO_NUM_CHILD_IOV + 1].iov_len = 512;

	bdev->optimal_io_boundary = BDEV_IO_NUM_CHILD_IOV;
	g_io_done = false;
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0,
					   BDEV_IO_NUM_CHILD_IOV - 1, BDEV_IO_NUM_CHILD_IOV);
	expected_io->md_buf = md_buf;
	for (i = 0; i < BDEV_IO_NUM_CHILD_IOV - 2; i++) {
		ut_expected_io_set_iov(expected_io, i,
				       (void *)((i + 1) * 0x10000), 512);
	}
	for (i = BDEV_IO_NUM_CHILD_IOV - 2; i < BDEV_IO_NUM_CHILD_IOV; i++) {
		ut_expected_io_set_iov(expected_io, i,
				       (void *)((i + 1) * 0x10000), 256);
	}
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, BDEV_IO_NUM_CHILD_IOV - 1,
					   1, 1);
	expected_io->md_buf = md_buf + (BDEV_IO_NUM_CHILD_IOV - 1) * 8;
	ut_expected_io_set_iov(expected_io, 0,
			       (void *)((BDEV_IO_NUM_CHILD_IOV + 1) * 0x10000), 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, BDEV_IO_NUM_CHILD_IOV,
					   1, 1);
	expected_io->md_buf = md_buf + BDEV_IO_NUM_CHILD_IOV * 8;
	ut_expected_io_set_iov(expected_io, 0,
			       (void *)((BDEV_IO_NUM_CHILD_IOV + 2) * 0x10000), 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	rc = spdk_bdev_readv_blocks_with_md(desc, io_ch, iov, BDEV_IO_NUM_CHILD_IOV + 2, md_buf,
					    0, BDEV_IO_NUM_CHILD_IOV + 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2);
	stub_complete_io(2);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);

	/* Test a multi-vector command that needs to be split on the stripe boundary
	 * and then split further due to the capacity of child iovs; the child request
	 * offset should be rewound to the last aligned offset and the I/O should
	 * succeed without error.
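	 * Worked through with 512-byte blocks: iovecs 0-30 cover 31 whole blocks,
	 * and adding the 256-byte iov[31] would leave the first child ending
	 * mid-block, so the first child is rewound to 31 blocks in 31 iovecs and
	 * the two 256-byte iovecs together form the one-block second child.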
	 */
	for (i = 0; i < BDEV_IO_NUM_CHILD_IOV - 1; i++) {
		iov[i].iov_base = (void *)((i + 1) * 0x10000);
		iov[i].iov_len = 512;
	}
	iov[BDEV_IO_NUM_CHILD_IOV - 1].iov_base = (void *)(BDEV_IO_NUM_CHILD_IOV * 0x10000);
	iov[BDEV_IO_NUM_CHILD_IOV - 1].iov_len = 256;

	iov[BDEV_IO_NUM_CHILD_IOV].iov_base = (void *)((BDEV_IO_NUM_CHILD_IOV + 1) * 0x10000);
	iov[BDEV_IO_NUM_CHILD_IOV].iov_len = 256;

	iov[BDEV_IO_NUM_CHILD_IOV + 1].iov_base = (void *)((BDEV_IO_NUM_CHILD_IOV + 2) * 0x10000);
	iov[BDEV_IO_NUM_CHILD_IOV + 1].iov_len = 512;

	bdev->optimal_io_boundary = BDEV_IO_NUM_CHILD_IOV;
	g_io_done = false;
	g_io_status = 0;
	/* The first expected I/O runs from offset 0 up to offset BDEV_IO_NUM_CHILD_IOV - 1 */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0,
					   BDEV_IO_NUM_CHILD_IOV - 1, BDEV_IO_NUM_CHILD_IOV - 1);
	expected_io->md_buf = md_buf;
	for (i = 0; i < BDEV_IO_NUM_CHILD_IOV - 1; i++) {
		ut_expected_io_set_iov(expected_io, i,
				       (void *)((i + 1) * 0x10000), 512);
	}
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
	/* The second expected I/O runs from offset BDEV_IO_NUM_CHILD_IOV - 1 to BDEV_IO_NUM_CHILD_IOV */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, BDEV_IO_NUM_CHILD_IOV - 1,
					   1, 2);
	expected_io->md_buf = md_buf + (BDEV_IO_NUM_CHILD_IOV - 1) * 8;
	ut_expected_io_set_iov(expected_io, 0,
			       (void *)(BDEV_IO_NUM_CHILD_IOV * 0x10000), 256);
	ut_expected_io_set_iov(expected_io, 1,
			       (void *)((BDEV_IO_NUM_CHILD_IOV + 1) * 0x10000), 256);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
	/* The third expected I/O runs from offset BDEV_IO_NUM_CHILD_IOV to BDEV_IO_NUM_CHILD_IOV + 1 */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, BDEV_IO_NUM_CHILD_IOV,
					   1, 1);
	expected_io->md_buf = md_buf + BDEV_IO_NUM_CHILD_IOV * 8;
	ut_expected_io_set_iov(expected_io, 0,
			       (void *)((BDEV_IO_NUM_CHILD_IOV + 2) * 0x10000), 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	rc = spdk_bdev_readv_blocks_with_md(desc, io_ch, iov, BDEV_IO_NUM_CHILD_IOV * 2, md_buf,
					    0, BDEV_IO_NUM_CHILD_IOV + 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2);
	stub_complete_io(2);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);

	/* Test a multi-vector command that needs to be split due to the I/O boundary
	 * and the capacity of child iovs. In particular, test the case where the
	 * command is split due to the capacity of child iovs and the tail address
	 * is not aligned with the block size, so it is rewound to the aligned address.
	 *
	 * The iovec layout used in this read request is complex but is based on
	 * data collected from a real issue. We change the base addresses but keep
	 * the lengths so as not to lose the credibility of the test.
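	 * Totals (derived from the iovec table below, blocklen 512): the 61 iovecs
	 * add up to 543 blocks (278016 bytes), and the 128-block boundary divides
	 * them into seven children of 126, 2, 128, 128, 128, 30 and 1 blocks.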
	 */
	bdev->optimal_io_boundary = 128;
	g_io_done = false;
	g_io_status = 0;

	for (i = 0; i < 31; i++) {
		iov[i].iov_base = (void *)(0xFEED0000000 + (i << 20));
		iov[i].iov_len = 1024;
	}
	iov[31].iov_base = (void *)0xFEED1F00000;
	iov[31].iov_len = 32768;
	iov[32].iov_base = (void *)0xFEED2000000;
	iov[32].iov_len = 160;
	iov[33].iov_base = (void *)0xFEED2100000;
	iov[33].iov_len = 4096;
	iov[34].iov_base = (void *)0xFEED2200000;
	iov[34].iov_len = 4096;
	iov[35].iov_base = (void *)0xFEED2300000;
	iov[35].iov_len = 4096;
	iov[36].iov_base = (void *)0xFEED2400000;
	iov[36].iov_len = 4096;
	iov[37].iov_base = (void *)0xFEED2500000;
	iov[37].iov_len = 4096;
	iov[38].iov_base = (void *)0xFEED2600000;
	iov[38].iov_len = 4096;
	iov[39].iov_base = (void *)0xFEED2700000;
	iov[39].iov_len = 4096;
	iov[40].iov_base = (void *)0xFEED2800000;
	iov[40].iov_len = 4096;
	iov[41].iov_base = (void *)0xFEED2900000;
	iov[41].iov_len = 4096;
	iov[42].iov_base = (void *)0xFEED2A00000;
	iov[42].iov_len = 4096;
	iov[43].iov_base = (void *)0xFEED2B00000;
	iov[43].iov_len = 12288;
	iov[44].iov_base = (void *)0xFEED2C00000;
	iov[44].iov_len = 8192;
	iov[45].iov_base = (void *)0xFEED2F00000;
	iov[45].iov_len = 4096;
	iov[46].iov_base = (void *)0xFEED3000000;
	iov[46].iov_len = 4096;
	iov[47].iov_base = (void *)0xFEED3100000;
	iov[47].iov_len = 4096;
	iov[48].iov_base = (void *)0xFEED3200000;
	iov[48].iov_len = 24576;
	iov[49].iov_base = (void *)0xFEED3300000;
	iov[49].iov_len = 16384;
	iov[50].iov_base = (void *)0xFEED3400000;
	iov[50].iov_len = 12288;
	iov[51].iov_base = (void *)0xFEED3500000;
	iov[51].iov_len = 4096;
	iov[52].iov_base = (void *)0xFEED3600000;
	iov[52].iov_len = 4096;
	iov[53].iov_base = (void *)0xFEED3700000;
	iov[53].iov_len = 4096;
	iov[54].iov_base = (void *)0xFEED3800000;
	iov[54].iov_len = 28672;
	iov[55].iov_base = (void *)0xFEED3900000;
	iov[55].iov_len = 20480;
	iov[56].iov_base = (void *)0xFEED3A00000;
	iov[56].iov_len = 4096;
	iov[57].iov_base = (void *)0xFEED3B00000;
	iov[57].iov_len = 12288;
	iov[58].iov_base = (void *)0xFEED3C00000;
	iov[58].iov_len = 4096;
	iov[59].iov_base = (void *)0xFEED3D00000;
	iov[59].iov_len = 4096;
	iov[60].iov_base = (void *)0xFEED3E00000;
	iov[60].iov_len = 352;

	/* The 1st child IO must be from iov[0] to iov[31], split by the capacity
	 * of child iovs.
	 */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0, 126, 32);
	expected_io->md_buf = md_buf;
	for (i = 0; i < 32; i++) {
		ut_expected_io_set_iov(expected_io, i, iov[i].iov_base, iov[i].iov_len);
	}
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	/* The 2nd child IO must be from iov[32] to the first 864 bytes of iov[33],
	 * split by the I/O boundary requirement.
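	 * (Byte check: 2 blocks = 1024 bytes = the 160 bytes of iov[32] plus the
	 * first 864 bytes of iov[33].)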
	 */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 126, 2, 2);
	expected_io->md_buf = md_buf + 126 * 8;
	ut_expected_io_set_iov(expected_io, 0, iov[32].iov_base, iov[32].iov_len);
	ut_expected_io_set_iov(expected_io, 1, iov[33].iov_base, 864);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	/* The 3rd child IO must be from the remaining 3232 bytes of iov[33] to
	 * the first 864 bytes of iov[46], split by the I/O boundary requirement.
	 */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 128, 128, 14);
	expected_io->md_buf = md_buf + 128 * 8;
	ut_expected_io_set_iov(expected_io, 0, (void *)((uintptr_t)iov[33].iov_base + 864),
			       iov[33].iov_len - 864);
	ut_expected_io_set_iov(expected_io, 1, iov[34].iov_base, iov[34].iov_len);
	ut_expected_io_set_iov(expected_io, 2, iov[35].iov_base, iov[35].iov_len);
	ut_expected_io_set_iov(expected_io, 3, iov[36].iov_base, iov[36].iov_len);
	ut_expected_io_set_iov(expected_io, 4, iov[37].iov_base, iov[37].iov_len);
	ut_expected_io_set_iov(expected_io, 5, iov[38].iov_base, iov[38].iov_len);
	ut_expected_io_set_iov(expected_io, 6, iov[39].iov_base, iov[39].iov_len);
	ut_expected_io_set_iov(expected_io, 7, iov[40].iov_base, iov[40].iov_len);
	ut_expected_io_set_iov(expected_io, 8, iov[41].iov_base, iov[41].iov_len);
	ut_expected_io_set_iov(expected_io, 9, iov[42].iov_base, iov[42].iov_len);
	ut_expected_io_set_iov(expected_io, 10, iov[43].iov_base, iov[43].iov_len);
	ut_expected_io_set_iov(expected_io, 11, iov[44].iov_base, iov[44].iov_len);
	ut_expected_io_set_iov(expected_io, 12, iov[45].iov_base, iov[45].iov_len);
	ut_expected_io_set_iov(expected_io, 13, iov[46].iov_base, 864);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	/* The 4th child IO must be from the remaining 3232 bytes of iov[46] to the
	 * first 864 bytes of iov[52], split by the I/O boundary requirement.
	 */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 256, 128, 7);
	expected_io->md_buf = md_buf + 256 * 8;
	ut_expected_io_set_iov(expected_io, 0, (void *)((uintptr_t)iov[46].iov_base + 864),
			       iov[46].iov_len - 864);
	ut_expected_io_set_iov(expected_io, 1, iov[47].iov_base, iov[47].iov_len);
	ut_expected_io_set_iov(expected_io, 2, iov[48].iov_base, iov[48].iov_len);
	ut_expected_io_set_iov(expected_io, 3, iov[49].iov_base, iov[49].iov_len);
	ut_expected_io_set_iov(expected_io, 4, iov[50].iov_base, iov[50].iov_len);
	ut_expected_io_set_iov(expected_io, 5, iov[51].iov_base, iov[51].iov_len);
	ut_expected_io_set_iov(expected_io, 6, iov[52].iov_base, 864);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	/* The 5th child IO must be from the remaining 3232 bytes of iov[52] to
	 * the first 4960 bytes of iov[57], split by the I/O boundary requirement.
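	 * (Byte check: 128 blocks = 65536 bytes = 3232 + 4096 + 28672 + 20480 +
	 * 4096 + 4960.)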
	 */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 384, 128, 6);
	expected_io->md_buf = md_buf + 384 * 8;
	ut_expected_io_set_iov(expected_io, 0, (void *)((uintptr_t)iov[52].iov_base + 864),
			       iov[52].iov_len - 864);
	ut_expected_io_set_iov(expected_io, 1, iov[53].iov_base, iov[53].iov_len);
	ut_expected_io_set_iov(expected_io, 2, iov[54].iov_base, iov[54].iov_len);
	ut_expected_io_set_iov(expected_io, 3, iov[55].iov_base, iov[55].iov_len);
	ut_expected_io_set_iov(expected_io, 4, iov[56].iov_base, iov[56].iov_len);
	ut_expected_io_set_iov(expected_io, 5, iov[57].iov_base, 4960);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	/* The 6th child IO must be from the remaining 7328 bytes of iov[57]
	 * to the first 3936 bytes of iov[59], split by the capacity of child iovs.
	 */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 512, 30, 3);
	expected_io->md_buf = md_buf + 512 * 8;
	ut_expected_io_set_iov(expected_io, 0, (void *)((uintptr_t)iov[57].iov_base + 4960),
			       iov[57].iov_len - 4960);
	ut_expected_io_set_iov(expected_io, 1, iov[58].iov_base, iov[58].iov_len);
	ut_expected_io_set_iov(expected_io, 2, iov[59].iov_base, 3936);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	/* The 7th child IO is from the remaining 160 bytes of iov[59] and iov[60]. */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 542, 1, 2);
	expected_io->md_buf = md_buf + 542 * 8;
	ut_expected_io_set_iov(expected_io, 0, (void *)((uintptr_t)iov[59].iov_base + 3936),
			       iov[59].iov_len - 3936);
	ut_expected_io_set_iov(expected_io, 1, iov[60].iov_base, iov[60].iov_len);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	rc = spdk_bdev_readv_blocks_with_md(desc, io_ch, iov, 61, md_buf,
					    0, 543, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 5);
	stub_complete_io(5);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
	CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS);

	/* Test a WRITE_ZEROES that would span an I/O boundary. WRITE_ZEROES should not be
	 * split, so test that.
	 */
	bdev->optimal_io_boundary = 15;
	g_io_done = false;
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, 9, 36, 0);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	rc = spdk_bdev_write_zeroes_blocks(desc, io_ch, 9, 36, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == true);

	/* Test an UNMAP. This should also not be split.
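	 * The UNMAP below covers blocks 15-16, which straddle the 16-block
	 * boundary, yet it should reach the stub as a single, iovec-less request
	 * (hence the zero iovcnt in the expectation).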
	 */
	bdev->optimal_io_boundary = 16;
	g_io_done = false;
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_UNMAP, 15, 2, 0);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	rc = spdk_bdev_unmap_blocks(desc, io_ch, 15, 2, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == true);

	/* Test a FLUSH. This should also not be split. */
	bdev->optimal_io_boundary = 16;
	g_io_done = false;
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_FLUSH, 15, 2, 0);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	rc = spdk_bdev_flush_blocks(desc, io_ch, 15, 2, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == true);

	CU_ASSERT(TAILQ_EMPTY(&g_bdev_ut_channel->expected_io));

	/* Child requests return an error status */
	bdev->optimal_io_boundary = 16;
	iov[0].iov_base = (void *)0x10000;
	iov[0].iov_len = 512 * 64;
	g_io_exp_status = SPDK_BDEV_IO_STATUS_FAILED;
	g_io_done = false;
	g_io_status = SPDK_BDEV_IO_STATUS_SUCCESS;

	rc = spdk_bdev_readv_blocks(desc, io_ch, iov, 1, 1, 64, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 5);
	stub_complete_io(4);
	CU_ASSERT(g_io_done == false);
	CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED);

	/* Test that a multi-vector command is terminated with failure, before
	 * continuing the splitting process, when one of its child I/Os fails.
	 * The multi-vector command is the same one as above that needs to be
	 * split on the stripe boundary and then split further due to the
	 * capacity of child iovs.
	 */
	for (i = 0; i < BDEV_IO_NUM_CHILD_IOV - 1; i++) {
		iov[i].iov_base = (void *)((i + 1) * 0x10000);
		iov[i].iov_len = 512;
	}
	iov[BDEV_IO_NUM_CHILD_IOV - 1].iov_base = (void *)(BDEV_IO_NUM_CHILD_IOV * 0x10000);
	iov[BDEV_IO_NUM_CHILD_IOV - 1].iov_len = 256;

	iov[BDEV_IO_NUM_CHILD_IOV].iov_base = (void *)((BDEV_IO_NUM_CHILD_IOV + 1) * 0x10000);
	iov[BDEV_IO_NUM_CHILD_IOV].iov_len = 256;

	iov[BDEV_IO_NUM_CHILD_IOV + 1].iov_base = (void *)((BDEV_IO_NUM_CHILD_IOV + 2) * 0x10000);
	iov[BDEV_IO_NUM_CHILD_IOV + 1].iov_len = 512;

	bdev->optimal_io_boundary = BDEV_IO_NUM_CHILD_IOV;

	g_io_exp_status = SPDK_BDEV_IO_STATUS_FAILED;
	g_io_done = false;
	g_io_status = SPDK_BDEV_IO_STATUS_SUCCESS;

	rc = spdk_bdev_readv_blocks(desc, io_ch, iov, BDEV_IO_NUM_CHILD_IOV * 2, 0,
				    BDEV_IO_NUM_CHILD_IOV + 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED);

	g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS;

	/* For this test we will create the following conditions to hit the code path
	 * where we are trying to send an I/O following a split that has no iovs because
	 * we had to trim them for alignment reasons.
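	 * (Worked byte math, assuming 512-byte blocks: 31 iovecs of 0x212 bytes
	 * make 0x402e bytes, overshooting the 16K = 0x4000 boundary by 0x2e, so
	 * child iovec index 30 is trimmed to 0x212 - 0x2e = 0x1e4 bytes.)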
1620 *
1621 * - 16K boundary, our IO will start at offset 0 with a length of 0x4200
1622 * - Our IOVs are 0x212 in size so that we run into the 16K boundary at child IOV
1623 * position 30 and overshoot by 0x2e.
1624 * - That means we'll send the IO and loop back to pick up the remaining bytes at
1625 * child IOV index 31. When we do, we find that we have to shorten index 31 by 0x2e
1626 * which eliminates that vector, so we just send the first split IO with 31 vectors
1627 * (indices 0-30) and let the completion pick up the last 2 vectors.
1628 */
1629 bdev->optimal_io_boundary = 32;
1630 bdev->split_on_optimal_io_boundary = true;
1631 g_io_done = false;
1632
1633 /* Init all parent IOVs to 0x212 */
1634 for (i = 0; i < BDEV_IO_NUM_CHILD_IOV + 2; i++) {
1635 iov[i].iov_base = (void *)((i + 1) * 0x10000);
1636 iov[i].iov_len = 0x212;
1637 }
1638
1639 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0, BDEV_IO_NUM_CHILD_IOV,
1640 BDEV_IO_NUM_CHILD_IOV - 1);
1641 /* expect indices 0-29 to be 1:1 with the parent iovs */
1642 for (i = 0; i < BDEV_IO_NUM_CHILD_IOV - 2; i++) {
1643 ut_expected_io_set_iov(expected_io, i, iov[i].iov_base, iov[i].iov_len);
1644 }
1645
1646 /* expect index 30 to be shortened to 0x1e4 (0x212 - 0x2e) because of the alignment
1647 * where 0x2e is the amount we overshot the 16K boundary
1648 */
1649 ut_expected_io_set_iov(expected_io, BDEV_IO_NUM_CHILD_IOV - 2,
1650 (void *)(iov[BDEV_IO_NUM_CHILD_IOV - 2].iov_base), 0x1e4);
1651 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
1652
1653 /* 2nd child IO will have 2 remaining vectors, one picking up from the vector that
1654 * was shortened, taking it to the next boundary, and then a final one to get us to
1655 * 0x4200 bytes for the IO.
1656 */
1657 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, BDEV_IO_NUM_CHILD_IOV,
1658 1, 2);
1659 /* position 30 picks up the remaining bytes to the next boundary */
1660 ut_expected_io_set_iov(expected_io, 0,
1661 (void *)(iov[BDEV_IO_NUM_CHILD_IOV - 2].iov_base + 0x1e4), 0x2e);
1662
1663 /* position 31 picks up the rest of the transfer to get us to 0x4200 */
1664 ut_expected_io_set_iov(expected_io, 1,
1665 (void *)(iov[BDEV_IO_NUM_CHILD_IOV - 1].iov_base), 0x1d2);
1666 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
1667
1668 rc = spdk_bdev_readv_blocks(desc, io_ch, iov, BDEV_IO_NUM_CHILD_IOV + 1, 0,
1669 BDEV_IO_NUM_CHILD_IOV + 1, io_done, NULL);
1670 CU_ASSERT(rc == 0);
1671 CU_ASSERT(g_io_done == false);
1672
1673 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
1674 stub_complete_io(1);
1675 CU_ASSERT(g_io_done == false);
1676
1677 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
1678 stub_complete_io(1);
1679 CU_ASSERT(g_io_done == true);
1680 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
1681
1682 spdk_put_io_channel(io_ch);
1683 spdk_bdev_close(desc);
1684 free_bdev(bdev);
1685 spdk_bdev_finish(bdev_fini_cb, NULL);
1686 poll_threads();
1687 }
1688
1689 static void
1690 bdev_io_split_with_io_wait(void)
1691 {
1692 struct spdk_bdev *bdev;
1693 struct spdk_bdev_desc *desc = NULL;
1694 struct spdk_io_channel *io_ch;
1695 struct spdk_bdev_channel *channel;
1696 struct spdk_bdev_mgmt_channel *mgmt_ch;
1697 struct spdk_bdev_opts bdev_opts = {
1698 .bdev_io_pool_size = 2,
1699 .bdev_io_cache_size = 1,
1700 };
1701 struct iovec iov[3];
1702 struct ut_expected_io *expected_io;
1703 int rc;
1704
1705 rc = spdk_bdev_set_opts(&bdev_opts);
1706 CU_ASSERT(rc == 0);
1707 spdk_bdev_initialize(bdev_init_cb,
NULL); 1708 1709 bdev = allocate_bdev("bdev0"); 1710 1711 rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc); 1712 CU_ASSERT(rc == 0); 1713 CU_ASSERT(desc != NULL); 1714 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 1715 io_ch = spdk_bdev_get_io_channel(desc); 1716 CU_ASSERT(io_ch != NULL); 1717 channel = spdk_io_channel_get_ctx(io_ch); 1718 mgmt_ch = channel->shared_resource->mgmt_ch; 1719 1720 bdev->optimal_io_boundary = 16; 1721 bdev->split_on_optimal_io_boundary = true; 1722 1723 rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, NULL); 1724 CU_ASSERT(rc == 0); 1725 1726 /* Now test that a single-vector command is split correctly. 1727 * Offset 14, length 8, payload 0xF000 1728 * Child - Offset 14, length 2, payload 0xF000 1729 * Child - Offset 16, length 6, payload 0xF000 + 2 * 512 1730 * 1731 * Set up the expected values before calling spdk_bdev_read_blocks 1732 */ 1733 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 14, 2, 1); 1734 ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 2 * 512); 1735 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1736 1737 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 16, 6, 1); 1738 ut_expected_io_set_iov(expected_io, 0, (void *)(0xF000 + 2 * 512), 6 * 512); 1739 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1740 1741 /* The following children will be submitted sequentially due to the capacity of 1742 * spdk_bdev_io. 1743 */ 1744 1745 /* The first child I/O will be queued to wait until an spdk_bdev_io becomes available */ 1746 rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 8, io_done, NULL); 1747 CU_ASSERT(rc == 0); 1748 CU_ASSERT(!TAILQ_EMPTY(&mgmt_ch->io_wait_queue)); 1749 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 1750 1751 /* Completing the first read I/O will submit the first child */ 1752 stub_complete_io(1); 1753 CU_ASSERT(TAILQ_EMPTY(&mgmt_ch->io_wait_queue)); 1754 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 1755 1756 /* Completing the first child will submit the second child */ 1757 stub_complete_io(1); 1758 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 1759 1760 /* Complete the second child I/O. This should result in our callback getting 1761 * invoked since the parent I/O is now complete. 1762 */ 1763 stub_complete_io(1); 1764 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 1765 1766 /* Now set up a more complex, multi-vector command that needs to be split, 1767 * including splitting iovecs. 
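 * As a rough guide to the split math: each child ends at the next multiple of
 * optimal_io_boundary, so with a boundary of 16 the expected children below are
 * blocks 14-15, 16-31 and 32-45, i.e. 2 + 16 + 14 = 32 blocks in total.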
1768 */ 1769 iov[0].iov_base = (void *)0x10000; 1770 iov[0].iov_len = 512; 1771 iov[1].iov_base = (void *)0x20000; 1772 iov[1].iov_len = 20 * 512; 1773 iov[2].iov_base = (void *)0x30000; 1774 iov[2].iov_len = 11 * 512; 1775 1776 g_io_done = false; 1777 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 14, 2, 2); 1778 ut_expected_io_set_iov(expected_io, 0, (void *)0x10000, 512); 1779 ut_expected_io_set_iov(expected_io, 1, (void *)0x20000, 512); 1780 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1781 1782 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 16, 16, 1); 1783 ut_expected_io_set_iov(expected_io, 0, (void *)(0x20000 + 512), 16 * 512); 1784 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1785 1786 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 32, 14, 2); 1787 ut_expected_io_set_iov(expected_io, 0, (void *)(0x20000 + 17 * 512), 3 * 512); 1788 ut_expected_io_set_iov(expected_io, 1, (void *)0x30000, 11 * 512); 1789 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1790 1791 rc = spdk_bdev_writev_blocks(desc, io_ch, iov, 3, 14, 32, io_done, NULL); 1792 CU_ASSERT(rc == 0); 1793 CU_ASSERT(g_io_done == false); 1794 1795 /* The following children will be submitted sequentially due to the capacity of 1796 * spdk_bdev_io. 1797 */ 1798 1799 /* Completing the first child will submit the second child */ 1800 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 1801 stub_complete_io(1); 1802 CU_ASSERT(g_io_done == false); 1803 1804 /* Completing the second child will submit the third child */ 1805 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 1806 stub_complete_io(1); 1807 CU_ASSERT(g_io_done == false); 1808 1809 /* Completing the third child will result in our callback getting invoked 1810 * since the parent I/O is now complete. 
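 * Note how the 20-block iov[1] was carved across all three children:
 * 512 + 16 * 512 + 3 * 512 = 20 * 512 bytes.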
1811 */ 1812 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 1813 stub_complete_io(1); 1814 CU_ASSERT(g_io_done == true); 1815 1816 CU_ASSERT(TAILQ_EMPTY(&g_bdev_ut_channel->expected_io)); 1817 1818 spdk_put_io_channel(io_ch); 1819 spdk_bdev_close(desc); 1820 free_bdev(bdev); 1821 spdk_bdev_finish(bdev_fini_cb, NULL); 1822 poll_threads(); 1823 } 1824 1825 static void 1826 bdev_io_alignment(void) 1827 { 1828 struct spdk_bdev *bdev; 1829 struct spdk_bdev_desc *desc = NULL; 1830 struct spdk_io_channel *io_ch; 1831 struct spdk_bdev_opts bdev_opts = { 1832 .bdev_io_pool_size = 20, 1833 .bdev_io_cache_size = 2, 1834 }; 1835 int rc; 1836 void *buf = NULL; 1837 struct iovec iovs[2]; 1838 int iovcnt; 1839 uint64_t alignment; 1840 1841 rc = spdk_bdev_set_opts(&bdev_opts); 1842 CU_ASSERT(rc == 0); 1843 spdk_bdev_initialize(bdev_init_cb, NULL); 1844 1845 fn_table.submit_request = stub_submit_request_get_buf; 1846 bdev = allocate_bdev("bdev0"); 1847 1848 rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc); 1849 CU_ASSERT(rc == 0); 1850 CU_ASSERT(desc != NULL); 1851 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 1852 io_ch = spdk_bdev_get_io_channel(desc); 1853 CU_ASSERT(io_ch != NULL); 1854 1855 /* Create aligned buffer */ 1856 rc = posix_memalign(&buf, 4096, 8192); 1857 SPDK_CU_ASSERT_FATAL(rc == 0); 1858 1859 /* Pass aligned single buffer with no alignment required */ 1860 alignment = 1; 1861 bdev->required_alignment = spdk_u32log2(alignment); 1862 1863 rc = spdk_bdev_write_blocks(desc, io_ch, buf, 0, 1, io_done, NULL); 1864 CU_ASSERT(rc == 0); 1865 stub_complete_io(1); 1866 CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt, 1867 alignment)); 1868 1869 rc = spdk_bdev_read_blocks(desc, io_ch, buf, 0, 1, io_done, NULL); 1870 CU_ASSERT(rc == 0); 1871 stub_complete_io(1); 1872 CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt, 1873 alignment)); 1874 1875 /* Pass unaligned single buffer with no alignment required */ 1876 alignment = 1; 1877 bdev->required_alignment = spdk_u32log2(alignment); 1878 1879 rc = spdk_bdev_write_blocks(desc, io_ch, buf + 4, 0, 1, io_done, NULL); 1880 CU_ASSERT(rc == 0); 1881 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0); 1882 CU_ASSERT(g_bdev_io->u.bdev.iovs[0].iov_base == buf + 4); 1883 stub_complete_io(1); 1884 1885 rc = spdk_bdev_read_blocks(desc, io_ch, buf + 4, 0, 1, io_done, NULL); 1886 CU_ASSERT(rc == 0); 1887 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0); 1888 CU_ASSERT(g_bdev_io->u.bdev.iovs[0].iov_base == buf + 4); 1889 stub_complete_io(1); 1890 1891 /* Pass unaligned single buffer with 512 alignment required */ 1892 alignment = 512; 1893 bdev->required_alignment = spdk_u32log2(alignment); 1894 1895 rc = spdk_bdev_write_blocks(desc, io_ch, buf + 4, 0, 1, io_done, NULL); 1896 CU_ASSERT(rc == 0); 1897 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 1); 1898 CU_ASSERT(g_bdev_io->u.bdev.iovs == &g_bdev_io->internal.bounce_iov); 1899 CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt, 1900 alignment)); 1901 stub_complete_io(1); 1902 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0); 1903 1904 rc = spdk_bdev_read_blocks(desc, io_ch, buf + 4, 0, 1, io_done, NULL); 1905 CU_ASSERT(rc == 0); 1906 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 1); 1907 CU_ASSERT(g_bdev_io->u.bdev.iovs == &g_bdev_io->internal.bounce_iov); 1908 CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt, 1909 alignment)); 1910 stub_complete_io(1); 1911 
CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0); 1912 1913 /* Pass unaligned single buffer with 4096 alignment required */ 1914 alignment = 4096; 1915 bdev->required_alignment = spdk_u32log2(alignment); 1916 1917 rc = spdk_bdev_write_blocks(desc, io_ch, buf + 8, 0, 1, io_done, NULL); 1918 CU_ASSERT(rc == 0); 1919 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 1); 1920 CU_ASSERT(g_bdev_io->u.bdev.iovs == &g_bdev_io->internal.bounce_iov); 1921 CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt, 1922 alignment)); 1923 stub_complete_io(1); 1924 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0); 1925 1926 rc = spdk_bdev_read_blocks(desc, io_ch, buf + 8, 0, 1, io_done, NULL); 1927 CU_ASSERT(rc == 0); 1928 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 1); 1929 CU_ASSERT(g_bdev_io->u.bdev.iovs == &g_bdev_io->internal.bounce_iov); 1930 CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt, 1931 alignment)); 1932 stub_complete_io(1); 1933 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0); 1934 1935 /* Pass aligned iovs with no alignment required */ 1936 alignment = 1; 1937 bdev->required_alignment = spdk_u32log2(alignment); 1938 1939 iovcnt = 1; 1940 iovs[0].iov_base = buf; 1941 iovs[0].iov_len = 512; 1942 1943 rc = spdk_bdev_writev(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL); 1944 CU_ASSERT(rc == 0); 1945 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0); 1946 stub_complete_io(1); 1947 CU_ASSERT(g_bdev_io->u.bdev.iovs[0].iov_base == iovs[0].iov_base); 1948 1949 rc = spdk_bdev_readv(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL); 1950 CU_ASSERT(rc == 0); 1951 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0); 1952 stub_complete_io(1); 1953 CU_ASSERT(g_bdev_io->u.bdev.iovs[0].iov_base == iovs[0].iov_base); 1954 1955 /* Pass unaligned iovs with no alignment required */ 1956 alignment = 1; 1957 bdev->required_alignment = spdk_u32log2(alignment); 1958 1959 iovcnt = 2; 1960 iovs[0].iov_base = buf + 16; 1961 iovs[0].iov_len = 256; 1962 iovs[1].iov_base = buf + 16 + 256 + 32; 1963 iovs[1].iov_len = 256; 1964 1965 rc = spdk_bdev_writev(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL); 1966 CU_ASSERT(rc == 0); 1967 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0); 1968 stub_complete_io(1); 1969 CU_ASSERT(g_bdev_io->u.bdev.iovs[0].iov_base == iovs[0].iov_base); 1970 1971 rc = spdk_bdev_readv(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL); 1972 CU_ASSERT(rc == 0); 1973 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0); 1974 stub_complete_io(1); 1975 CU_ASSERT(g_bdev_io->u.bdev.iovs[0].iov_base == iovs[0].iov_base); 1976 1977 /* Pass unaligned iov with 2048 alignment required */ 1978 alignment = 2048; 1979 bdev->required_alignment = spdk_u32log2(alignment); 1980 1981 iovcnt = 2; 1982 iovs[0].iov_base = buf + 16; 1983 iovs[0].iov_len = 256; 1984 iovs[1].iov_base = buf + 16 + 256 + 32; 1985 iovs[1].iov_len = 256; 1986 1987 rc = spdk_bdev_writev(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL); 1988 CU_ASSERT(rc == 0); 1989 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == iovcnt); 1990 CU_ASSERT(g_bdev_io->u.bdev.iovs == &g_bdev_io->internal.bounce_iov); 1991 CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt, 1992 alignment)); 1993 stub_complete_io(1); 1994 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0); 1995 1996 rc = spdk_bdev_readv(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL); 1997 CU_ASSERT(rc == 0); 1998 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == iovcnt); 1999 CU_ASSERT(g_bdev_io->u.bdev.iovs == &g_bdev_io->internal.bounce_iov); 
2000 CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt, 2001 alignment)); 2002 stub_complete_io(1); 2003 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0); 2004 2005 /* Pass iov without allocated buffer without alignment required */ 2006 alignment = 1; 2007 bdev->required_alignment = spdk_u32log2(alignment); 2008 2009 iovcnt = 1; 2010 iovs[0].iov_base = NULL; 2011 iovs[0].iov_len = 0; 2012 2013 rc = spdk_bdev_readv(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL); 2014 CU_ASSERT(rc == 0); 2015 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0); 2016 CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt, 2017 alignment)); 2018 stub_complete_io(1); 2019 2020 /* Pass iov without allocated buffer with 1024 alignment required */ 2021 alignment = 1024; 2022 bdev->required_alignment = spdk_u32log2(alignment); 2023 2024 iovcnt = 1; 2025 iovs[0].iov_base = NULL; 2026 iovs[0].iov_len = 0; 2027 2028 rc = spdk_bdev_readv(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL); 2029 CU_ASSERT(rc == 0); 2030 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0); 2031 CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt, 2032 alignment)); 2033 stub_complete_io(1); 2034 2035 spdk_put_io_channel(io_ch); 2036 spdk_bdev_close(desc); 2037 free_bdev(bdev); 2038 fn_table.submit_request = stub_submit_request; 2039 spdk_bdev_finish(bdev_fini_cb, NULL); 2040 poll_threads(); 2041 2042 free(buf); 2043 } 2044 2045 static void 2046 bdev_io_alignment_with_boundary(void) 2047 { 2048 struct spdk_bdev *bdev; 2049 struct spdk_bdev_desc *desc = NULL; 2050 struct spdk_io_channel *io_ch; 2051 struct spdk_bdev_opts bdev_opts = { 2052 .bdev_io_pool_size = 20, 2053 .bdev_io_cache_size = 2, 2054 }; 2055 int rc; 2056 void *buf = NULL; 2057 struct iovec iovs[2]; 2058 int iovcnt; 2059 uint64_t alignment; 2060 2061 rc = spdk_bdev_set_opts(&bdev_opts); 2062 CU_ASSERT(rc == 0); 2063 spdk_bdev_initialize(bdev_init_cb, NULL); 2064 2065 fn_table.submit_request = stub_submit_request_get_buf; 2066 bdev = allocate_bdev("bdev0"); 2067 2068 rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc); 2069 CU_ASSERT(rc == 0); 2070 CU_ASSERT(desc != NULL); 2071 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 2072 io_ch = spdk_bdev_get_io_channel(desc); 2073 CU_ASSERT(io_ch != NULL); 2074 2075 /* Create aligned buffer */ 2076 rc = posix_memalign(&buf, 4096, 131072); 2077 SPDK_CU_ASSERT_FATAL(rc == 0); 2078 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 2079 2080 /* 512 * 3 with 2 IO boundary, allocate small data buffer from bdev layer */ 2081 alignment = 512; 2082 bdev->required_alignment = spdk_u32log2(alignment); 2083 bdev->optimal_io_boundary = 2; 2084 bdev->split_on_optimal_io_boundary = true; 2085 2086 iovcnt = 1; 2087 iovs[0].iov_base = NULL; 2088 iovs[0].iov_len = 512 * 3; 2089 2090 rc = spdk_bdev_readv_blocks(desc, io_ch, iovs, iovcnt, 1, 3, io_done, NULL); 2091 CU_ASSERT(rc == 0); 2092 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 2093 stub_complete_io(2); 2094 2095 /* 8KiB with 16 IO boundary, allocate large data buffer from bdev layer */ 2096 alignment = 512; 2097 bdev->required_alignment = spdk_u32log2(alignment); 2098 bdev->optimal_io_boundary = 16; 2099 bdev->split_on_optimal_io_boundary = true; 2100 2101 iovcnt = 1; 2102 iovs[0].iov_base = NULL; 2103 iovs[0].iov_len = 512 * 16; 2104 2105 rc = spdk_bdev_readv_blocks(desc, io_ch, iovs, iovcnt, 1, 16, io_done, NULL); 2106 CU_ASSERT(rc == 0); 2107 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 
2108 stub_complete_io(2); 2109 2110 /* 512 * 160 with 128 IO boundary, 63.5KiB + 16.5KiB for the two children requests */ 2111 alignment = 512; 2112 bdev->required_alignment = spdk_u32log2(alignment); 2113 bdev->optimal_io_boundary = 128; 2114 bdev->split_on_optimal_io_boundary = true; 2115 2116 iovcnt = 1; 2117 iovs[0].iov_base = buf + 16; 2118 iovs[0].iov_len = 512 * 160; 2119 rc = spdk_bdev_readv_blocks(desc, io_ch, iovs, iovcnt, 1, 160, io_done, NULL); 2120 CU_ASSERT(rc == 0); 2121 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 2122 stub_complete_io(2); 2123 2124 /* 512 * 3 with 2 IO boundary */ 2125 alignment = 512; 2126 bdev->required_alignment = spdk_u32log2(alignment); 2127 bdev->optimal_io_boundary = 2; 2128 bdev->split_on_optimal_io_boundary = true; 2129 2130 iovcnt = 2; 2131 iovs[0].iov_base = buf + 16; 2132 iovs[0].iov_len = 512; 2133 iovs[1].iov_base = buf + 16 + 512 + 32; 2134 iovs[1].iov_len = 1024; 2135 2136 rc = spdk_bdev_writev_blocks(desc, io_ch, iovs, iovcnt, 1, 3, io_done, NULL); 2137 CU_ASSERT(rc == 0); 2138 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 2139 stub_complete_io(2); 2140 2141 rc = spdk_bdev_readv_blocks(desc, io_ch, iovs, iovcnt, 1, 3, io_done, NULL); 2142 CU_ASSERT(rc == 0); 2143 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 2144 stub_complete_io(2); 2145 2146 /* 512 * 64 with 32 IO boundary */ 2147 bdev->optimal_io_boundary = 32; 2148 iovcnt = 2; 2149 iovs[0].iov_base = buf + 16; 2150 iovs[0].iov_len = 16384; 2151 iovs[1].iov_base = buf + 16 + 16384 + 32; 2152 iovs[1].iov_len = 16384; 2153 2154 rc = spdk_bdev_writev_blocks(desc, io_ch, iovs, iovcnt, 1, 64, io_done, NULL); 2155 CU_ASSERT(rc == 0); 2156 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 3); 2157 stub_complete_io(3); 2158 2159 rc = spdk_bdev_readv_blocks(desc, io_ch, iovs, iovcnt, 1, 64, io_done, NULL); 2160 CU_ASSERT(rc == 0); 2161 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 3); 2162 stub_complete_io(3); 2163 2164 /* 512 * 160 with 32 IO boundary */ 2165 iovcnt = 1; 2166 iovs[0].iov_base = buf + 16; 2167 iovs[0].iov_len = 16384 + 65536; 2168 2169 rc = spdk_bdev_writev_blocks(desc, io_ch, iovs, iovcnt, 1, 160, io_done, NULL); 2170 CU_ASSERT(rc == 0); 2171 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 6); 2172 stub_complete_io(6); 2173 2174 spdk_put_io_channel(io_ch); 2175 spdk_bdev_close(desc); 2176 free_bdev(bdev); 2177 fn_table.submit_request = stub_submit_request; 2178 spdk_bdev_finish(bdev_fini_cb, NULL); 2179 poll_threads(); 2180 2181 free(buf); 2182 } 2183 2184 static void 2185 histogram_status_cb(void *cb_arg, int status) 2186 { 2187 g_status = status; 2188 } 2189 2190 static void 2191 histogram_data_cb(void *cb_arg, int status, struct spdk_histogram_data *histogram) 2192 { 2193 g_status = status; 2194 g_histogram = histogram; 2195 } 2196 2197 static void 2198 histogram_io_count(void *ctx, uint64_t start, uint64_t end, uint64_t count, 2199 uint64_t total, uint64_t so_far) 2200 { 2201 g_count += count; 2202 } 2203 2204 static void 2205 bdev_histograms(void) 2206 { 2207 struct spdk_bdev *bdev; 2208 struct spdk_bdev_desc *desc = NULL; 2209 struct spdk_io_channel *ch; 2210 struct spdk_histogram_data *histogram; 2211 uint8_t buf[4096]; 2212 int rc; 2213 2214 spdk_bdev_initialize(bdev_init_cb, NULL); 2215 2216 bdev = allocate_bdev("bdev"); 2217 2218 rc = spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc); 2219 CU_ASSERT(rc == 0); 2220 CU_ASSERT(desc != NULL); 2221 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 2222 2223 ch 
= spdk_bdev_get_io_channel(desc); 2224 CU_ASSERT(ch != NULL); 2225 2226 /* Enable histogram */ 2227 g_status = -1; 2228 spdk_bdev_histogram_enable(bdev, histogram_status_cb, NULL, true); 2229 poll_threads(); 2230 CU_ASSERT(g_status == 0); 2231 CU_ASSERT(bdev->internal.histogram_enabled == true); 2232 2233 /* Allocate histogram */ 2234 histogram = spdk_histogram_data_alloc(); 2235 SPDK_CU_ASSERT_FATAL(histogram != NULL); 2236 2237 /* Check if histogram is zeroed */ 2238 spdk_bdev_histogram_get(bdev, histogram, histogram_data_cb, NULL); 2239 poll_threads(); 2240 CU_ASSERT(g_status == 0); 2241 SPDK_CU_ASSERT_FATAL(g_histogram != NULL); 2242 2243 g_count = 0; 2244 spdk_histogram_data_iterate(g_histogram, histogram_io_count, NULL); 2245 2246 CU_ASSERT(g_count == 0); 2247 2248 rc = spdk_bdev_write_blocks(desc, ch, buf, 0, 1, io_done, NULL); 2249 CU_ASSERT(rc == 0); 2250 2251 spdk_delay_us(10); 2252 stub_complete_io(1); 2253 poll_threads(); 2254 2255 rc = spdk_bdev_read_blocks(desc, ch, buf, 0, 1, io_done, NULL); 2256 CU_ASSERT(rc == 0); 2257 2258 spdk_delay_us(10); 2259 stub_complete_io(1); 2260 poll_threads(); 2261 2262 /* Check if histogram gathered data from all I/O channels */ 2263 g_histogram = NULL; 2264 spdk_bdev_histogram_get(bdev, histogram, histogram_data_cb, NULL); 2265 poll_threads(); 2266 CU_ASSERT(g_status == 0); 2267 CU_ASSERT(bdev->internal.histogram_enabled == true); 2268 SPDK_CU_ASSERT_FATAL(g_histogram != NULL); 2269 2270 g_count = 0; 2271 spdk_histogram_data_iterate(g_histogram, histogram_io_count, NULL); 2272 CU_ASSERT(g_count == 2); 2273 2274 /* Disable histogram */ 2275 spdk_bdev_histogram_enable(bdev, histogram_status_cb, NULL, false); 2276 poll_threads(); 2277 CU_ASSERT(g_status == 0); 2278 CU_ASSERT(bdev->internal.histogram_enabled == false); 2279 2280 /* Try to run histogram commands on disabled bdev */ 2281 spdk_bdev_histogram_get(bdev, histogram, histogram_data_cb, NULL); 2282 poll_threads(); 2283 CU_ASSERT(g_status == -EFAULT); 2284 2285 spdk_histogram_data_free(histogram); 2286 spdk_put_io_channel(ch); 2287 spdk_bdev_close(desc); 2288 free_bdev(bdev); 2289 spdk_bdev_finish(bdev_fini_cb, NULL); 2290 poll_threads(); 2291 } 2292 2293 static void 2294 _bdev_compare(bool emulated) 2295 { 2296 struct spdk_bdev *bdev; 2297 struct spdk_bdev_desc *desc = NULL; 2298 struct spdk_io_channel *ioch; 2299 struct ut_expected_io *expected_io; 2300 uint64_t offset, num_blocks; 2301 uint32_t num_completed; 2302 char aa_buf[512]; 2303 char bb_buf[512]; 2304 struct iovec compare_iov; 2305 uint8_t io_type; 2306 int rc; 2307 2308 if (emulated) { 2309 io_type = SPDK_BDEV_IO_TYPE_READ; 2310 } else { 2311 io_type = SPDK_BDEV_IO_TYPE_COMPARE; 2312 } 2313 2314 memset(aa_buf, 0xaa, sizeof(aa_buf)); 2315 memset(bb_buf, 0xbb, sizeof(bb_buf)); 2316 2317 g_io_types_supported[SPDK_BDEV_IO_TYPE_COMPARE] = !emulated; 2318 2319 spdk_bdev_initialize(bdev_init_cb, NULL); 2320 fn_table.submit_request = stub_submit_request_get_buf; 2321 bdev = allocate_bdev("bdev"); 2322 2323 rc = spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc); 2324 CU_ASSERT_EQUAL(rc, 0); 2325 SPDK_CU_ASSERT_FATAL(desc != NULL); 2326 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 2327 ioch = spdk_bdev_get_io_channel(desc); 2328 SPDK_CU_ASSERT_FATAL(ioch != NULL); 2329 2330 fn_table.submit_request = stub_submit_request_get_buf; 2331 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 2332 2333 offset = 50; 2334 num_blocks = 1; 2335 compare_iov.iov_base = aa_buf; 2336 compare_iov.iov_len = sizeof(aa_buf); 2337 2338 
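/* First pass: the stub's backing data (g_compare_read_buf, seeded just below)
 * holds the same 0xaa pattern as compare_iov, so this compare is expected to
 * succeed whether COMPARE is native or emulated via READ. */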
expected_io = ut_alloc_expected_io(io_type, offset, num_blocks, 0); 2339 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2340 2341 g_io_done = false; 2342 g_compare_read_buf = aa_buf; 2343 g_compare_read_buf_len = sizeof(aa_buf); 2344 rc = spdk_bdev_comparev_blocks(desc, ioch, &compare_iov, 1, offset, num_blocks, io_done, NULL); 2345 CU_ASSERT_EQUAL(rc, 0); 2346 num_completed = stub_complete_io(1); 2347 CU_ASSERT_EQUAL(num_completed, 1); 2348 CU_ASSERT(g_io_done == true); 2349 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS); 2350 2351 expected_io = ut_alloc_expected_io(io_type, offset, num_blocks, 0); 2352 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2353 2354 g_io_done = false; 2355 g_compare_read_buf = bb_buf; 2356 g_compare_read_buf_len = sizeof(bb_buf); 2357 rc = spdk_bdev_comparev_blocks(desc, ioch, &compare_iov, 1, offset, num_blocks, io_done, NULL); 2358 CU_ASSERT_EQUAL(rc, 0); 2359 num_completed = stub_complete_io(1); 2360 CU_ASSERT_EQUAL(num_completed, 1); 2361 CU_ASSERT(g_io_done == true); 2362 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_MISCOMPARE); 2363 2364 spdk_put_io_channel(ioch); 2365 spdk_bdev_close(desc); 2366 free_bdev(bdev); 2367 fn_table.submit_request = stub_submit_request; 2368 spdk_bdev_finish(bdev_fini_cb, NULL); 2369 poll_threads(); 2370 2371 g_io_types_supported[SPDK_BDEV_IO_TYPE_COMPARE] = true; 2372 2373 g_compare_read_buf = NULL; 2374 } 2375 2376 static void 2377 bdev_compare(void) 2378 { 2379 _bdev_compare(true); 2380 _bdev_compare(false); 2381 } 2382 2383 static void 2384 bdev_compare_and_write(void) 2385 { 2386 struct spdk_bdev *bdev; 2387 struct spdk_bdev_desc *desc = NULL; 2388 struct spdk_io_channel *ioch; 2389 struct ut_expected_io *expected_io; 2390 uint64_t offset, num_blocks; 2391 uint32_t num_completed; 2392 char aa_buf[512]; 2393 char bb_buf[512]; 2394 char cc_buf[512]; 2395 char write_buf[512]; 2396 struct iovec compare_iov; 2397 struct iovec write_iov; 2398 int rc; 2399 2400 memset(aa_buf, 0xaa, sizeof(aa_buf)); 2401 memset(bb_buf, 0xbb, sizeof(bb_buf)); 2402 memset(cc_buf, 0xcc, sizeof(cc_buf)); 2403 2404 g_io_types_supported[SPDK_BDEV_IO_TYPE_COMPARE] = false; 2405 2406 spdk_bdev_initialize(bdev_init_cb, NULL); 2407 fn_table.submit_request = stub_submit_request_get_buf; 2408 bdev = allocate_bdev("bdev"); 2409 2410 rc = spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc); 2411 CU_ASSERT_EQUAL(rc, 0); 2412 SPDK_CU_ASSERT_FATAL(desc != NULL); 2413 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 2414 ioch = spdk_bdev_get_io_channel(desc); 2415 SPDK_CU_ASSERT_FATAL(ioch != NULL); 2416 2417 fn_table.submit_request = stub_submit_request_get_buf; 2418 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 2419 2420 offset = 50; 2421 num_blocks = 1; 2422 compare_iov.iov_base = aa_buf; 2423 compare_iov.iov_len = sizeof(aa_buf); 2424 write_iov.iov_base = bb_buf; 2425 write_iov.iov_len = sizeof(bb_buf); 2426 2427 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, offset, num_blocks, 0); 2428 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2429 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, offset, num_blocks, 0); 2430 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2431 2432 g_io_done = false; 2433 g_compare_read_buf = aa_buf; 2434 g_compare_read_buf_len = sizeof(aa_buf); 2435 memset(write_buf, 0, sizeof(write_buf)); 2436 g_compare_write_buf = write_buf; 2437 g_compare_write_buf_len = sizeof(write_buf); 2438 rc = 
spdk_bdev_comparev_and_writev_blocks(desc, ioch, &compare_iov, 1, &write_iov, 1, 2439 offset, num_blocks, io_done, NULL); 2440 /* Trigger range locking */ 2441 poll_threads(); 2442 CU_ASSERT_EQUAL(rc, 0); 2443 num_completed = stub_complete_io(1); 2444 CU_ASSERT_EQUAL(num_completed, 1); 2445 CU_ASSERT(g_io_done == false); 2446 num_completed = stub_complete_io(1); 2447 /* Trigger range unlocking */ 2448 poll_threads(); 2449 CU_ASSERT_EQUAL(num_completed, 1); 2450 CU_ASSERT(g_io_done == true); 2451 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS); 2452 CU_ASSERT(memcmp(write_buf, bb_buf, sizeof(write_buf)) == 0); 2453 2454 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, offset, num_blocks, 0); 2455 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2456 2457 g_io_done = false; 2458 g_compare_read_buf = cc_buf; 2459 g_compare_read_buf_len = sizeof(cc_buf); 2460 memset(write_buf, 0, sizeof(write_buf)); 2461 g_compare_write_buf = write_buf; 2462 g_compare_write_buf_len = sizeof(write_buf); 2463 rc = spdk_bdev_comparev_and_writev_blocks(desc, ioch, &compare_iov, 1, &write_iov, 1, 2464 offset, num_blocks, io_done, NULL); 2465 /* Trigger range locking */ 2466 poll_threads(); 2467 CU_ASSERT_EQUAL(rc, 0); 2468 num_completed = stub_complete_io(1); 2469 /* Trigger range unlocking earlier because we expect error here */ 2470 poll_threads(); 2471 CU_ASSERT_EQUAL(num_completed, 1); 2472 CU_ASSERT(g_io_done == true); 2473 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_MISCOMPARE); 2474 num_completed = stub_complete_io(1); 2475 CU_ASSERT_EQUAL(num_completed, 0); 2476 2477 spdk_put_io_channel(ioch); 2478 spdk_bdev_close(desc); 2479 free_bdev(bdev); 2480 fn_table.submit_request = stub_submit_request; 2481 spdk_bdev_finish(bdev_fini_cb, NULL); 2482 poll_threads(); 2483 2484 g_io_types_supported[SPDK_BDEV_IO_TYPE_COMPARE] = true; 2485 2486 g_compare_read_buf = NULL; 2487 g_compare_write_buf = NULL; 2488 } 2489 2490 static void 2491 bdev_write_zeroes(void) 2492 { 2493 struct spdk_bdev *bdev; 2494 struct spdk_bdev_desc *desc = NULL; 2495 struct spdk_io_channel *ioch; 2496 struct ut_expected_io *expected_io; 2497 uint64_t offset, num_io_blocks, num_blocks; 2498 uint32_t num_completed, num_requests; 2499 int rc; 2500 2501 spdk_bdev_initialize(bdev_init_cb, NULL); 2502 bdev = allocate_bdev("bdev"); 2503 2504 rc = spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc); 2505 CU_ASSERT_EQUAL(rc, 0); 2506 SPDK_CU_ASSERT_FATAL(desc != NULL); 2507 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 2508 ioch = spdk_bdev_get_io_channel(desc); 2509 SPDK_CU_ASSERT_FATAL(ioch != NULL); 2510 2511 fn_table.submit_request = stub_submit_request; 2512 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 2513 2514 /* First test that if the bdev supports write_zeroes, the request won't be split */ 2515 bdev->md_len = 0; 2516 bdev->blocklen = 4096; 2517 num_blocks = (ZERO_BUFFER_SIZE / bdev->blocklen) * 2; 2518 2519 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, 0, num_blocks, 0); 2520 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2521 rc = spdk_bdev_write_zeroes_blocks(desc, ioch, 0, num_blocks, io_done, NULL); 2522 CU_ASSERT_EQUAL(rc, 0); 2523 num_completed = stub_complete_io(1); 2524 CU_ASSERT_EQUAL(num_completed, 1); 2525 2526 /* Check that if write zeroes is not supported it'll be replaced by regular writes */ 2527 ut_enable_io_type(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, false); 2528 num_io_blocks = ZERO_BUFFER_SIZE / bdev->blocklen; 2529 num_requests = 2; 
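/* Each emulated write is capped at ZERO_BUFFER_SIZE bytes of zero buffer, so a
 * request of twice ZERO_BUFFER_SIZE / blocklen blocks should be translated into
 * exactly two full-sized write requests. */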
2530 num_blocks = (ZERO_BUFFER_SIZE / bdev->blocklen) * num_requests;
2531
2532 for (offset = 0; offset < num_requests; ++offset) {
2533 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE,
2534 offset * num_io_blocks, num_io_blocks, 0);
2535 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2536 }
2537
2538 rc = spdk_bdev_write_zeroes_blocks(desc, ioch, 0, num_blocks, io_done, NULL);
2539 CU_ASSERT_EQUAL(rc, 0);
2540 num_completed = stub_complete_io(num_requests);
2541 CU_ASSERT_EQUAL(num_completed, num_requests);
2542
2543 /* Check that the splitting is correct if bdev has interleaved metadata */
2544 bdev->md_interleave = true;
2545 bdev->md_len = 64;
2546 bdev->blocklen = 4096 + 64;
2547 num_blocks = (ZERO_BUFFER_SIZE / bdev->blocklen) * 2;
2548
2549 num_requests = offset = 0;
2550 while (offset < num_blocks) {
2551 num_io_blocks = spdk_min(ZERO_BUFFER_SIZE / bdev->blocklen, num_blocks - offset);
2552 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE,
2553 offset, num_io_blocks, 0);
2554 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2555 offset += num_io_blocks;
2556 num_requests++;
2557 }
2558
2559 rc = spdk_bdev_write_zeroes_blocks(desc, ioch, 0, num_blocks, io_done, NULL);
2560 CU_ASSERT_EQUAL(rc, 0);
2561 num_completed = stub_complete_io(num_requests);
2562 CU_ASSERT_EQUAL(num_completed, num_requests);
2563 num_completed = stub_complete_io(num_requests);
2564 CU_ASSERT_EQUAL(num_completed, 0);
2565
2566 /* Check that the same holds for a separate metadata buffer */
2567 bdev->md_interleave = false;
2568 bdev->md_len = 64;
2569 bdev->blocklen = 4096;
2570
2571 num_requests = offset = 0;
2572 while (offset < num_blocks) {
2573 num_io_blocks = spdk_min(ZERO_BUFFER_SIZE / (bdev->blocklen + bdev->md_len), num_blocks);
2574 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE,
2575 offset, num_io_blocks, 0);
2576 expected_io->md_buf = (char *)g_bdev_mgr.zero_buffer + num_io_blocks * bdev->blocklen;
2577 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2578 offset += num_io_blocks;
2579 num_requests++;
2580 }
2581
2582 rc = spdk_bdev_write_zeroes_blocks(desc, ioch, 0, num_blocks, io_done, NULL);
2583 CU_ASSERT_EQUAL(rc, 0);
2584 num_completed = stub_complete_io(num_requests);
2585 CU_ASSERT_EQUAL(num_completed, num_requests);
2586
2587 ut_enable_io_type(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, true);
2588 spdk_put_io_channel(ioch);
2589 spdk_bdev_close(desc);
2590 free_bdev(bdev);
2591 spdk_bdev_finish(bdev_fini_cb, NULL);
2592 poll_threads();
2593 }
2594
2595 static void
2596 bdev_open_while_hotremove(void)
2597 {
2598 struct spdk_bdev *bdev;
2599 struct spdk_bdev_desc *desc[2] = {};
2600 int rc;
2601
2602 bdev = allocate_bdev("bdev");
2603
2604 rc = spdk_bdev_open_ext("bdev", false, bdev_ut_event_cb, NULL, &desc[0]);
2605 CU_ASSERT(rc == 0);
2606 SPDK_CU_ASSERT_FATAL(desc[0] != NULL);
2607 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc[0]));
2608
2609 spdk_bdev_unregister(bdev, NULL, NULL);
2610
2611 rc = spdk_bdev_open_ext("bdev", false, bdev_ut_event_cb, NULL, &desc[1]);
2612 CU_ASSERT(rc == -ENODEV);
2613 SPDK_CU_ASSERT_FATAL(desc[1] == NULL);
2614
2615 spdk_bdev_close(desc[0]);
2616 free_bdev(bdev);
2617 }
2618
2619 static void
2620 bdev_close_while_hotremove(void)
2621 {
2622 struct spdk_bdev *bdev;
2623 struct spdk_bdev_desc *desc = NULL;
2624 int rc = 0;
2625
2626 bdev = allocate_bdev("bdev");
2627
2628 rc = spdk_bdev_open_ext("bdev", true, bdev_open_cb1, &desc, &desc);
2629 CU_ASSERT_EQUAL(rc, 0);
2630
SPDK_CU_ASSERT_FATAL(desc != NULL);
2631 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc));
2632
2633 /* Simulate hot-unplug by unregistering bdev */
2634 g_event_type1 = 0xFF;
2635 g_unregister_arg = NULL;
2636 g_unregister_rc = -1;
2637 spdk_bdev_unregister(bdev, bdev_unregister_cb, (void *)0x12345678);
2638 /* Close device while remove event is in flight */
2639 spdk_bdev_close(desc);
2640
2641 /* Ensure that unregister callback is delayed */
2642 CU_ASSERT_EQUAL(g_unregister_arg, NULL);
2643 CU_ASSERT_EQUAL(g_unregister_rc, -1);
2644
2645 poll_threads();
2646
2647 /* Event callback shall not be issued because device was closed */
2648 CU_ASSERT_EQUAL(g_event_type1, 0xFF);
2649 /* Unregister callback is issued */
2650 CU_ASSERT_EQUAL(g_unregister_arg, (void *)0x12345678);
2651 CU_ASSERT_EQUAL(g_unregister_rc, 0);
2652
2653 free_bdev(bdev);
2654 }
2655
2656 static void
2657 bdev_open_ext(void)
2658 {
2659 struct spdk_bdev *bdev;
2660 struct spdk_bdev_desc *desc1 = NULL;
2661 struct spdk_bdev_desc *desc2 = NULL;
2662 int rc = 0;
2663
2664 bdev = allocate_bdev("bdev");
2665
2666 rc = spdk_bdev_open_ext("bdev", true, NULL, NULL, &desc1);
2667 CU_ASSERT_EQUAL(rc, -EINVAL);
2668
2669 rc = spdk_bdev_open_ext("bdev", true, bdev_open_cb1, &desc1, &desc1);
2670 CU_ASSERT_EQUAL(rc, 0);
2671
2672 rc = spdk_bdev_open_ext("bdev", true, bdev_open_cb2, &desc2, &desc2);
2673 CU_ASSERT_EQUAL(rc, 0);
2674
2675 g_event_type1 = 0xFF;
2676 g_event_type2 = 0xFF;
2677
2678 /* Simulate hot-unplug by unregistering bdev */
2679 spdk_bdev_unregister(bdev, NULL, NULL);
2680 poll_threads();
2681
2682 /* Check if correct events have been triggered in event callback fn */
2683 CU_ASSERT_EQUAL(g_event_type1, SPDK_BDEV_EVENT_REMOVE);
2684 CU_ASSERT_EQUAL(g_event_type2, SPDK_BDEV_EVENT_REMOVE);
2685
2686 free_bdev(bdev);
2687 poll_threads();
2688 }
2689
2690 struct timeout_io_cb_arg {
2691 struct iovec iov;
2692 uint8_t type;
2693 };
2694
2695 static int
2696 bdev_channel_count_submitted_io(struct spdk_bdev_channel *ch)
2697 {
2698 struct spdk_bdev_io *bdev_io;
2699 int n = 0;
2700
2701 if (!ch) {
2702 return -1;
2703 }
2704
2705 TAILQ_FOREACH(bdev_io, &ch->io_submitted, internal.ch_link) {
2706 n++;
2707 }
2708
2709 return n;
2710 }
2711
2712 static void
2713 bdev_channel_io_timeout_cb(void *cb_arg, struct spdk_bdev_io *bdev_io)
2714 {
2715 struct timeout_io_cb_arg *ctx = cb_arg;
2716
2717 ctx->type = bdev_io->type;
2718 ctx->iov.iov_base = bdev_io->iov.iov_base;
2719 ctx->iov.iov_len = bdev_io->iov.iov_len;
2720 }
2721
2722 static void
2723 bdev_set_io_timeout(void)
2724 {
2725 struct spdk_bdev *bdev;
2726 struct spdk_bdev_desc *desc = NULL;
2727 struct spdk_io_channel *io_ch = NULL;
2728 struct spdk_bdev_channel *bdev_ch = NULL;
2729 struct timeout_io_cb_arg cb_arg;
2730
2731 spdk_bdev_initialize(bdev_init_cb, NULL);
2732
2733 bdev = allocate_bdev("bdev");
2734
2735 CU_ASSERT(spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc) == 0);
2736 SPDK_CU_ASSERT_FATAL(desc != NULL);
2737 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc));
2738
2739 io_ch = spdk_bdev_get_io_channel(desc);
2740 CU_ASSERT(io_ch != NULL);
2741
2742 bdev_ch = spdk_io_channel_get_ctx(io_ch);
2743 CU_ASSERT(TAILQ_EMPTY(&bdev_ch->io_submitted));
2744
2745 /* This is part 1:
2746 * we will check the bdev_ch->io_submitted list
2747 * to make sure that it links the IOs, and only the user-submitted IOs
2748 */
2749 CU_ASSERT(spdk_bdev_read(desc, io_ch, (void *)0x1000, 0, 4096, io_done, NULL) == 0);
2750 CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 1);
2751 CU_ASSERT(spdk_bdev_write(desc, io_ch, (void *)0x2000, 0, 4096, io_done, NULL) == 0);
2752 CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 2);
2753 stub_complete_io(1);
2754 CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 1);
2755 stub_complete_io(1);
2756 CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 0);
2757
2758 /* Split IO */
2759 bdev->optimal_io_boundary = 16;
2760 bdev->split_on_optimal_io_boundary = true;
2761
2762 /* Now test that a single-vector command is split correctly.
2763 * Offset 14, length 8, payload 0xF000
2764 * Child - Offset 14, length 2, payload 0xF000
2765 * Child - Offset 16, length 6, payload 0xF000 + 2 * 512
2766 *
2767 * Set up the expected values before calling spdk_bdev_read_blocks
2768 */
2769 CU_ASSERT(spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 8, io_done, NULL) == 0);
2770 /* We count all submitted IOs, including those generated by splitting. */
2771 CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 3);
2772 stub_complete_io(1);
2773 CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 2);
2774 stub_complete_io(1);
2775 CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 0);
2776
2777 /* Also include the reset IO */
2778 CU_ASSERT(spdk_bdev_reset(desc, io_ch, io_done, NULL) == 0);
2779 CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 1);
2780 poll_threads();
2781 stub_complete_io(1);
2782 poll_threads();
2783 CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 0);
2784
2785 /* This is part 2:
2786 * test the desc timeout poller registration
2787 */
2788
2789 /* Successfully set the timeout */
2790 CU_ASSERT(spdk_bdev_set_timeout(desc, 30, bdev_channel_io_timeout_cb, &cb_arg) == 0);
2791 CU_ASSERT(desc->io_timeout_poller != NULL);
2792 CU_ASSERT(desc->timeout_in_sec == 30);
2793 CU_ASSERT(desc->cb_fn == bdev_channel_io_timeout_cb);
2794 CU_ASSERT(desc->cb_arg == &cb_arg);
2795
2796 /* Change the timeout limit */
2797 CU_ASSERT(spdk_bdev_set_timeout(desc, 20, bdev_channel_io_timeout_cb, &cb_arg) == 0);
2798 CU_ASSERT(desc->io_timeout_poller != NULL);
2799 CU_ASSERT(desc->timeout_in_sec == 20);
2800 CU_ASSERT(desc->cb_fn == bdev_channel_io_timeout_cb);
2801 CU_ASSERT(desc->cb_arg == &cb_arg);
2802
2803 /* Disable the timeout */
2804 CU_ASSERT(spdk_bdev_set_timeout(desc, 0, NULL, NULL) == 0);
2805 CU_ASSERT(desc->io_timeout_poller == NULL);
2806
2807 /* This is part 3:
2808 * we will catch a timed-out IO and check whether the IO is
2809 * the submitted one.
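 * The clock is advanced with spdk_delay_us() and the timeout poller runs from
 * poll_threads(); the callback must fire only once an I/O has been outstanding
 * for at least the 30 seconds configured below.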
2810 */
2811 memset(&cb_arg, 0, sizeof(cb_arg));
2812 CU_ASSERT(spdk_bdev_set_timeout(desc, 30, bdev_channel_io_timeout_cb, &cb_arg) == 0);
2813 CU_ASSERT(spdk_bdev_write_blocks(desc, io_ch, (void *)0x1000, 0, 1, io_done, NULL) == 0);
2814
2815 /* Don't reach the limit */
2816 spdk_delay_us(15 * spdk_get_ticks_hz());
2817 poll_threads();
2818 CU_ASSERT(cb_arg.type == 0);
2819 CU_ASSERT(cb_arg.iov.iov_base == (void *)0x0);
2820 CU_ASSERT(cb_arg.iov.iov_len == 0);
2821
2822 /* 15 + 15 = 30: now we reach the limit */
2823 spdk_delay_us(15 * spdk_get_ticks_hz());
2824 poll_threads();
2825 CU_ASSERT(cb_arg.type == SPDK_BDEV_IO_TYPE_WRITE);
2826 CU_ASSERT(cb_arg.iov.iov_base == (void *)0x1000);
2827 CU_ASSERT(cb_arg.iov.iov_len == 1 * bdev->blocklen);
2828 stub_complete_io(1);
2829
2830 /* Use the same split IO as above and check the IO */
2831 memset(&cb_arg, 0, sizeof(cb_arg));
2832 CU_ASSERT(spdk_bdev_write_blocks(desc, io_ch, (void *)0xF000, 14, 8, io_done, NULL) == 0);
2833
2834 /* The first child completes in time */
2835 spdk_delay_us(15 * spdk_get_ticks_hz());
2836 poll_threads();
2837 stub_complete_io(1);
2838 CU_ASSERT(cb_arg.type == 0);
2839 CU_ASSERT(cb_arg.iov.iov_base == (void *)0x0);
2840 CU_ASSERT(cb_arg.iov.iov_len == 0);
2841
2842 /* The second child reaches the limit */
2843 spdk_delay_us(15 * spdk_get_ticks_hz());
2844 poll_threads();
2845 CU_ASSERT(cb_arg.type == SPDK_BDEV_IO_TYPE_WRITE);
2846 CU_ASSERT(cb_arg.iov.iov_base == (void *)0xF000);
2847 CU_ASSERT(cb_arg.iov.iov_len == 8 * bdev->blocklen);
2848 stub_complete_io(1);
2849
2850 /* Also include the reset IO */
2851 memset(&cb_arg, 0, sizeof(cb_arg));
2852 CU_ASSERT(spdk_bdev_reset(desc, io_ch, io_done, NULL) == 0);
2853 spdk_delay_us(30 * spdk_get_ticks_hz());
2854 poll_threads();
2855 CU_ASSERT(cb_arg.type == SPDK_BDEV_IO_TYPE_RESET);
2856 stub_complete_io(1);
2857 poll_threads();
2858
2859 spdk_put_io_channel(io_ch);
2860 spdk_bdev_close(desc);
2861 free_bdev(bdev);
2862 spdk_bdev_finish(bdev_fini_cb, NULL);
2863 poll_threads();
2864 }
2865
2866 static void
2867 lba_range_overlap(void)
2868 {
2869 struct lba_range r1, r2;
2870
2871 r1.offset = 100;
2872 r1.length = 50;
2873
2874 r2.offset = 0;
2875 r2.length = 1;
2876 CU_ASSERT(!bdev_lba_range_overlapped(&r1, &r2));
2877
2878 r2.offset = 0;
2879 r2.length = 100;
2880 CU_ASSERT(!bdev_lba_range_overlapped(&r1, &r2));
2881
2882 r2.offset = 0;
2883 r2.length = 110;
2884 CU_ASSERT(bdev_lba_range_overlapped(&r1, &r2));
2885
2886 r2.offset = 100;
2887 r2.length = 10;
2888 CU_ASSERT(bdev_lba_range_overlapped(&r1, &r2));
2889
2890 r2.offset = 110;
2891 r2.length = 20;
2892 CU_ASSERT(bdev_lba_range_overlapped(&r1, &r2));
2893
2894 r2.offset = 140;
2895 r2.length = 150;
2896 CU_ASSERT(bdev_lba_range_overlapped(&r1, &r2));
2897
2898 r2.offset = 130;
2899 r2.length = 200;
2900 CU_ASSERT(bdev_lba_range_overlapped(&r1, &r2));
2901
2902 r2.offset = 150;
2903 r2.length = 100;
2904 CU_ASSERT(!bdev_lba_range_overlapped(&r1, &r2));
2905
2906 r2.offset = 110;
2907 r2.length = 0;
2908 CU_ASSERT(!bdev_lba_range_overlapped(&r1, &r2));
2909 }
2910
2911 static bool g_lock_lba_range_done;
2912 static bool g_unlock_lba_range_done;
2913
2914 static void
2915 lock_lba_range_done(void *ctx, int status)
2916 {
2917 g_lock_lba_range_done = true;
2918 }
2919
2920 static void
2921 unlock_lba_range_done(void *ctx, int status)
2922 {
2923 g_unlock_lba_range_done = true;
2924 }
2925
2926 static void
2927 lock_lba_range_check_ranges(void)
2928 {
2929 struct spdk_bdev *bdev;
2930 struct spdk_bdev_desc *desc = NULL;
2931
struct spdk_io_channel *io_ch; 2932 struct spdk_bdev_channel *channel; 2933 struct lba_range *range; 2934 int ctx1; 2935 int rc; 2936 2937 spdk_bdev_initialize(bdev_init_cb, NULL); 2938 2939 bdev = allocate_bdev("bdev0"); 2940 2941 rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc); 2942 CU_ASSERT(rc == 0); 2943 CU_ASSERT(desc != NULL); 2944 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 2945 io_ch = spdk_bdev_get_io_channel(desc); 2946 CU_ASSERT(io_ch != NULL); 2947 channel = spdk_io_channel_get_ctx(io_ch); 2948 2949 g_lock_lba_range_done = false; 2950 rc = bdev_lock_lba_range(desc, io_ch, 20, 10, lock_lba_range_done, &ctx1); 2951 CU_ASSERT(rc == 0); 2952 poll_threads(); 2953 2954 CU_ASSERT(g_lock_lba_range_done == true); 2955 range = TAILQ_FIRST(&channel->locked_ranges); 2956 SPDK_CU_ASSERT_FATAL(range != NULL); 2957 CU_ASSERT(range->offset == 20); 2958 CU_ASSERT(range->length == 10); 2959 CU_ASSERT(range->owner_ch == channel); 2960 2961 /* Unlocks must exactly match a lock. */ 2962 g_unlock_lba_range_done = false; 2963 rc = bdev_unlock_lba_range(desc, io_ch, 20, 1, unlock_lba_range_done, &ctx1); 2964 CU_ASSERT(rc == -EINVAL); 2965 CU_ASSERT(g_unlock_lba_range_done == false); 2966 2967 rc = bdev_unlock_lba_range(desc, io_ch, 20, 10, unlock_lba_range_done, &ctx1); 2968 CU_ASSERT(rc == 0); 2969 spdk_delay_us(100); 2970 poll_threads(); 2971 2972 CU_ASSERT(g_unlock_lba_range_done == true); 2973 CU_ASSERT(TAILQ_EMPTY(&channel->locked_ranges)); 2974 2975 spdk_put_io_channel(io_ch); 2976 spdk_bdev_close(desc); 2977 free_bdev(bdev); 2978 spdk_bdev_finish(bdev_fini_cb, NULL); 2979 poll_threads(); 2980 } 2981 2982 static void 2983 lock_lba_range_with_io_outstanding(void) 2984 { 2985 struct spdk_bdev *bdev; 2986 struct spdk_bdev_desc *desc = NULL; 2987 struct spdk_io_channel *io_ch; 2988 struct spdk_bdev_channel *channel; 2989 struct lba_range *range; 2990 char buf[4096]; 2991 int ctx1; 2992 int rc; 2993 2994 spdk_bdev_initialize(bdev_init_cb, NULL); 2995 2996 bdev = allocate_bdev("bdev0"); 2997 2998 rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc); 2999 CU_ASSERT(rc == 0); 3000 CU_ASSERT(desc != NULL); 3001 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 3002 io_ch = spdk_bdev_get_io_channel(desc); 3003 CU_ASSERT(io_ch != NULL); 3004 channel = spdk_io_channel_get_ctx(io_ch); 3005 3006 g_io_done = false; 3007 rc = spdk_bdev_read_blocks(desc, io_ch, buf, 20, 1, io_done, &ctx1); 3008 CU_ASSERT(rc == 0); 3009 3010 g_lock_lba_range_done = false; 3011 rc = bdev_lock_lba_range(desc, io_ch, 20, 10, lock_lba_range_done, &ctx1); 3012 CU_ASSERT(rc == 0); 3013 poll_threads(); 3014 3015 /* The lock should immediately become valid, since there are no outstanding 3016 * write I/O. 3017 */ 3018 CU_ASSERT(g_io_done == false); 3019 CU_ASSERT(g_lock_lba_range_done == true); 3020 range = TAILQ_FIRST(&channel->locked_ranges); 3021 SPDK_CU_ASSERT_FATAL(range != NULL); 3022 CU_ASSERT(range->offset == 20); 3023 CU_ASSERT(range->length == 10); 3024 CU_ASSERT(range->owner_ch == channel); 3025 CU_ASSERT(range->locked_ctx == &ctx1); 3026 3027 rc = bdev_unlock_lba_range(desc, io_ch, 20, 10, lock_lba_range_done, &ctx1); 3028 CU_ASSERT(rc == 0); 3029 stub_complete_io(1); 3030 spdk_delay_us(100); 3031 poll_threads(); 3032 3033 CU_ASSERT(TAILQ_EMPTY(&channel->locked_ranges)); 3034 3035 /* Now try again, but with a write I/O. 
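 * Reads never hold up a range lock; only outstanding writes must drain before
 * the lock callback fires, which is what the checks below exercise.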
*/ 3036 g_io_done = false; 3037 rc = spdk_bdev_write_blocks(desc, io_ch, buf, 20, 1, io_done, &ctx1); 3038 CU_ASSERT(rc == 0); 3039 3040 g_lock_lba_range_done = false; 3041 rc = bdev_lock_lba_range(desc, io_ch, 20, 10, lock_lba_range_done, &ctx1); 3042 CU_ASSERT(rc == 0); 3043 poll_threads(); 3044 3045 /* The lock should not be fully valid yet, since a write I/O is outstanding. 3046 * But note that the range should be on the channel's locked_list, to make sure no 3047 * new write I/O are started. 3048 */ 3049 CU_ASSERT(g_io_done == false); 3050 CU_ASSERT(g_lock_lba_range_done == false); 3051 range = TAILQ_FIRST(&channel->locked_ranges); 3052 SPDK_CU_ASSERT_FATAL(range != NULL); 3053 CU_ASSERT(range->offset == 20); 3054 CU_ASSERT(range->length == 10); 3055 3056 /* Complete the write I/O. This should make the lock valid (checked by confirming 3057 * our callback was invoked). 3058 */ 3059 stub_complete_io(1); 3060 spdk_delay_us(100); 3061 poll_threads(); 3062 CU_ASSERT(g_io_done == true); 3063 CU_ASSERT(g_lock_lba_range_done == true); 3064 3065 rc = bdev_unlock_lba_range(desc, io_ch, 20, 10, unlock_lba_range_done, &ctx1); 3066 CU_ASSERT(rc == 0); 3067 poll_threads(); 3068 3069 CU_ASSERT(TAILQ_EMPTY(&channel->locked_ranges)); 3070 3071 spdk_put_io_channel(io_ch); 3072 spdk_bdev_close(desc); 3073 free_bdev(bdev); 3074 spdk_bdev_finish(bdev_fini_cb, NULL); 3075 poll_threads(); 3076 } 3077 3078 static void 3079 lock_lba_range_overlapped(void) 3080 { 3081 struct spdk_bdev *bdev; 3082 struct spdk_bdev_desc *desc = NULL; 3083 struct spdk_io_channel *io_ch; 3084 struct spdk_bdev_channel *channel; 3085 struct lba_range *range; 3086 int ctx1; 3087 int rc; 3088 3089 spdk_bdev_initialize(bdev_init_cb, NULL); 3090 3091 bdev = allocate_bdev("bdev0"); 3092 3093 rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc); 3094 CU_ASSERT(rc == 0); 3095 CU_ASSERT(desc != NULL); 3096 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 3097 io_ch = spdk_bdev_get_io_channel(desc); 3098 CU_ASSERT(io_ch != NULL); 3099 channel = spdk_io_channel_get_ctx(io_ch); 3100 3101 /* Lock range 20-29. */ 3102 g_lock_lba_range_done = false; 3103 rc = bdev_lock_lba_range(desc, io_ch, 20, 10, lock_lba_range_done, &ctx1); 3104 CU_ASSERT(rc == 0); 3105 poll_threads(); 3106 3107 CU_ASSERT(g_lock_lba_range_done == true); 3108 range = TAILQ_FIRST(&channel->locked_ranges); 3109 SPDK_CU_ASSERT_FATAL(range != NULL); 3110 CU_ASSERT(range->offset == 20); 3111 CU_ASSERT(range->length == 10); 3112 3113 /* Try to lock range 25-39. It should not lock immediately, since it overlaps with 3114 * 20-29. 3115 */ 3116 g_lock_lba_range_done = false; 3117 rc = bdev_lock_lba_range(desc, io_ch, 25, 15, lock_lba_range_done, &ctx1); 3118 CU_ASSERT(rc == 0); 3119 poll_threads(); 3120 3121 CU_ASSERT(g_lock_lba_range_done == false); 3122 range = TAILQ_FIRST(&bdev->internal.pending_locked_ranges); 3123 SPDK_CU_ASSERT_FATAL(range != NULL); 3124 CU_ASSERT(range->offset == 25); 3125 CU_ASSERT(range->length == 15); 3126 3127 /* Unlock 20-29. This should result in range 25-39 now getting locked since it 3128 * no longer overlaps with an active lock. 
3129 */ 3130 g_unlock_lba_range_done = false; 3131 rc = bdev_unlock_lba_range(desc, io_ch, 20, 10, unlock_lba_range_done, &ctx1); 3132 CU_ASSERT(rc == 0); 3133 poll_threads(); 3134 3135 CU_ASSERT(g_unlock_lba_range_done == true); 3136 CU_ASSERT(TAILQ_EMPTY(&bdev->internal.pending_locked_ranges)); 3137 range = TAILQ_FIRST(&channel->locked_ranges); 3138 SPDK_CU_ASSERT_FATAL(range != NULL); 3139 CU_ASSERT(range->offset == 25); 3140 CU_ASSERT(range->length == 15); 3141 3142 /* Lock 40-59. This should immediately lock since it does not overlap with the 3143 * currently active 25-39 lock. 3144 */ 3145 g_lock_lba_range_done = false; 3146 rc = bdev_lock_lba_range(desc, io_ch, 40, 20, lock_lba_range_done, &ctx1); 3147 CU_ASSERT(rc == 0); 3148 poll_threads(); 3149 3150 CU_ASSERT(g_lock_lba_range_done == true); 3151 range = TAILQ_FIRST(&bdev->internal.locked_ranges); 3152 SPDK_CU_ASSERT_FATAL(range != NULL); 3153 range = TAILQ_NEXT(range, tailq); 3154 SPDK_CU_ASSERT_FATAL(range != NULL); 3155 CU_ASSERT(range->offset == 40); 3156 CU_ASSERT(range->length == 20); 3157 3158 /* Try to lock 35-44. Note that this overlaps with both 25-39 and 40-59. */ 3159 g_lock_lba_range_done = false; 3160 rc = bdev_lock_lba_range(desc, io_ch, 35, 10, lock_lba_range_done, &ctx1); 3161 CU_ASSERT(rc == 0); 3162 poll_threads(); 3163 3164 CU_ASSERT(g_lock_lba_range_done == false); 3165 range = TAILQ_FIRST(&bdev->internal.pending_locked_ranges); 3166 SPDK_CU_ASSERT_FATAL(range != NULL); 3167 CU_ASSERT(range->offset == 35); 3168 CU_ASSERT(range->length == 10); 3169 3170 /* Unlock 25-39. Make sure that 35-44 is still in the pending list, since 3171 * the 40-59 lock is still active. 3172 */ 3173 g_unlock_lba_range_done = false; 3174 rc = bdev_unlock_lba_range(desc, io_ch, 25, 15, unlock_lba_range_done, &ctx1); 3175 CU_ASSERT(rc == 0); 3176 poll_threads(); 3177 3178 CU_ASSERT(g_unlock_lba_range_done == true); 3179 CU_ASSERT(g_lock_lba_range_done == false); 3180 range = TAILQ_FIRST(&bdev->internal.pending_locked_ranges); 3181 SPDK_CU_ASSERT_FATAL(range != NULL); 3182 CU_ASSERT(range->offset == 35); 3183 CU_ASSERT(range->length == 10); 3184 3185 /* Unlock 40-59. This should result in 35-44 now getting locked, since there are 3186 * no longer any active overlapping locks. 3187 */ 3188 g_unlock_lba_range_done = false; 3189 rc = bdev_unlock_lba_range(desc, io_ch, 40, 20, unlock_lba_range_done, &ctx1); 3190 CU_ASSERT(rc == 0); 3191 poll_threads(); 3192 3193 CU_ASSERT(g_unlock_lba_range_done == true); 3194 CU_ASSERT(g_lock_lba_range_done == true); 3195 CU_ASSERT(TAILQ_EMPTY(&bdev->internal.pending_locked_ranges)); 3196 range = TAILQ_FIRST(&bdev->internal.locked_ranges); 3197 SPDK_CU_ASSERT_FATAL(range != NULL); 3198 CU_ASSERT(range->offset == 35); 3199 CU_ASSERT(range->length == 10); 3200 3201 /* Finally, unlock 35-44. 
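 * After this final unlock both the active and pending range lists on the bdev
 * must be empty again.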
*/ 3202 g_unlock_lba_range_done = false; 3203 rc = bdev_unlock_lba_range(desc, io_ch, 35, 10, unlock_lba_range_done, &ctx1); 3204 CU_ASSERT(rc == 0); 3205 poll_threads(); 3206 3207 CU_ASSERT(g_unlock_lba_range_done == true); 3208 CU_ASSERT(TAILQ_EMPTY(&bdev->internal.locked_ranges)); 3209 3210 spdk_put_io_channel(io_ch); 3211 spdk_bdev_close(desc); 3212 free_bdev(bdev); 3213 spdk_bdev_finish(bdev_fini_cb, NULL); 3214 poll_threads(); 3215 } 3216 3217 static void 3218 abort_done(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg) 3219 { 3220 g_abort_done = true; 3221 g_abort_status = bdev_io->internal.status; 3222 spdk_bdev_free_io(bdev_io); 3223 } 3224 3225 static void 3226 bdev_io_abort(void) 3227 { 3228 struct spdk_bdev *bdev; 3229 struct spdk_bdev_desc *desc = NULL; 3230 struct spdk_io_channel *io_ch; 3231 struct spdk_bdev_channel *channel; 3232 struct spdk_bdev_mgmt_channel *mgmt_ch; 3233 struct spdk_bdev_opts bdev_opts = { 3234 .bdev_io_pool_size = 7, 3235 .bdev_io_cache_size = 2, 3236 }; 3237 struct iovec iov[BDEV_IO_NUM_CHILD_IOV * 2]; 3238 uint64_t io_ctx1 = 0, io_ctx2 = 0, i; 3239 int rc; 3240 3241 rc = spdk_bdev_set_opts(&bdev_opts); 3242 CU_ASSERT(rc == 0); 3243 spdk_bdev_initialize(bdev_init_cb, NULL); 3244 3245 bdev = allocate_bdev("bdev0"); 3246 3247 rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc); 3248 CU_ASSERT(rc == 0); 3249 CU_ASSERT(desc != NULL); 3250 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 3251 io_ch = spdk_bdev_get_io_channel(desc); 3252 CU_ASSERT(io_ch != NULL); 3253 channel = spdk_io_channel_get_ctx(io_ch); 3254 mgmt_ch = channel->shared_resource->mgmt_ch; 3255 3256 g_abort_done = false; 3257 3258 ut_enable_io_type(SPDK_BDEV_IO_TYPE_ABORT, false); 3259 3260 rc = spdk_bdev_abort(desc, io_ch, &io_ctx1, abort_done, NULL); 3261 CU_ASSERT(rc == -ENOTSUP); 3262 3263 ut_enable_io_type(SPDK_BDEV_IO_TYPE_ABORT, true); 3264 3265 rc = spdk_bdev_abort(desc, io_ch, &io_ctx2, abort_done, NULL); 3266 CU_ASSERT(rc == 0); 3267 CU_ASSERT(g_abort_done == true); 3268 CU_ASSERT(g_abort_status == SPDK_BDEV_IO_STATUS_FAILED); 3269 3270 /* Test the case that the target I/O was successfully aborted. */ 3271 g_io_done = false; 3272 3273 rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, &io_ctx1); 3274 CU_ASSERT(rc == 0); 3275 CU_ASSERT(g_io_done == false); 3276 3277 g_abort_done = false; 3278 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 3279 3280 rc = spdk_bdev_abort(desc, io_ch, &io_ctx1, abort_done, NULL); 3281 CU_ASSERT(rc == 0); 3282 CU_ASSERT(g_io_done == true); 3283 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED); 3284 stub_complete_io(1); 3285 CU_ASSERT(g_abort_done == true); 3286 CU_ASSERT(g_abort_status == SPDK_BDEV_IO_STATUS_SUCCESS); 3287 3288 /* Test the case that the target I/O was not aborted because it completed 3289 * in the middle of execution of the abort. 
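 * Even though the module fails the abort itself (the target I/O is already
 * gone by then), the bdev layer is expected to report the abort as successful
 * because the target I/O did complete.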
	/* Test the case that the target I/O was not aborted because it completed
	 * in the middle of the abort's execution.
	 */
	g_io_done = false;

	rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, &io_ctx1);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	g_abort_done = false;
	g_io_exp_status = SPDK_BDEV_IO_STATUS_FAILED;

	rc = spdk_bdev_abort(desc, io_ch, &io_ctx1, abort_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS;
	stub_complete_io(1);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS);

	g_io_exp_status = SPDK_BDEV_IO_STATUS_FAILED;
	stub_complete_io(1);
	CU_ASSERT(g_abort_done == true);
	CU_ASSERT(g_abort_status == SPDK_BDEV_IO_STATUS_SUCCESS);

	g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS;

	bdev->optimal_io_boundary = 16;
	bdev->split_on_optimal_io_boundary = true;

	/* Test that a single-vector command which is split is aborted correctly.
	 * Offset 14, length 8, payload 0xF000
	 *  Child - Offset 14, length 2, payload 0xF000
	 *  Child - Offset 16, length 6, payload 0xF000 + 2 * 512
	 */
	g_io_done = false;

	rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 8, io_done, &io_ctx1);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2);

	g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS;

	rc = spdk_bdev_abort(desc, io_ch, &io_ctx1, abort_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED);
	stub_complete_io(2);
	CU_ASSERT(g_abort_done == true);
	CU_ASSERT(g_abort_status == SPDK_BDEV_IO_STATUS_SUCCESS);

	/* Test that a multi-vector command that needs to be split by stripe and
	 * then needs to be split further is aborted correctly. Abort is requested
	 * before the second child I/O is submitted. The parent I/O should complete
	 * with failure without submitting the second child I/O.
	 */
	for (i = 0; i < BDEV_IO_NUM_CHILD_IOV * 2; i++) {
		iov[i].iov_base = (void *)((i + 1) * 0x10000);
		iov[i].iov_len = 512;
	}

	bdev->optimal_io_boundary = BDEV_IO_NUM_CHILD_IOV;
	g_io_done = false;
	rc = spdk_bdev_readv_blocks(desc, io_ch, iov, BDEV_IO_NUM_CHILD_IOV * 2, 0,
				    BDEV_IO_NUM_CHILD_IOV * 2, io_done, &io_ctx1);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);

	g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS;

	rc = spdk_bdev_abort(desc, io_ch, &io_ctx1, abort_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED);
	stub_complete_io(1);
	CU_ASSERT(g_abort_done == true);
	CU_ASSERT(g_abort_status == SPDK_BDEV_IO_STATUS_SUCCESS);

	g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS;

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);

	bdev->optimal_io_boundary = 16;
	g_io_done = false;
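	/* This test set bdev_io_pool_size to 7. After the next read is split into
	 * four children, the parent read, its four children, and the abort parent
	 * consume six spdk_bdev_io objects, leaving only one for the four child
	 * aborts. The abort path is therefore expected to wait on the management
	 * channel's io_wait_queue and submit the child aborts one at a time.
	 */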
3381 */ 3382 rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 50, io_done, &io_ctx1); 3383 CU_ASSERT(rc == 0); 3384 CU_ASSERT(g_io_done == false); 3385 3386 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 4); 3387 3388 g_abort_done = false; 3389 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 3390 3391 rc = spdk_bdev_abort(desc, io_ch, &io_ctx1, abort_done, NULL); 3392 CU_ASSERT(rc == 0); 3393 CU_ASSERT(!TAILQ_EMPTY(&mgmt_ch->io_wait_queue)); 3394 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 4); 3395 3396 stub_complete_io(1); 3397 CU_ASSERT(g_io_done == true); 3398 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED); 3399 stub_complete_io(3); 3400 CU_ASSERT(g_abort_done == true); 3401 CU_ASSERT(g_abort_status == SPDK_BDEV_IO_STATUS_SUCCESS); 3402 3403 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 3404 3405 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 3406 3407 spdk_put_io_channel(io_ch); 3408 spdk_bdev_close(desc); 3409 free_bdev(bdev); 3410 spdk_bdev_finish(bdev_fini_cb, NULL); 3411 poll_threads(); 3412 } 3413 3414 int 3415 main(int argc, char **argv) 3416 { 3417 CU_pSuite suite = NULL; 3418 unsigned int num_failures; 3419 3420 CU_set_error_action(CUEA_ABORT); 3421 CU_initialize_registry(); 3422 3423 suite = CU_add_suite("bdev", null_init, null_clean); 3424 3425 CU_ADD_TEST(suite, bytes_to_blocks_test); 3426 CU_ADD_TEST(suite, num_blocks_test); 3427 CU_ADD_TEST(suite, io_valid_test); 3428 CU_ADD_TEST(suite, open_write_test); 3429 CU_ADD_TEST(suite, alias_add_del_test); 3430 CU_ADD_TEST(suite, get_device_stat_test); 3431 CU_ADD_TEST(suite, bdev_io_types_test); 3432 CU_ADD_TEST(suite, bdev_io_wait_test); 3433 CU_ADD_TEST(suite, bdev_io_spans_boundary_test); 3434 CU_ADD_TEST(suite, bdev_io_split_test); 3435 CU_ADD_TEST(suite, bdev_io_split_with_io_wait); 3436 CU_ADD_TEST(suite, bdev_io_alignment_with_boundary); 3437 CU_ADD_TEST(suite, bdev_io_alignment); 3438 CU_ADD_TEST(suite, bdev_histograms); 3439 CU_ADD_TEST(suite, bdev_write_zeroes); 3440 CU_ADD_TEST(suite, bdev_compare_and_write); 3441 CU_ADD_TEST(suite, bdev_compare); 3442 CU_ADD_TEST(suite, bdev_open_while_hotremove); 3443 CU_ADD_TEST(suite, bdev_close_while_hotremove); 3444 CU_ADD_TEST(suite, bdev_open_ext); 3445 CU_ADD_TEST(suite, bdev_set_io_timeout); 3446 CU_ADD_TEST(suite, lba_range_overlap); 3447 CU_ADD_TEST(suite, lock_lba_range_check_ranges); 3448 CU_ADD_TEST(suite, lock_lba_range_with_io_outstanding); 3449 CU_ADD_TEST(suite, lock_lba_range_overlapped); 3450 CU_ADD_TEST(suite, bdev_io_abort); 3451 3452 allocate_cores(1); 3453 allocate_threads(1); 3454 set_thread(0); 3455 3456 CU_basic_set_mode(CU_BRM_VERBOSE); 3457 CU_basic_run_tests(); 3458 num_failures = CU_get_number_of_failures(); 3459 CU_cleanup_registry(); 3460 3461 free_threads(); 3462 free_cores(); 3463 3464 return num_failures; 3465 } 3466