/*-
 *   BSD LICENSE
 *
 *   Copyright (c) Intel Corporation. All rights reserved.
 *   Copyright (c) 2019 Mellanox Technologies LTD. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "spdk_cunit.h"

#include "common/lib/ut_multithread.c"
#include "unit/lib/json_mock.c"

#include "spdk/config.h"
/* HACK: disable VTune integration so the unit test doesn't need VTune headers and libs to build */
#undef SPDK_CONFIG_VTUNE

#include "bdev/bdev.c"

struct spdk_trace_histories *g_trace_histories;
DEFINE_STUB_V(spdk_trace_add_register_fn, (struct spdk_trace_register_fn *reg_fn));
DEFINE_STUB_V(spdk_trace_register_owner, (uint8_t type, char id_prefix));
DEFINE_STUB_V(spdk_trace_register_object, (uint8_t type, char id_prefix));
DEFINE_STUB_V(spdk_trace_register_description, (const char *name,
		uint16_t tpoint_id, uint8_t owner_type,
		uint8_t object_type, uint8_t new_object,
		uint8_t arg1_type, const char *arg1_name));
DEFINE_STUB_V(_spdk_trace_record, (uint64_t tsc, uint16_t tpoint_id, uint16_t poller_id,
				   uint32_t size, uint64_t object_id, uint64_t arg1));
DEFINE_STUB(spdk_notify_send, uint64_t, (const char *type, const char *ctx), 0);
DEFINE_STUB(spdk_notify_type_register, struct spdk_notify_type *, (const char *type), NULL);

int g_status;
int g_count;
enum spdk_bdev_event_type g_event_type1;
enum spdk_bdev_event_type g_event_type2;
struct spdk_histogram_data *g_histogram;
void *g_unregister_arg;
int g_unregister_rc;

void
spdk_scsi_nvme_translate(const struct spdk_bdev_io *bdev_io,
			 int *sc, int *sk, int *asc, int *ascq)
{
}

static int
null_init(void)
{
	return 0;
}

static int
null_clean(void)
{
	return 0;
}

static int
stub_destruct(void *ctx)
{
	return 0;
}
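/* Harness note: each test queues one ut_expected_io entry per child I/O it
 * expects the bdev layer to submit. stub_submit_request() pops the head of
 * the channel's expected_io queue and asserts that the submitted I/O's type,
 * offset, length, and iovecs match the expectation.
 */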
struct ut_expected_io {
	uint8_t				type;
	uint64_t			offset;
	uint64_t			length;
	int				iovcnt;
	struct iovec			iov[BDEV_IO_NUM_CHILD_IOV];
	void				*md_buf;
	TAILQ_ENTRY(ut_expected_io)	link;
};

struct bdev_ut_channel {
	TAILQ_HEAD(, spdk_bdev_io)	outstanding_io;
	uint32_t			outstanding_io_count;
	TAILQ_HEAD(, ut_expected_io)	expected_io;
};

static bool g_io_done;
static struct spdk_bdev_io *g_bdev_io;
static enum spdk_bdev_io_status g_io_status;
static enum spdk_bdev_io_status g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS;
static uint32_t g_bdev_ut_io_device;
static struct bdev_ut_channel *g_bdev_ut_channel;
static void *g_compare_read_buf;
static uint32_t g_compare_read_buf_len;
static void *g_compare_write_buf;
static uint32_t g_compare_write_buf_len;
static bool g_abort_done;
static enum spdk_bdev_io_status g_abort_status;

static struct ut_expected_io *
ut_alloc_expected_io(uint8_t type, uint64_t offset, uint64_t length, int iovcnt)
{
	struct ut_expected_io *expected_io;

	expected_io = calloc(1, sizeof(*expected_io));
	SPDK_CU_ASSERT_FATAL(expected_io != NULL);

	expected_io->type = type;
	expected_io->offset = offset;
	expected_io->length = length;
	expected_io->iovcnt = iovcnt;

	return expected_io;
}

static void
ut_expected_io_set_iov(struct ut_expected_io *expected_io, int pos, void *base, size_t len)
{
	expected_io->iov[pos].iov_base = base;
	expected_io->iov[pos].iov_len = len;
}

static void
stub_submit_request(struct spdk_io_channel *_ch, struct spdk_bdev_io *bdev_io)
{
	struct bdev_ut_channel *ch = spdk_io_channel_get_ctx(_ch);
	struct ut_expected_io *expected_io;
	struct iovec *iov, *expected_iov;
	struct spdk_bdev_io *bio_to_abort;
	int i;

	g_bdev_io = bdev_io;

	if (g_compare_read_buf && bdev_io->type == SPDK_BDEV_IO_TYPE_READ) {
		uint32_t len = bdev_io->u.bdev.iovs[0].iov_len;

		CU_ASSERT(bdev_io->u.bdev.iovcnt == 1);
		CU_ASSERT(g_compare_read_buf_len == len);
		memcpy(bdev_io->u.bdev.iovs[0].iov_base, g_compare_read_buf, len);
	}

	if (g_compare_write_buf && bdev_io->type == SPDK_BDEV_IO_TYPE_WRITE) {
		uint32_t len = bdev_io->u.bdev.iovs[0].iov_len;

		CU_ASSERT(bdev_io->u.bdev.iovcnt == 1);
		CU_ASSERT(g_compare_write_buf_len == len);
		memcpy(g_compare_write_buf, bdev_io->u.bdev.iovs[0].iov_base, len);
	}

	if (g_compare_read_buf && bdev_io->type == SPDK_BDEV_IO_TYPE_COMPARE) {
		uint32_t len = bdev_io->u.bdev.iovs[0].iov_len;

		CU_ASSERT(bdev_io->u.bdev.iovcnt == 1);
		CU_ASSERT(g_compare_read_buf_len == len);
		if (memcmp(bdev_io->u.bdev.iovs[0].iov_base, g_compare_read_buf, len)) {
			g_io_exp_status = SPDK_BDEV_IO_STATUS_MISCOMPARE;
		}
	}

	if (bdev_io->type == SPDK_BDEV_IO_TYPE_ABORT) {
		if (g_io_exp_status == SPDK_BDEV_IO_STATUS_SUCCESS) {
			TAILQ_FOREACH(bio_to_abort, &ch->outstanding_io, module_link) {
				if (bio_to_abort == bdev_io->u.abort.bio_to_abort) {
					TAILQ_REMOVE(&ch->outstanding_io, bio_to_abort, module_link);
					ch->outstanding_io_count--;
					spdk_bdev_io_complete(bio_to_abort, SPDK_BDEV_IO_STATUS_FAILED);
					break;
				}
			}
		}
	}

	TAILQ_INSERT_TAIL(&ch->outstanding_io, bdev_io, module_link);
	ch->outstanding_io_count++;

	expected_io = TAILQ_FIRST(&ch->expected_io);
	if (expected_io == NULL) {
		return;
	}
	TAILQ_REMOVE(&ch->expected_io, expected_io, link);

	if (expected_io->type != SPDK_BDEV_IO_TYPE_INVALID) {
		CU_ASSERT(bdev_io->type == expected_io->type);
	}
	if (expected_io->md_buf != NULL) {
		CU_ASSERT(expected_io->md_buf == bdev_io->u.bdev.md_buf);
	}

	if (expected_io->length == 0) {
		free(expected_io);
		return;
	}

	CU_ASSERT(expected_io->offset == bdev_io->u.bdev.offset_blocks);
	CU_ASSERT(expected_io->length == bdev_io->u.bdev.num_blocks);

	if (expected_io->iovcnt == 0) {
		free(expected_io);
		/* UNMAP, WRITE_ZEROES and FLUSH don't have iovs, so we can just return now. */
		return;
	}

	CU_ASSERT(expected_io->iovcnt == bdev_io->u.bdev.iovcnt);
	for (i = 0; i < expected_io->iovcnt; i++) {
		iov = &bdev_io->u.bdev.iovs[i];
		expected_iov = &expected_io->iov[i];
		CU_ASSERT(iov->iov_len == expected_iov->iov_len);
		CU_ASSERT(iov->iov_base == expected_iov->iov_base);
	}

	free(expected_io);
}

static void
stub_submit_request_get_buf_cb(struct spdk_io_channel *_ch,
			       struct spdk_bdev_io *bdev_io, bool success)
{
	CU_ASSERT(success == true);

	stub_submit_request(_ch, bdev_io);
}

static void
stub_submit_request_get_buf(struct spdk_io_channel *_ch, struct spdk_bdev_io *bdev_io)
{
	spdk_bdev_io_get_buf(bdev_io, stub_submit_request_get_buf_cb,
			     bdev_io->u.bdev.num_blocks * bdev_io->bdev->blocklen);
}
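/* Complete up to num_to_complete outstanding stub I/Os, each with the status
 * currently held in g_io_exp_status. Returns how many were actually completed,
 * which is smaller if the outstanding queue drains first.
 */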
static uint32_t
stub_complete_io(uint32_t num_to_complete)
{
	struct bdev_ut_channel *ch = g_bdev_ut_channel;
	struct spdk_bdev_io *bdev_io;
	enum spdk_bdev_io_status io_status;
	uint32_t num_completed = 0;

	while (num_completed < num_to_complete) {
		if (TAILQ_EMPTY(&ch->outstanding_io)) {
			break;
		}
		bdev_io = TAILQ_FIRST(&ch->outstanding_io);
		TAILQ_REMOVE(&ch->outstanding_io, bdev_io, module_link);
		ch->outstanding_io_count--;
		io_status = g_io_exp_status;
		spdk_bdev_io_complete(bdev_io, io_status);
		num_completed++;
	}

	return num_completed;
}

static struct spdk_io_channel *
bdev_ut_get_io_channel(void *ctx)
{
	return spdk_get_io_channel(&g_bdev_ut_io_device);
}

static bool g_io_types_supported[SPDK_BDEV_NUM_IO_TYPES] = {
	[SPDK_BDEV_IO_TYPE_READ]		= true,
	[SPDK_BDEV_IO_TYPE_WRITE]		= true,
	[SPDK_BDEV_IO_TYPE_COMPARE]		= true,
	[SPDK_BDEV_IO_TYPE_UNMAP]		= true,
	[SPDK_BDEV_IO_TYPE_FLUSH]		= true,
	[SPDK_BDEV_IO_TYPE_RESET]		= true,
	[SPDK_BDEV_IO_TYPE_NVME_ADMIN]		= true,
	[SPDK_BDEV_IO_TYPE_NVME_IO]		= true,
	[SPDK_BDEV_IO_TYPE_NVME_IO_MD]		= true,
	[SPDK_BDEV_IO_TYPE_WRITE_ZEROES]	= true,
	[SPDK_BDEV_IO_TYPE_ZCOPY]		= true,
	[SPDK_BDEV_IO_TYPE_ABORT]		= true,
};

static void
ut_enable_io_type(enum spdk_bdev_io_type io_type, bool enable)
{
	g_io_types_supported[io_type] = enable;
}

static bool
stub_io_type_supported(void *_bdev, enum spdk_bdev_io_type io_type)
{
	return g_io_types_supported[io_type];
}

static struct spdk_bdev_fn_table fn_table = {
	.destruct = stub_destruct,
	.submit_request = stub_submit_request,
	.get_io_channel = bdev_ut_get_io_channel,
	.io_type_supported = stub_io_type_supported,
};

static int
bdev_ut_create_ch(void *io_device, void *ctx_buf)
{
	struct bdev_ut_channel *ch = ctx_buf;

	CU_ASSERT(g_bdev_ut_channel == NULL);
	g_bdev_ut_channel = ch;

	TAILQ_INIT(&ch->outstanding_io);
	ch->outstanding_io_count = 0;
	TAILQ_INIT(&ch->expected_io);
	return 0;
}

static void
bdev_ut_destroy_ch(void *io_device, void *ctx_buf)
{
	CU_ASSERT(g_bdev_ut_channel != NULL);
	g_bdev_ut_channel = NULL;
}

struct spdk_bdev_module bdev_ut_if;

static int
bdev_ut_module_init(void)
{
	spdk_io_device_register(&g_bdev_ut_io_device, bdev_ut_create_ch, bdev_ut_destroy_ch,
				sizeof(struct bdev_ut_channel), NULL);
	spdk_bdev_module_init_done(&bdev_ut_if);
	return 0;
}

static void
bdev_ut_module_fini(void)
{
	spdk_io_device_unregister(&g_bdev_ut_io_device, NULL);
}

struct spdk_bdev_module bdev_ut_if = {
	.name = "bdev_ut",
	.module_init = bdev_ut_module_init,
	.module_fini = bdev_ut_module_fini,
	.async_init = true,
};

static void vbdev_ut_examine(struct spdk_bdev *bdev);

static int
vbdev_ut_module_init(void)
{
	return 0;
}

static void
vbdev_ut_module_fini(void)
{
}

struct spdk_bdev_module vbdev_ut_if = {
	.name = "vbdev_ut",
	.module_init = vbdev_ut_module_init,
	.module_fini = vbdev_ut_module_fini,
	.examine_config = vbdev_ut_examine,
};

SPDK_BDEV_MODULE_REGISTER(bdev_ut, &bdev_ut_if)
SPDK_BDEV_MODULE_REGISTER(vbdev_ut, &vbdev_ut_if)

static void
vbdev_ut_examine(struct spdk_bdev *bdev)
{
	spdk_bdev_module_examine_done(&vbdev_ut_if);
}

static struct spdk_bdev *
allocate_bdev(char *name)
{
	struct spdk_bdev *bdev;
	int rc;

	bdev = calloc(1, sizeof(*bdev));
	SPDK_CU_ASSERT_FATAL(bdev != NULL);

	bdev->name = name;
	bdev->fn_table = &fn_table;
	bdev->module = &bdev_ut_if;
	bdev->blockcnt = 1024;
	bdev->blocklen = 512;

	rc = spdk_bdev_register(bdev);
	CU_ASSERT(rc == 0);

	return bdev;
}
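/* Unlike allocate_bdev(), allocate_vbdev() leaves blockcnt/blocklen at zero
 * (from calloc) because the vbdevs in these tests serve only as claim/open
 * targets, never as I/O targets, and assigns them to the vbdev_ut module.
 */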
static struct spdk_bdev *
allocate_vbdev(char *name)
{
	struct spdk_bdev *bdev;
	int rc;

	bdev = calloc(1, sizeof(*bdev));
	SPDK_CU_ASSERT_FATAL(bdev != NULL);

	bdev->name = name;
	bdev->fn_table = &fn_table;
	bdev->module = &vbdev_ut_if;

	rc = spdk_bdev_register(bdev);
	CU_ASSERT(rc == 0);

	return bdev;
}

static void
free_bdev(struct spdk_bdev *bdev)
{
	spdk_bdev_unregister(bdev, NULL, NULL);
	poll_threads();
	memset(bdev, 0xFF, sizeof(*bdev));
	free(bdev);
}

static void
free_vbdev(struct spdk_bdev *bdev)
{
	spdk_bdev_unregister(bdev, NULL, NULL);
	poll_threads();
	memset(bdev, 0xFF, sizeof(*bdev));
	free(bdev);
}

static void
get_device_stat_cb(struct spdk_bdev *bdev, struct spdk_bdev_io_stat *stat, void *cb_arg, int rc)
{
	const char *bdev_name;

	CU_ASSERT(bdev != NULL);
	CU_ASSERT(rc == 0);
	bdev_name = spdk_bdev_get_name(bdev);
	CU_ASSERT_STRING_EQUAL(bdev_name, "bdev0");

	free(stat);
	free_bdev(bdev);

	*(bool *)cb_arg = true;
}

static void
bdev_unregister_cb(void *cb_arg, int rc)
{
	g_unregister_arg = cb_arg;
	g_unregister_rc = rc;
}

static void
bdev_ut_event_cb(enum spdk_bdev_event_type type, struct spdk_bdev *bdev, void *event_ctx)
{
}

static void
bdev_open_cb1(enum spdk_bdev_event_type type, struct spdk_bdev *bdev, void *event_ctx)
{
	struct spdk_bdev_desc *desc = *(struct spdk_bdev_desc **)event_ctx;

	g_event_type1 = type;
	if (SPDK_BDEV_EVENT_REMOVE == type) {
		spdk_bdev_close(desc);
	}
}

static void
bdev_open_cb2(enum spdk_bdev_event_type type, struct spdk_bdev *bdev, void *event_ctx)
{
	struct spdk_bdev_desc *desc = *(struct spdk_bdev_desc **)event_ctx;

	g_event_type2 = type;
	if (SPDK_BDEV_EVENT_REMOVE == type) {
		spdk_bdev_close(desc);
	}
}

static void
get_device_stat_test(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_io_stat *stat;
	bool done;

	bdev = allocate_bdev("bdev0");
	stat = calloc(1, sizeof(struct spdk_bdev_io_stat));
	if (stat == NULL) {
		free_bdev(bdev);
		return;
	}

	done = false;
	spdk_bdev_get_device_stat(bdev, stat, get_device_stat_cb, &done);
	while (!done) {
		poll_threads();
	}
}
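/* Claim semantics under test below: a bdev claimed via
 * spdk_bdev_module_claim_bdev() can still be opened read-only, but opening it
 * with write == true fails with -EPERM; only unclaimed (leaf) bdevs accept
 * writers.
 */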
static void
open_write_test(void)
{
	struct spdk_bdev *bdev[9];
	struct spdk_bdev_desc *desc[9] = {};
	int rc;

	/*
	 * Create a tree of bdevs to test various open w/ write cases.
	 *
	 * bdev0 through bdev3 are physical block devices, such as NVMe
	 * namespaces or Ceph block devices.
	 *
	 * bdev4 is a virtual bdev with multiple base bdevs. This models
	 * caching or RAID use cases.
	 *
	 * bdev5 through bdev7 are all virtual bdevs with the same base
	 * bdev (bdev7 also has a second base bdev). This models
	 * partitioning or logical volume use cases.
	 *
	 * bdev7 is a virtual bdev with multiple base bdevs. One of its base
	 * bdevs (bdev2) is shared with other virtual bdevs: bdev5 and bdev6.
	 * This models caching, RAID, partitioning or logical volume use cases.
	 *
	 * bdev8 is a virtual bdev with multiple base bdevs, but these
	 * base bdevs are themselves virtual bdevs.
	 *
	 *                bdev8
	 *                  |
	 *            +----------+
	 *            |          |
	 *          bdev4      bdev5   bdev6   bdev7
	 *            |          |       |       |
	 *        +---+---+    +---+     +   +---+---+
	 *        |       |        \     |  /        \
	 *      bdev0   bdev1       bdev2          bdev3
	 */

	bdev[0] = allocate_bdev("bdev0");
	rc = spdk_bdev_module_claim_bdev(bdev[0], NULL, &bdev_ut_if);
	CU_ASSERT(rc == 0);

	bdev[1] = allocate_bdev("bdev1");
	rc = spdk_bdev_module_claim_bdev(bdev[1], NULL, &bdev_ut_if);
	CU_ASSERT(rc == 0);

	bdev[2] = allocate_bdev("bdev2");
	rc = spdk_bdev_module_claim_bdev(bdev[2], NULL, &bdev_ut_if);
	CU_ASSERT(rc == 0);

	bdev[3] = allocate_bdev("bdev3");
	rc = spdk_bdev_module_claim_bdev(bdev[3], NULL, &bdev_ut_if);
	CU_ASSERT(rc == 0);

	bdev[4] = allocate_vbdev("bdev4");
	rc = spdk_bdev_module_claim_bdev(bdev[4], NULL, &bdev_ut_if);
	CU_ASSERT(rc == 0);

	bdev[5] = allocate_vbdev("bdev5");
	rc = spdk_bdev_module_claim_bdev(bdev[5], NULL, &bdev_ut_if);
	CU_ASSERT(rc == 0);

	bdev[6] = allocate_vbdev("bdev6");

	bdev[7] = allocate_vbdev("bdev7");

	bdev[8] = allocate_vbdev("bdev8");

	/* Open bdev0 read-only. This should succeed. */
	rc = spdk_bdev_open_ext("bdev0", false, bdev_ut_event_cb, NULL, &desc[0]);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(desc[0] != NULL);
	CU_ASSERT(bdev[0] == spdk_bdev_desc_get_bdev(desc[0]));
	spdk_bdev_close(desc[0]);

	/*
	 * Open bdev1 read/write. This should fail since bdev1 has been claimed
	 * by a vbdev module.
	 */
	rc = spdk_bdev_open_ext("bdev1", true, bdev_ut_event_cb, NULL, &desc[1]);
	CU_ASSERT(rc == -EPERM);

	/*
	 * Open bdev4 read/write. This should fail since bdev4 has been claimed
	 * by a vbdev module.
	 */
	rc = spdk_bdev_open_ext("bdev4", true, bdev_ut_event_cb, NULL, &desc[4]);
	CU_ASSERT(rc == -EPERM);

	/* Open bdev4 read-only. This should succeed. */
	rc = spdk_bdev_open_ext("bdev4", false, bdev_ut_event_cb, NULL, &desc[4]);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(desc[4] != NULL);
	CU_ASSERT(bdev[4] == spdk_bdev_desc_get_bdev(desc[4]));
	spdk_bdev_close(desc[4]);

	/*
	 * Open bdev8 read/write. This should succeed since it is a leaf
	 * bdev.
	 */
	rc = spdk_bdev_open_ext("bdev8", true, bdev_ut_event_cb, NULL, &desc[8]);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(desc[8] != NULL);
	CU_ASSERT(bdev[8] == spdk_bdev_desc_get_bdev(desc[8]));
	spdk_bdev_close(desc[8]);

	/*
	 * Open bdev5 read/write. This should fail since bdev5 has been claimed
	 * by a vbdev module.
	 */
	rc = spdk_bdev_open_ext("bdev5", true, bdev_ut_event_cb, NULL, &desc[5]);
	CU_ASSERT(rc == -EPERM);
	/* Open bdev5 read-only. This should succeed. */
	rc = spdk_bdev_open_ext("bdev5", false, bdev_ut_event_cb, NULL, &desc[5]);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(desc[5] != NULL);
	CU_ASSERT(bdev[5] == spdk_bdev_desc_get_bdev(desc[5]));
	spdk_bdev_close(desc[5]);

	free_vbdev(bdev[8]);

	free_vbdev(bdev[5]);
	free_vbdev(bdev[6]);
	free_vbdev(bdev[7]);

	free_vbdev(bdev[4]);

	free_bdev(bdev[0]);
	free_bdev(bdev[1]);
	free_bdev(bdev[2]);
	free_bdev(bdev[3]);
}

static void
bytes_to_blocks_test(void)
{
	struct spdk_bdev bdev;
	uint64_t offset_blocks, num_blocks;

	memset(&bdev, 0, sizeof(bdev));

	bdev.blocklen = 512;

	/* All parameters valid */
	offset_blocks = 0;
	num_blocks = 0;
	CU_ASSERT(bdev_bytes_to_blocks(&bdev, 512, &offset_blocks, 1024, &num_blocks) == 0);
	CU_ASSERT(offset_blocks == 1);
	CU_ASSERT(num_blocks == 2);

	/* Offset not a block multiple */
	CU_ASSERT(bdev_bytes_to_blocks(&bdev, 3, &offset_blocks, 512, &num_blocks) != 0);

	/* Length not a block multiple */
	CU_ASSERT(bdev_bytes_to_blocks(&bdev, 512, &offset_blocks, 3, &num_blocks) != 0);

	/* In case blocklen is not a power of two */
	bdev.blocklen = 100;
	CU_ASSERT(bdev_bytes_to_blocks(&bdev, 100, &offset_blocks, 200, &num_blocks) == 0);
	CU_ASSERT(offset_blocks == 1);
	CU_ASSERT(num_blocks == 2);

	/* Offset not a block multiple */
	CU_ASSERT(bdev_bytes_to_blocks(&bdev, 3, &offset_blocks, 100, &num_blocks) != 0);

	/* Length not a block multiple */
	CU_ASSERT(bdev_bytes_to_blocks(&bdev, 100, &offset_blocks, 3, &num_blocks) != 0);
}

static void
num_blocks_test(void)
{
	struct spdk_bdev bdev;
	struct spdk_bdev_desc *desc = NULL;
	struct spdk_bdev_desc *desc_ext = NULL;
	int rc;

	memset(&bdev, 0, sizeof(bdev));
	bdev.name = "num_blocks";
	bdev.fn_table = &fn_table;
	bdev.module = &bdev_ut_if;
	spdk_bdev_register(&bdev);
	spdk_bdev_notify_blockcnt_change(&bdev, 50);

	/* Growing block number */
	CU_ASSERT(spdk_bdev_notify_blockcnt_change(&bdev, 70) == 0);
	/* Shrinking block number */
	CU_ASSERT(spdk_bdev_notify_blockcnt_change(&bdev, 30) == 0);

	/* In case bdev opened */
	rc = spdk_bdev_open(&bdev, false, NULL, NULL, &desc);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(desc != NULL);

	/* Growing block number */
	CU_ASSERT(spdk_bdev_notify_blockcnt_change(&bdev, 80) == 0);
	/* Shrinking block number */
	CU_ASSERT(spdk_bdev_notify_blockcnt_change(&bdev, 20) != 0);

	/* In case bdev opened with ext API */
	rc = spdk_bdev_open_ext("num_blocks", false, bdev_open_cb1, &desc_ext, &desc_ext);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(desc_ext != NULL);
	CU_ASSERT(&bdev == spdk_bdev_desc_get_bdev(desc_ext));

	g_event_type1 = 0xFF;
	/* Growing block number */
	CU_ASSERT(spdk_bdev_notify_blockcnt_change(&bdev, 90) == 0);

	poll_threads();
	CU_ASSERT_EQUAL(g_event_type1, SPDK_BDEV_EVENT_RESIZE);

	g_event_type1 = 0xFF;
	/* Growing block number and closing */
	CU_ASSERT(spdk_bdev_notify_blockcnt_change(&bdev, 100) == 0);

	spdk_bdev_close(desc);
	spdk_bdev_close(desc_ext);
	spdk_bdev_unregister(&bdev, NULL, NULL);

	poll_threads();

	/* Callback is not called for closed device */
	CU_ASSERT_EQUAL(g_event_type1, 0xFF);
}
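/* bdev_io_valid_blocks() must reject any range that starts at or extends past
 * blockcnt, including offsets large enough to overflow uint64_t arithmetic.
 */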
static void
io_valid_test(void)
{
	struct spdk_bdev bdev;

	memset(&bdev, 0, sizeof(bdev));

	bdev.blocklen = 512;
	spdk_bdev_notify_blockcnt_change(&bdev, 100);

	/* All parameters valid */
	CU_ASSERT(bdev_io_valid_blocks(&bdev, 1, 2) == true);

	/* Last valid block */
	CU_ASSERT(bdev_io_valid_blocks(&bdev, 99, 1) == true);

	/* Offset past end of bdev */
	CU_ASSERT(bdev_io_valid_blocks(&bdev, 100, 1) == false);

	/* Offset + length past end of bdev */
	CU_ASSERT(bdev_io_valid_blocks(&bdev, 99, 2) == false);

	/* Offset near end of uint64_t range (2^64 - 1) */
	CU_ASSERT(bdev_io_valid_blocks(&bdev, 18446744073709551615ULL, 1) == false);
}

static void
alias_add_del_test(void)
{
	struct spdk_bdev *bdev[3];
	int rc;

	/* Creating and registering bdevs */
	bdev[0] = allocate_bdev("bdev0");
	SPDK_CU_ASSERT_FATAL(bdev[0] != 0);

	bdev[1] = allocate_bdev("bdev1");
	SPDK_CU_ASSERT_FATAL(bdev[1] != 0);

	bdev[2] = allocate_bdev("bdev2");
	SPDK_CU_ASSERT_FATAL(bdev[2] != 0);

	poll_threads();

	/*
	 * Try adding an alias identical to the name. Since the alias is
	 * identical to the name, it cannot be added to the aliases list.
	 */
	rc = spdk_bdev_alias_add(bdev[0], bdev[0]->name);
	CU_ASSERT(rc == -EEXIST);

	/*
	 * Try to add an empty alias;
	 * this one should fail.
	 */
	rc = spdk_bdev_alias_add(bdev[0], NULL);
	CU_ASSERT(rc == -EINVAL);

	/* Try adding the same alias to two different registered bdevs */

	/* Alias is used for the first time, so this one should pass */
	rc = spdk_bdev_alias_add(bdev[0], "proper alias 0");
	CU_ASSERT(rc == 0);

	/* Alias was already added to another bdev, so this one should fail */
	rc = spdk_bdev_alias_add(bdev[1], "proper alias 0");
	CU_ASSERT(rc == -EEXIST);

	/* Alias is used for the first time, so this one should pass */
	rc = spdk_bdev_alias_add(bdev[1], "proper alias 1");
	CU_ASSERT(rc == 0);

	/* Try removing an alias from registered bdevs */

	/* Alias is not on a bdev aliases list, so this one should fail */
	rc = spdk_bdev_alias_del(bdev[0], "not existing");
	CU_ASSERT(rc == -ENOENT);

	/* Alias is present on a bdev aliases list, so this one should pass */
	rc = spdk_bdev_alias_del(bdev[0], "proper alias 0");
	CU_ASSERT(rc == 0);

	/* Alias is present on a bdev aliases list, so this one should pass */
	rc = spdk_bdev_alias_del(bdev[1], "proper alias 1");
	CU_ASSERT(rc == 0);

	/* Try removing the name instead of an alias; this should fail since the
	 * name cannot be changed or removed */
	rc = spdk_bdev_alias_del(bdev[0], bdev[0]->name);
	CU_ASSERT(rc != 0);

	/* Try deleting all aliases from an empty alias list */
	spdk_bdev_alias_del_all(bdev[2]);
	SPDK_CU_ASSERT_FATAL(TAILQ_EMPTY(&bdev[2]->aliases));

	/* Try deleting all aliases from a non-empty alias list */
	rc = spdk_bdev_alias_add(bdev[2], "alias0");
	CU_ASSERT(rc == 0);
	rc = spdk_bdev_alias_add(bdev[2], "alias1");
	CU_ASSERT(rc == 0);
	spdk_bdev_alias_del_all(bdev[2]);
	CU_ASSERT(TAILQ_EMPTY(&bdev[2]->aliases));

	/* Unregister and free bdevs */
	spdk_bdev_unregister(bdev[0], NULL, NULL);
	spdk_bdev_unregister(bdev[1], NULL, NULL);
	spdk_bdev_unregister(bdev[2], NULL, NULL);

	poll_threads();

	free(bdev[0]);
	free(bdev[1]);
	free(bdev[2]);
}
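/* Shared parent-I/O completion callback: records the completion and its
 * status in globals so test bodies can assert exactly when and how the
 * parent I/O finished.
 */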
static void
io_done(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
{
	g_io_done = true;
	g_io_status = bdev_io->internal.status;
	spdk_bdev_free_io(bdev_io);
}

static void
bdev_init_cb(void *arg, int rc)
{
	CU_ASSERT(rc == 0);
}

static void
bdev_fini_cb(void *arg)
{
}

struct bdev_ut_io_wait_entry {
	struct spdk_bdev_io_wait_entry	entry;
	struct spdk_io_channel		*io_ch;
	struct spdk_bdev_desc		*desc;
	bool				submitted;
};

static void
io_wait_cb(void *arg)
{
	struct bdev_ut_io_wait_entry *entry = arg;
	int rc;

	rc = spdk_bdev_read_blocks(entry->desc, entry->io_ch, NULL, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	entry->submitted = true;
}

static void
bdev_io_types_test(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc = NULL;
	struct spdk_io_channel *io_ch;
	struct spdk_bdev_opts bdev_opts = {
		.bdev_io_pool_size = 4,
		.bdev_io_cache_size = 2,
	};
	int rc;

	rc = spdk_bdev_set_opts(&bdev_opts);
	CU_ASSERT(rc == 0);
	spdk_bdev_initialize(bdev_init_cb, NULL);
	poll_threads();

	bdev = allocate_bdev("bdev0");

	rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc);
	CU_ASSERT(rc == 0);
	poll_threads();
	SPDK_CU_ASSERT_FATAL(desc != NULL);
	CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc));
	io_ch = spdk_bdev_get_io_channel(desc);
	CU_ASSERT(io_ch != NULL);

	/* WRITE and WRITE ZEROES are not supported */
	ut_enable_io_type(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, false);
	ut_enable_io_type(SPDK_BDEV_IO_TYPE_WRITE, false);
	rc = spdk_bdev_write_zeroes_blocks(desc, io_ch, 0, 128, io_done, NULL);
	CU_ASSERT(rc == -ENOTSUP);
	ut_enable_io_type(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, true);
	ut_enable_io_type(SPDK_BDEV_IO_TYPE_WRITE, true);

	spdk_put_io_channel(io_ch);
	spdk_bdev_close(desc);
	free_bdev(bdev);
	spdk_bdev_finish(bdev_fini_cb, NULL);
	poll_threads();
}

static void
bdev_io_wait_test(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc = NULL;
	struct spdk_io_channel *io_ch;
	struct spdk_bdev_opts bdev_opts = {
		.bdev_io_pool_size = 4,
		.bdev_io_cache_size = 2,
	};
	struct bdev_ut_io_wait_entry io_wait_entry;
	struct bdev_ut_io_wait_entry io_wait_entry2;
	int rc;

	rc = spdk_bdev_set_opts(&bdev_opts);
	CU_ASSERT(rc == 0);
	spdk_bdev_initialize(bdev_init_cb, NULL);
	poll_threads();

	bdev = allocate_bdev("bdev0");

	rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc);
	CU_ASSERT(rc == 0);
	poll_threads();
	SPDK_CU_ASSERT_FATAL(desc != NULL);
	CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc));
	io_ch = spdk_bdev_get_io_channel(desc);
	CU_ASSERT(io_ch != NULL);

	rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 4);

	rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, NULL);
	CU_ASSERT(rc == -ENOMEM);

	io_wait_entry.entry.bdev = bdev;
	io_wait_entry.entry.cb_fn = io_wait_cb;
	io_wait_entry.entry.cb_arg = &io_wait_entry;
	io_wait_entry.io_ch = io_ch;
	io_wait_entry.desc = desc;
	io_wait_entry.submitted = false;
	/* Cannot use the same io_wait_entry for two different calls. */
	memcpy(&io_wait_entry2, &io_wait_entry, sizeof(io_wait_entry));
	io_wait_entry2.entry.cb_arg = &io_wait_entry2;

	/* Queue two I/O waits. */
	rc = spdk_bdev_queue_io_wait(bdev, io_ch, &io_wait_entry.entry);
	CU_ASSERT(rc == 0);
	CU_ASSERT(io_wait_entry.submitted == false);
	rc = spdk_bdev_queue_io_wait(bdev, io_ch, &io_wait_entry2.entry);
	CU_ASSERT(rc == 0);
	CU_ASSERT(io_wait_entry2.submitted == false);

	stub_complete_io(1);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 4);
	CU_ASSERT(io_wait_entry.submitted == true);
	CU_ASSERT(io_wait_entry2.submitted == false);

	stub_complete_io(1);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 4);
	CU_ASSERT(io_wait_entry2.submitted == true);

	stub_complete_io(4);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);

	spdk_put_io_channel(io_ch);
	spdk_bdev_close(desc);
	free_bdev(bdev);
	spdk_bdev_finish(bdev_fini_cb, NULL);
	poll_threads();
}

static void
bdev_io_spans_boundary_test(void)
{
	struct spdk_bdev bdev;
	struct spdk_bdev_io bdev_io;

	memset(&bdev, 0, sizeof(bdev));

	bdev.optimal_io_boundary = 0;
	bdev_io.bdev = &bdev;

	/* bdev has no optimal_io_boundary set - so this should return false. */
	CU_ASSERT(bdev_io_should_split(&bdev_io) == false);

	bdev.optimal_io_boundary = 32;
	bdev_io.type = SPDK_BDEV_IO_TYPE_RESET;

	/* RESETs are not based on LBAs - so this should return false. */
	CU_ASSERT(bdev_io_should_split(&bdev_io) == false);

	bdev_io.type = SPDK_BDEV_IO_TYPE_READ;
	bdev_io.u.bdev.offset_blocks = 0;
	bdev_io.u.bdev.num_blocks = 32;

	/* This I/O runs right up to, but does not cross, the boundary - so this should return false. */
	CU_ASSERT(bdev_io_should_split(&bdev_io) == false);

	bdev_io.u.bdev.num_blocks = 33;

	/* This I/O spans a boundary. */
	CU_ASSERT(bdev_io_should_split(&bdev_io) == true);
}

static void
bdev_io_split_test(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc = NULL;
	struct spdk_io_channel *io_ch;
	struct spdk_bdev_opts bdev_opts = {
		.bdev_io_pool_size = 512,
		.bdev_io_cache_size = 64,
	};
	struct iovec iov[BDEV_IO_NUM_CHILD_IOV * 2];
	struct ut_expected_io *expected_io;
	uint64_t i;
	int rc;

	rc = spdk_bdev_set_opts(&bdev_opts);
	CU_ASSERT(rc == 0);
	spdk_bdev_initialize(bdev_init_cb, NULL);

	bdev = allocate_bdev("bdev0");

	rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(desc != NULL);
	CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc));
	io_ch = spdk_bdev_get_io_channel(desc);
	CU_ASSERT(io_ch != NULL);

	bdev->optimal_io_boundary = 16;
	bdev->split_on_optimal_io_boundary = false;

	g_io_done = false;
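	/* The cases below exercise the optimal_io_boundary splitting logic. With
	 * blocklen 512 and optimal_io_boundary 16, a boundary falls every 16 blocks
	 * (8 KiB); for each parent I/O the expected child I/Os are queued on the
	 * channel ahead of submission and verified by stub_submit_request().
	 */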
	/* First test that the I/O does not get split if split_on_optimal_io_boundary == false. */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 14, 8, 1);
	ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 8 * 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 8, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);

	bdev->split_on_optimal_io_boundary = true;

	/* Now test that a single-vector command is split correctly.
	 * Offset 14, length 8, payload 0xF000
	 *  Child - Offset 14, length 2, payload 0xF000
	 *  Child - Offset 16, length 6, payload 0xF000 + 2 * 512
	 *
	 * Set up the expected values before calling spdk_bdev_read_blocks
	 */
	g_io_done = false;
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 14, 2, 1);
	ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 2 * 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 16, 6, 1);
	ut_expected_io_set_iov(expected_io, 0, (void *)(0xF000 + 2 * 512), 6 * 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	/* spdk_bdev_read_blocks will submit the first child immediately. */
	rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 8, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2);
	stub_complete_io(2);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);

	/* Now set up a more complex, multi-vector command that needs to be split,
	 * including splitting iovecs.
	 */
	iov[0].iov_base = (void *)0x10000;
	iov[0].iov_len = 512;
	iov[1].iov_base = (void *)0x20000;
	iov[1].iov_len = 20 * 512;
	iov[2].iov_base = (void *)0x30000;
	iov[2].iov_len = 11 * 512;

	g_io_done = false;
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 14, 2, 2);
	ut_expected_io_set_iov(expected_io, 0, (void *)0x10000, 512);
	ut_expected_io_set_iov(expected_io, 1, (void *)0x20000, 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 16, 16, 1);
	ut_expected_io_set_iov(expected_io, 0, (void *)(0x20000 + 512), 16 * 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 32, 14, 2);
	ut_expected_io_set_iov(expected_io, 0, (void *)(0x20000 + 17 * 512), 3 * 512);
	ut_expected_io_set_iov(expected_io, 1, (void *)0x30000, 11 * 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	rc = spdk_bdev_writev_blocks(desc, io_ch, iov, 3, 14, 32, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 3);
	stub_complete_io(3);
	CU_ASSERT(g_io_done == true);
	/* Test a multi-vector command that needs to be split by strip (the optimal
	 * I/O boundary) and then split further due to the capacity of child iovs.
	 */
	for (i = 0; i < BDEV_IO_NUM_CHILD_IOV * 2; i++) {
		iov[i].iov_base = (void *)((i + 1) * 0x10000);
		iov[i].iov_len = 512;
	}

	bdev->optimal_io_boundary = BDEV_IO_NUM_CHILD_IOV;
	g_io_done = false;
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0, BDEV_IO_NUM_CHILD_IOV,
					   BDEV_IO_NUM_CHILD_IOV);
	for (i = 0; i < BDEV_IO_NUM_CHILD_IOV; i++) {
		ut_expected_io_set_iov(expected_io, i, (void *)((i + 1) * 0x10000), 512);
	}
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, BDEV_IO_NUM_CHILD_IOV,
					   BDEV_IO_NUM_CHILD_IOV, BDEV_IO_NUM_CHILD_IOV);
	for (i = 0; i < BDEV_IO_NUM_CHILD_IOV; i++) {
		ut_expected_io_set_iov(expected_io, i,
				       (void *)((i + 1 + BDEV_IO_NUM_CHILD_IOV) * 0x10000), 512);
	}
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	rc = spdk_bdev_readv_blocks(desc, io_ch, iov, BDEV_IO_NUM_CHILD_IOV * 2, 0,
				    BDEV_IO_NUM_CHILD_IOV * 2, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);

	/* Test a multi-vector command that needs to be split by strip and then split
	 * further due to the capacity of child iovs. In this case, the length of the
	 * remaining iovecs within an I/O boundary is a multiple of the block size.
	 */
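	/* Layout arithmetic for this case, assuming BDEV_IO_NUM_CHILD_IOV is 32 (its
	 * value in bdev.c): 30 iovecs of 512 bytes plus two of 256 bytes cover 31
	 * blocks in exactly 32 iovecs. Reaching the 32-block boundary would need a
	 * 33rd iovec, so the first child stops at 31 blocks and the remaining blocks
	 * are carried by the following children.
	 */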
	/* Fill the iovec array for exactly one boundary. The iovec cnt for this boundary
	 * is BDEV_IO_NUM_CHILD_IOV + 1, which exceeds the capacity of child iovs.
	 */
	for (i = 0; i < BDEV_IO_NUM_CHILD_IOV - 2; i++) {
		iov[i].iov_base = (void *)((i + 1) * 0x10000);
		iov[i].iov_len = 512;
	}
	for (i = BDEV_IO_NUM_CHILD_IOV - 2; i < BDEV_IO_NUM_CHILD_IOV; i++) {
		iov[i].iov_base = (void *)((i + 1) * 0x10000);
		iov[i].iov_len = 256;
	}
	iov[BDEV_IO_NUM_CHILD_IOV].iov_base = (void *)((BDEV_IO_NUM_CHILD_IOV + 1) * 0x10000);
	iov[BDEV_IO_NUM_CHILD_IOV].iov_len = 512;

	/* Add an extra iovec to trigger split */
	iov[BDEV_IO_NUM_CHILD_IOV + 1].iov_base = (void *)((BDEV_IO_NUM_CHILD_IOV + 2) * 0x10000);
	iov[BDEV_IO_NUM_CHILD_IOV + 1].iov_len = 512;

	bdev->optimal_io_boundary = BDEV_IO_NUM_CHILD_IOV;
	g_io_done = false;
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0,
					   BDEV_IO_NUM_CHILD_IOV - 1, BDEV_IO_NUM_CHILD_IOV);
	for (i = 0; i < BDEV_IO_NUM_CHILD_IOV - 2; i++) {
		ut_expected_io_set_iov(expected_io, i,
				       (void *)((i + 1) * 0x10000), 512);
	}
	for (i = BDEV_IO_NUM_CHILD_IOV - 2; i < BDEV_IO_NUM_CHILD_IOV; i++) {
		ut_expected_io_set_iov(expected_io, i,
				       (void *)((i + 1) * 0x10000), 256);
	}
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, BDEV_IO_NUM_CHILD_IOV - 1,
					   1, 1);
	ut_expected_io_set_iov(expected_io, 0,
			       (void *)((BDEV_IO_NUM_CHILD_IOV + 1) * 0x10000), 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, BDEV_IO_NUM_CHILD_IOV,
					   1, 1);
	ut_expected_io_set_iov(expected_io, 0,
			       (void *)((BDEV_IO_NUM_CHILD_IOV + 2) * 0x10000), 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	rc = spdk_bdev_readv_blocks(desc, io_ch, iov, BDEV_IO_NUM_CHILD_IOV + 2, 0,
				    BDEV_IO_NUM_CHILD_IOV + 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2);
	stub_complete_io(2);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
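	/* In the next case the iovec that would complete the first child is only
	 * half a block, so filling all child iovecs would end mid-block. The split
	 * is therefore rewound to the last aligned offset: child 0 carries
	 * BDEV_IO_NUM_CHILD_IOV - 1 full-block iovecs, child 1 joins the two
	 * 256-byte iovecs into one block, and child 2 takes the final 512-byte iovec.
	 */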
	/* Test a multi-vector command that needs to be split by strip and then split
	 * further due to the capacity of child iovs. The child request offset should
	 * be rewound to the last aligned offset so that the request succeeds without
	 * error.
	 */
	for (i = 0; i < BDEV_IO_NUM_CHILD_IOV - 1; i++) {
		iov[i].iov_base = (void *)((i + 1) * 0x10000);
		iov[i].iov_len = 512;
	}
	iov[BDEV_IO_NUM_CHILD_IOV - 1].iov_base = (void *)(BDEV_IO_NUM_CHILD_IOV * 0x10000);
	iov[BDEV_IO_NUM_CHILD_IOV - 1].iov_len = 256;

	iov[BDEV_IO_NUM_CHILD_IOV].iov_base = (void *)((BDEV_IO_NUM_CHILD_IOV + 1) * 0x10000);
	iov[BDEV_IO_NUM_CHILD_IOV].iov_len = 256;

	iov[BDEV_IO_NUM_CHILD_IOV + 1].iov_base = (void *)((BDEV_IO_NUM_CHILD_IOV + 2) * 0x10000);
	iov[BDEV_IO_NUM_CHILD_IOV + 1].iov_len = 512;

	bdev->optimal_io_boundary = BDEV_IO_NUM_CHILD_IOV;
	g_io_done = false;
	g_io_status = 0;
	/* The first expected I/O runs from offset 0 to BDEV_IO_NUM_CHILD_IOV - 1 */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0,
					   BDEV_IO_NUM_CHILD_IOV - 1, BDEV_IO_NUM_CHILD_IOV - 1);
	for (i = 0; i < BDEV_IO_NUM_CHILD_IOV - 1; i++) {
		ut_expected_io_set_iov(expected_io, i,
				       (void *)((i + 1) * 0x10000), 512);
	}
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
	/* The second expected I/O runs from offset BDEV_IO_NUM_CHILD_IOV - 1 to BDEV_IO_NUM_CHILD_IOV */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, BDEV_IO_NUM_CHILD_IOV - 1,
					   1, 2);
	ut_expected_io_set_iov(expected_io, 0,
			       (void *)(BDEV_IO_NUM_CHILD_IOV * 0x10000), 256);
	ut_expected_io_set_iov(expected_io, 1,
			       (void *)((BDEV_IO_NUM_CHILD_IOV + 1) * 0x10000), 256);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
	/* The third expected I/O runs from offset BDEV_IO_NUM_CHILD_IOV to BDEV_IO_NUM_CHILD_IOV + 1 */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, BDEV_IO_NUM_CHILD_IOV,
					   1, 1);
	ut_expected_io_set_iov(expected_io, 0,
			       (void *)((BDEV_IO_NUM_CHILD_IOV + 2) * 0x10000), 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	rc = spdk_bdev_readv_blocks(desc, io_ch, iov, BDEV_IO_NUM_CHILD_IOV * 2, 0,
				    BDEV_IO_NUM_CHILD_IOV + 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2);
	stub_complete_io(2);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
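	/* Size check for the case below: the 61 iovecs total 278016 bytes, i.e. 543
	 * blocks of 512 bytes, read with a 128-block (64 KiB) boundary; the split
	 * produces 7 children (126 + 2 + 128 + 128 + 128 + 30 + 1 blocks), several
	 * of which end mid-iovec.
	 */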
1325 */ 1326 bdev->optimal_io_boundary = 128; 1327 g_io_done = false; 1328 g_io_status = 0; 1329 1330 for (i = 0; i < 31; i++) { 1331 iov[i].iov_base = (void *)(0xFEED0000000 + (i << 20)); 1332 iov[i].iov_len = 1024; 1333 } 1334 iov[31].iov_base = (void *)0xFEED1F00000; 1335 iov[31].iov_len = 32768; 1336 iov[32].iov_base = (void *)0xFEED2000000; 1337 iov[32].iov_len = 160; 1338 iov[33].iov_base = (void *)0xFEED2100000; 1339 iov[33].iov_len = 4096; 1340 iov[34].iov_base = (void *)0xFEED2200000; 1341 iov[34].iov_len = 4096; 1342 iov[35].iov_base = (void *)0xFEED2300000; 1343 iov[35].iov_len = 4096; 1344 iov[36].iov_base = (void *)0xFEED2400000; 1345 iov[36].iov_len = 4096; 1346 iov[37].iov_base = (void *)0xFEED2500000; 1347 iov[37].iov_len = 4096; 1348 iov[38].iov_base = (void *)0xFEED2600000; 1349 iov[38].iov_len = 4096; 1350 iov[39].iov_base = (void *)0xFEED2700000; 1351 iov[39].iov_len = 4096; 1352 iov[40].iov_base = (void *)0xFEED2800000; 1353 iov[40].iov_len = 4096; 1354 iov[41].iov_base = (void *)0xFEED2900000; 1355 iov[41].iov_len = 4096; 1356 iov[42].iov_base = (void *)0xFEED2A00000; 1357 iov[42].iov_len = 4096; 1358 iov[43].iov_base = (void *)0xFEED2B00000; 1359 iov[43].iov_len = 12288; 1360 iov[44].iov_base = (void *)0xFEED2C00000; 1361 iov[44].iov_len = 8192; 1362 iov[45].iov_base = (void *)0xFEED2F00000; 1363 iov[45].iov_len = 4096; 1364 iov[46].iov_base = (void *)0xFEED3000000; 1365 iov[46].iov_len = 4096; 1366 iov[47].iov_base = (void *)0xFEED3100000; 1367 iov[47].iov_len = 4096; 1368 iov[48].iov_base = (void *)0xFEED3200000; 1369 iov[48].iov_len = 24576; 1370 iov[49].iov_base = (void *)0xFEED3300000; 1371 iov[49].iov_len = 16384; 1372 iov[50].iov_base = (void *)0xFEED3400000; 1373 iov[50].iov_len = 12288; 1374 iov[51].iov_base = (void *)0xFEED3500000; 1375 iov[51].iov_len = 4096; 1376 iov[52].iov_base = (void *)0xFEED3600000; 1377 iov[52].iov_len = 4096; 1378 iov[53].iov_base = (void *)0xFEED3700000; 1379 iov[53].iov_len = 4096; 1380 iov[54].iov_base = (void *)0xFEED3800000; 1381 iov[54].iov_len = 28672; 1382 iov[55].iov_base = (void *)0xFEED3900000; 1383 iov[55].iov_len = 20480; 1384 iov[56].iov_base = (void *)0xFEED3A00000; 1385 iov[56].iov_len = 4096; 1386 iov[57].iov_base = (void *)0xFEED3B00000; 1387 iov[57].iov_len = 12288; 1388 iov[58].iov_base = (void *)0xFEED3C00000; 1389 iov[58].iov_len = 4096; 1390 iov[59].iov_base = (void *)0xFEED3D00000; 1391 iov[59].iov_len = 4096; 1392 iov[60].iov_base = (void *)0xFEED3E00000; 1393 iov[60].iov_len = 352; 1394 1395 /* The 1st child IO must be from iov[0] to iov[31] split by the capacity 1396 * of child iovs, 1397 */ 1398 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0, 126, 32); 1399 for (i = 0; i < 32; i++) { 1400 ut_expected_io_set_iov(expected_io, i, iov[i].iov_base, iov[i].iov_len); 1401 } 1402 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1403 1404 /* The 2nd child IO must be from iov[32] to the first 864 bytes of iov[33] 1405 * split by the IO boundary requirement. 1406 */ 1407 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 126, 2, 2); 1408 ut_expected_io_set_iov(expected_io, 0, iov[32].iov_base, iov[32].iov_len); 1409 ut_expected_io_set_iov(expected_io, 1, iov[33].iov_base, 864); 1410 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1411 1412 /* The 3rd child IO must be from the remaining 3232 bytes of iov[33] to 1413 * the first 864 bytes of iov[46] split by the IO boundary requirement. 
	/* The 3rd child IO must be from the remaining 3232 bytes of iov[33] to
	 * the first 864 bytes of iov[46], split by the IO boundary requirement.
	 */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 128, 128, 14);
	ut_expected_io_set_iov(expected_io, 0, (void *)((uintptr_t)iov[33].iov_base + 864),
			       iov[33].iov_len - 864);
	ut_expected_io_set_iov(expected_io, 1, iov[34].iov_base, iov[34].iov_len);
	ut_expected_io_set_iov(expected_io, 2, iov[35].iov_base, iov[35].iov_len);
	ut_expected_io_set_iov(expected_io, 3, iov[36].iov_base, iov[36].iov_len);
	ut_expected_io_set_iov(expected_io, 4, iov[37].iov_base, iov[37].iov_len);
	ut_expected_io_set_iov(expected_io, 5, iov[38].iov_base, iov[38].iov_len);
	ut_expected_io_set_iov(expected_io, 6, iov[39].iov_base, iov[39].iov_len);
	ut_expected_io_set_iov(expected_io, 7, iov[40].iov_base, iov[40].iov_len);
	ut_expected_io_set_iov(expected_io, 8, iov[41].iov_base, iov[41].iov_len);
	ut_expected_io_set_iov(expected_io, 9, iov[42].iov_base, iov[42].iov_len);
	ut_expected_io_set_iov(expected_io, 10, iov[43].iov_base, iov[43].iov_len);
	ut_expected_io_set_iov(expected_io, 11, iov[44].iov_base, iov[44].iov_len);
	ut_expected_io_set_iov(expected_io, 12, iov[45].iov_base, iov[45].iov_len);
	ut_expected_io_set_iov(expected_io, 13, iov[46].iov_base, 864);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	/* The 4th child IO must be from the remaining 3232 bytes of iov[46] to the
	 * first 864 bytes of iov[52], split by the IO boundary requirement.
	 */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 256, 128, 7);
	ut_expected_io_set_iov(expected_io, 0, (void *)((uintptr_t)iov[46].iov_base + 864),
			       iov[46].iov_len - 864);
	ut_expected_io_set_iov(expected_io, 1, iov[47].iov_base, iov[47].iov_len);
	ut_expected_io_set_iov(expected_io, 2, iov[48].iov_base, iov[48].iov_len);
	ut_expected_io_set_iov(expected_io, 3, iov[49].iov_base, iov[49].iov_len);
	ut_expected_io_set_iov(expected_io, 4, iov[50].iov_base, iov[50].iov_len);
	ut_expected_io_set_iov(expected_io, 5, iov[51].iov_base, iov[51].iov_len);
	ut_expected_io_set_iov(expected_io, 6, iov[52].iov_base, 864);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	/* The 5th child IO must be from the remaining 3232 bytes of iov[52] to
	 * the first 4960 bytes of iov[57], split by the IO boundary requirement.
	 */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 384, 128, 6);
	ut_expected_io_set_iov(expected_io, 0, (void *)((uintptr_t)iov[52].iov_base + 864),
			       iov[52].iov_len - 864);
	ut_expected_io_set_iov(expected_io, 1, iov[53].iov_base, iov[53].iov_len);
	ut_expected_io_set_iov(expected_io, 2, iov[54].iov_base, iov[54].iov_len);
	ut_expected_io_set_iov(expected_io, 3, iov[55].iov_base, iov[55].iov_len);
	ut_expected_io_set_iov(expected_io, 4, iov[56].iov_base, iov[56].iov_len);
	ut_expected_io_set_iov(expected_io, 5, iov[57].iov_base, 4960);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
	/* The 6th child IO must be from the remaining 7328 bytes of iov[57]
	 * to the first 3936 bytes of iov[59], split by the capacity of child iovs.
	 */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 512, 30, 3);
	ut_expected_io_set_iov(expected_io, 0, (void *)((uintptr_t)iov[57].iov_base + 4960),
			       iov[57].iov_len - 4960);
	ut_expected_io_set_iov(expected_io, 1, iov[58].iov_base, iov[58].iov_len);
	ut_expected_io_set_iov(expected_io, 2, iov[59].iov_base, 3936);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	/* The 7th child IO is from the remaining 160 bytes of iov[59] and iov[60]. */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 542, 1, 2);
	ut_expected_io_set_iov(expected_io, 0, (void *)((uintptr_t)iov[59].iov_base + 3936),
			       iov[59].iov_len - 3936);
	ut_expected_io_set_iov(expected_io, 1, iov[60].iov_base, iov[60].iov_len);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	rc = spdk_bdev_readv_blocks(desc, io_ch, iov, 61, 0, 543, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 5);
	stub_complete_io(5);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
	CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS);

	/* Test a WRITE_ZEROES that would span an I/O boundary. WRITE_ZEROES should not be
	 * split, so test that.
	 */
	bdev->optimal_io_boundary = 15;
	g_io_done = false;
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, 9, 36, 0);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	rc = spdk_bdev_write_zeroes_blocks(desc, io_ch, 9, 36, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == true);

	/* Test an UNMAP. This should also not be split. */
	bdev->optimal_io_boundary = 16;
	g_io_done = false;
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_UNMAP, 15, 2, 0);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	rc = spdk_bdev_unmap_blocks(desc, io_ch, 15, 2, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == true);
	/* Test a FLUSH. This should also not be split. */
	bdev->optimal_io_boundary = 16;
	g_io_done = false;
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_FLUSH, 15, 2, 0);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	rc = spdk_bdev_flush_blocks(desc, io_ch, 15, 2, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == true);

	CU_ASSERT(TAILQ_EMPTY(&g_bdev_ut_channel->expected_io));

	/* Children requests return an error status */
	bdev->optimal_io_boundary = 16;
	iov[0].iov_base = (void *)0x10000;
	iov[0].iov_len = 512 * 64;
	g_io_exp_status = SPDK_BDEV_IO_STATUS_FAILED;
	g_io_done = false;
	g_io_status = SPDK_BDEV_IO_STATUS_SUCCESS;

	rc = spdk_bdev_readv_blocks(desc, io_ch, iov, 1, 1, 64, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 5);
	stub_complete_io(4);
	CU_ASSERT(g_io_done == false);
	CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED);

	/* Test that a multi-vector command is terminated with failure, without
	 * continuing the splitting process, when one of the child I/Os fails.
	 * The multi-vector command is the same as the one above that needs to be
	 * split by strip and then split further due to the capacity of child iovs.
	 */
	for (i = 0; i < BDEV_IO_NUM_CHILD_IOV - 1; i++) {
		iov[i].iov_base = (void *)((i + 1) * 0x10000);
		iov[i].iov_len = 512;
	}
	iov[BDEV_IO_NUM_CHILD_IOV - 1].iov_base = (void *)(BDEV_IO_NUM_CHILD_IOV * 0x10000);
	iov[BDEV_IO_NUM_CHILD_IOV - 1].iov_len = 256;

	iov[BDEV_IO_NUM_CHILD_IOV].iov_base = (void *)((BDEV_IO_NUM_CHILD_IOV + 1) * 0x10000);
	iov[BDEV_IO_NUM_CHILD_IOV].iov_len = 256;

	iov[BDEV_IO_NUM_CHILD_IOV + 1].iov_base = (void *)((BDEV_IO_NUM_CHILD_IOV + 2) * 0x10000);
	iov[BDEV_IO_NUM_CHILD_IOV + 1].iov_len = 512;

	bdev->optimal_io_boundary = BDEV_IO_NUM_CHILD_IOV;

	g_io_exp_status = SPDK_BDEV_IO_STATUS_FAILED;
	g_io_done = false;
	g_io_status = SPDK_BDEV_IO_STATUS_SUCCESS;

	rc = spdk_bdev_readv_blocks(desc, io_ch, iov, BDEV_IO_NUM_CHILD_IOV * 2, 0,
				    BDEV_IO_NUM_CHILD_IOV + 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED);

	g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS;
	/* For this test we will create the following conditions to hit the code path
	 * where we are trying to send an I/O following a split that has no iovs
	 * because we had to trim them for alignment reasons.
	 *
	 * - 16K boundary, our I/O will start at offset 0 with a length of 0x4200
	 * - Our IOVs are 0x212 in size so that we run into the 16K boundary at child
	 *   IOV position 30 and overshoot by 0x2e.
	 * - That means we'll send the I/O and loop back to pick up the remaining bytes
	 *   at child IOV index 31. When we do, we find that we have to shorten index 31
	 *   by 0x2e, which eliminates that vector, so we just send the first split I/O
	 *   with 30 vectors and let the completion pick up the last 2 vectors.
	 */
	bdev->optimal_io_boundary = 32;
	bdev->split_on_optimal_io_boundary = true;
	g_io_done = false;

	/* Init all parent IOVs to 0x212 */
	for (i = 0; i < BDEV_IO_NUM_CHILD_IOV + 2; i++) {
		iov[i].iov_base = (void *)((i + 1) * 0x10000);
		iov[i].iov_len = 0x212;
	}

	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0, BDEV_IO_NUM_CHILD_IOV,
					   BDEV_IO_NUM_CHILD_IOV - 1);
	/* expect 0-29 to be 1:1 with the parent iov */
	for (i = 0; i < BDEV_IO_NUM_CHILD_IOV - 2; i++) {
		ut_expected_io_set_iov(expected_io, i, iov[i].iov_base, iov[i].iov_len);
	}

	/* expect index 30 to be shortened to 0x1e4 (0x212 - 0x2e) because of the alignment,
	 * where 0x2e is the amount we overshot the 16K boundary
	 */
	ut_expected_io_set_iov(expected_io, BDEV_IO_NUM_CHILD_IOV - 2,
			       (void *)(iov[BDEV_IO_NUM_CHILD_IOV - 2].iov_base), 0x1e4);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	/* 2nd child IO will have 2 remaining vectors, one to pick up from the one that was
	 * shortened that takes it to the next boundary and then a final one to get us to
	 * 0x4200 bytes for the IO.
	 */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, BDEV_IO_NUM_CHILD_IOV,
					   BDEV_IO_NUM_CHILD_IOV, 2);
	/* position 30 picked up the remaining bytes to the next boundary */
	ut_expected_io_set_iov(expected_io, 0,
			       (void *)(iov[BDEV_IO_NUM_CHILD_IOV - 2].iov_base + 0x1e4), 0x2e);

	/* position 31 picked up the rest of the transfer to get us to 0x4200 */
	ut_expected_io_set_iov(expected_io, 1,
			       (void *)(iov[BDEV_IO_NUM_CHILD_IOV - 1].iov_base), 0x1d2);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	rc = spdk_bdev_readv_blocks(desc, io_ch, iov, BDEV_IO_NUM_CHILD_IOV + 1, 0,
				    BDEV_IO_NUM_CHILD_IOV + 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);

	spdk_put_io_channel(io_ch);
	spdk_bdev_close(desc);
	free_bdev(bdev);
	spdk_bdev_finish(bdev_fini_cb, NULL);
	poll_threads();
}

static void
bdev_io_split_with_io_wait(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc = NULL;
	struct spdk_io_channel *io_ch;
	struct spdk_bdev_channel *channel;
	struct spdk_bdev_mgmt_channel *mgmt_ch;
	struct spdk_bdev_opts bdev_opts = {
		.bdev_io_pool_size = 2,
		.bdev_io_cache_size = 1,
	};
	struct iovec iov[3];
	struct ut_expected_io *expected_io;
	int rc;

	rc = spdk_bdev_set_opts(&bdev_opts);
	CU_ASSERT(rc == 0);
	spdk_bdev_initialize(bdev_init_cb, NULL);

	bdev = allocate_bdev("bdev0");

	rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc);
	CU_ASSERT(rc == 0);
	CU_ASSERT(desc != NULL);
	CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc));
	io_ch = spdk_bdev_get_io_channel(desc);
	CU_ASSERT(io_ch != NULL);
	channel = spdk_io_channel_get_ctx(io_ch);
	mgmt_ch = channel->shared_resource->mgmt_ch;

	bdev->optimal_io_boundary = 16;
	bdev->split_on_optimal_io_boundary = true;
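	/* bdev_io_pool_size is 2 with a cache of 1, so this read plus the split
	 * parent below exhaust the spdk_bdev_io pool; each child of the split can
	 * then be allocated only as an earlier I/O completes, which exercises the
	 * io_wait path inside the splitting logic.
	 */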
spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, NULL); 1699 CU_ASSERT(rc == 0); 1700 1701 /* Now test that a single-vector command is split correctly. 1702 * Offset 14, length 8, payload 0xF000 1703 * Child - Offset 14, length 2, payload 0xF000 1704 * Child - Offset 16, length 6, payload 0xF000 + 2 * 512 1705 * 1706 * Set up the expected values before calling spdk_bdev_read_blocks 1707 */ 1708 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 14, 2, 1); 1709 ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 2 * 512); 1710 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1711 1712 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 16, 6, 1); 1713 ut_expected_io_set_iov(expected_io, 0, (void *)(0xF000 + 2 * 512), 6 * 512); 1714 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1715 1716 /* The following children will be submitted sequentially due to the capacity of 1717 * spdk_bdev_io. 1718 */ 1719 1720 /* The first child I/O will be queued to wait until an spdk_bdev_io becomes available */ 1721 rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 8, io_done, NULL); 1722 CU_ASSERT(rc == 0); 1723 CU_ASSERT(!TAILQ_EMPTY(&mgmt_ch->io_wait_queue)); 1724 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 1725 1726 /* Completing the first read I/O will submit the first child */ 1727 stub_complete_io(1); 1728 CU_ASSERT(TAILQ_EMPTY(&mgmt_ch->io_wait_queue)); 1729 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 1730 1731 /* Completing the first child will submit the second child */ 1732 stub_complete_io(1); 1733 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 1734 1735 /* Complete the second child I/O. This should result in our callback getting 1736 * invoked since the parent I/O is now complete. 1737 */ 1738 stub_complete_io(1); 1739 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 1740 1741 /* Now set up a more complex, multi-vector command that needs to be split, 1742 * including splitting iovecs. 1743 */ 1744 iov[0].iov_base = (void *)0x10000; 1745 iov[0].iov_len = 512; 1746 iov[1].iov_base = (void *)0x20000; 1747 iov[1].iov_len = 20 * 512; 1748 iov[2].iov_base = (void *)0x30000; 1749 iov[2].iov_len = 11 * 512; 1750 1751 g_io_done = false; 1752 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 14, 2, 2); 1753 ut_expected_io_set_iov(expected_io, 0, (void *)0x10000, 512); 1754 ut_expected_io_set_iov(expected_io, 1, (void *)0x20000, 512); 1755 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1756 1757 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 16, 16, 1); 1758 ut_expected_io_set_iov(expected_io, 0, (void *)(0x20000 + 512), 16 * 512); 1759 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1760 1761 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 32, 14, 2); 1762 ut_expected_io_set_iov(expected_io, 0, (void *)(0x20000 + 17 * 512), 3 * 512); 1763 ut_expected_io_set_iov(expected_io, 1, (void *)0x30000, 11 * 512); 1764 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1765 1766 rc = spdk_bdev_writev_blocks(desc, io_ch, iov, 3, 14, 32, io_done, NULL); 1767 CU_ASSERT(rc == 0); 1768 CU_ASSERT(g_io_done == false); 1769 1770 /* The following children will be submitted sequentially due to the capacity of 1771 * spdk_bdev_io. 
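* (bdev_io_pool_size was set to 2 with a cache of 1, so while the parent I/O holds one spdk_bdev_io only a single child can be outstanding at a time.)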
1772 */ 1773 1774 /* Completing the first child will submit the second child */ 1775 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 1776 stub_complete_io(1); 1777 CU_ASSERT(g_io_done == false); 1778 1779 /* Completing the second child will submit the third child */ 1780 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 1781 stub_complete_io(1); 1782 CU_ASSERT(g_io_done == false); 1783 1784 /* Completing the third child will result in our callback getting invoked 1785 * since the parent I/O is now complete. 1786 */ 1787 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 1788 stub_complete_io(1); 1789 CU_ASSERT(g_io_done == true); 1790 1791 CU_ASSERT(TAILQ_EMPTY(&g_bdev_ut_channel->expected_io)); 1792 1793 spdk_put_io_channel(io_ch); 1794 spdk_bdev_close(desc); 1795 free_bdev(bdev); 1796 spdk_bdev_finish(bdev_fini_cb, NULL); 1797 poll_threads(); 1798 } 1799 1800 static void 1801 bdev_io_alignment(void) 1802 { 1803 struct spdk_bdev *bdev; 1804 struct spdk_bdev_desc *desc = NULL; 1805 struct spdk_io_channel *io_ch; 1806 struct spdk_bdev_opts bdev_opts = { 1807 .bdev_io_pool_size = 20, 1808 .bdev_io_cache_size = 2, 1809 }; 1810 int rc; 1811 void *buf = NULL; 1812 struct iovec iovs[2]; 1813 int iovcnt; 1814 uint64_t alignment; 1815 1816 rc = spdk_bdev_set_opts(&bdev_opts); 1817 CU_ASSERT(rc == 0); 1818 spdk_bdev_initialize(bdev_init_cb, NULL); 1819 1820 fn_table.submit_request = stub_submit_request_get_buf; 1821 bdev = allocate_bdev("bdev0"); 1822 1823 rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc); 1824 CU_ASSERT(rc == 0); 1825 CU_ASSERT(desc != NULL); 1826 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 1827 io_ch = spdk_bdev_get_io_channel(desc); 1828 CU_ASSERT(io_ch != NULL); 1829 1830 /* Create aligned buffer */ 1831 rc = posix_memalign(&buf, 4096, 8192); 1832 SPDK_CU_ASSERT_FATAL(rc == 0); 1833 1834 /* Pass aligned single buffer with no alignment required */ 1835 alignment = 1; 1836 bdev->required_alignment = spdk_u32log2(alignment); 1837 1838 rc = spdk_bdev_write_blocks(desc, io_ch, buf, 0, 1, io_done, NULL); 1839 CU_ASSERT(rc == 0); 1840 stub_complete_io(1); 1841 CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt, 1842 alignment)); 1843 1844 rc = spdk_bdev_read_blocks(desc, io_ch, buf, 0, 1, io_done, NULL); 1845 CU_ASSERT(rc == 0); 1846 stub_complete_io(1); 1847 CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt, 1848 alignment)); 1849 1850 /* Pass unaligned single buffer with no alignment required */ 1851 alignment = 1; 1852 bdev->required_alignment = spdk_u32log2(alignment); 1853 1854 rc = spdk_bdev_write_blocks(desc, io_ch, buf + 4, 0, 1, io_done, NULL); 1855 CU_ASSERT(rc == 0); 1856 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0); 1857 CU_ASSERT(g_bdev_io->u.bdev.iovs[0].iov_base == buf + 4); 1858 stub_complete_io(1); 1859 1860 rc = spdk_bdev_read_blocks(desc, io_ch, buf + 4, 0, 1, io_done, NULL); 1861 CU_ASSERT(rc == 0); 1862 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0); 1863 CU_ASSERT(g_bdev_io->u.bdev.iovs[0].iov_base == buf + 4); 1864 stub_complete_io(1); 1865 1866 /* Pass unaligned single buffer with 512 alignment required */ 1867 alignment = 512; 1868 bdev->required_alignment = spdk_u32log2(alignment); 1869 1870 rc = spdk_bdev_write_blocks(desc, io_ch, buf + 4, 0, 1, io_done, NULL); 1871 CU_ASSERT(rc == 0); 1872 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 1); 1873 CU_ASSERT(g_bdev_io->u.bdev.iovs == &g_bdev_io->internal.bounce_iov); 1874 
CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt, 1875 alignment)); 1876 stub_complete_io(1); 1877 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0); 1878 1879 rc = spdk_bdev_read_blocks(desc, io_ch, buf + 4, 0, 1, io_done, NULL); 1880 CU_ASSERT(rc == 0); 1881 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 1); 1882 CU_ASSERT(g_bdev_io->u.bdev.iovs == &g_bdev_io->internal.bounce_iov); 1883 CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt, 1884 alignment)); 1885 stub_complete_io(1); 1886 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0); 1887 1888 /* Pass unaligned single buffer with 4096 alignment required */ 1889 alignment = 4096; 1890 bdev->required_alignment = spdk_u32log2(alignment); 1891 1892 rc = spdk_bdev_write_blocks(desc, io_ch, buf + 8, 0, 1, io_done, NULL); 1893 CU_ASSERT(rc == 0); 1894 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 1); 1895 CU_ASSERT(g_bdev_io->u.bdev.iovs == &g_bdev_io->internal.bounce_iov); 1896 CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt, 1897 alignment)); 1898 stub_complete_io(1); 1899 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0); 1900 1901 rc = spdk_bdev_read_blocks(desc, io_ch, buf + 8, 0, 1, io_done, NULL); 1902 CU_ASSERT(rc == 0); 1903 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 1); 1904 CU_ASSERT(g_bdev_io->u.bdev.iovs == &g_bdev_io->internal.bounce_iov); 1905 CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt, 1906 alignment)); 1907 stub_complete_io(1); 1908 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0); 1909 1910 /* Pass aligned iovs with no alignment required */ 1911 alignment = 1; 1912 bdev->required_alignment = spdk_u32log2(alignment); 1913 1914 iovcnt = 1; 1915 iovs[0].iov_base = buf; 1916 iovs[0].iov_len = 512; 1917 1918 rc = spdk_bdev_writev(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL); 1919 CU_ASSERT(rc == 0); 1920 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0); 1921 stub_complete_io(1); 1922 CU_ASSERT(g_bdev_io->u.bdev.iovs[0].iov_base == iovs[0].iov_base); 1923 1924 rc = spdk_bdev_readv(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL); 1925 CU_ASSERT(rc == 0); 1926 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0); 1927 stub_complete_io(1); 1928 CU_ASSERT(g_bdev_io->u.bdev.iovs[0].iov_base == iovs[0].iov_base); 1929 1930 /* Pass unaligned iovs with no alignment required */ 1931 alignment = 1; 1932 bdev->required_alignment = spdk_u32log2(alignment); 1933 1934 iovcnt = 2; 1935 iovs[0].iov_base = buf + 16; 1936 iovs[0].iov_len = 256; 1937 iovs[1].iov_base = buf + 16 + 256 + 32; 1938 iovs[1].iov_len = 256; 1939 1940 rc = spdk_bdev_writev(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL); 1941 CU_ASSERT(rc == 0); 1942 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0); 1943 stub_complete_io(1); 1944 CU_ASSERT(g_bdev_io->u.bdev.iovs[0].iov_base == iovs[0].iov_base); 1945 1946 rc = spdk_bdev_readv(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL); 1947 CU_ASSERT(rc == 0); 1948 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0); 1949 stub_complete_io(1); 1950 CU_ASSERT(g_bdev_io->u.bdev.iovs[0].iov_base == iovs[0].iov_base); 1951 1952 /* Pass unaligned iov with 2048 alignment required */ 1953 alignment = 2048; 1954 bdev->required_alignment = spdk_u32log2(alignment); 1955 1956 iovcnt = 2; 1957 iovs[0].iov_base = buf + 16; 1958 iovs[0].iov_len = 256; 1959 iovs[1].iov_base = buf + 16 + 256 + 32; 1960 iovs[1].iov_len = 256; 1961 1962 rc = spdk_bdev_writev(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL); 1963 CU_ASSERT(rc == 0); 1964 
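/* Neither iov is 2048-byte aligned, so the bdev layer is expected to copy them into its internal bounce buffer; orig_iovcnt records the caller's iov count so the original vectors can be restored once the I/O completes. */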
CU_ASSERT(g_bdev_io->internal.orig_iovcnt == iovcnt); 1965 CU_ASSERT(g_bdev_io->u.bdev.iovs == &g_bdev_io->internal.bounce_iov); 1966 CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt, 1967 alignment)); 1968 stub_complete_io(1); 1969 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0); 1970 1971 rc = spdk_bdev_readv(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL); 1972 CU_ASSERT(rc == 0); 1973 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == iovcnt); 1974 CU_ASSERT(g_bdev_io->u.bdev.iovs == &g_bdev_io->internal.bounce_iov); 1975 CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt, 1976 alignment)); 1977 stub_complete_io(1); 1978 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0); 1979 1980 /* Pass iov without allocated buffer without alignment required */ 1981 alignment = 1; 1982 bdev->required_alignment = spdk_u32log2(alignment); 1983 1984 iovcnt = 1; 1985 iovs[0].iov_base = NULL; 1986 iovs[0].iov_len = 0; 1987 1988 rc = spdk_bdev_readv(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL); 1989 CU_ASSERT(rc == 0); 1990 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0); 1991 CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt, 1992 alignment)); 1993 stub_complete_io(1); 1994 1995 /* Pass iov without allocated buffer with 1024 alignment required */ 1996 alignment = 1024; 1997 bdev->required_alignment = spdk_u32log2(alignment); 1998 1999 iovcnt = 1; 2000 iovs[0].iov_base = NULL; 2001 iovs[0].iov_len = 0; 2002 2003 rc = spdk_bdev_readv(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL); 2004 CU_ASSERT(rc == 0); 2005 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0); 2006 CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt, 2007 alignment)); 2008 stub_complete_io(1); 2009 2010 spdk_put_io_channel(io_ch); 2011 spdk_bdev_close(desc); 2012 free_bdev(bdev); 2013 fn_table.submit_request = stub_submit_request; 2014 spdk_bdev_finish(bdev_fini_cb, NULL); 2015 poll_threads(); 2016 2017 free(buf); 2018 } 2019 2020 static void 2021 bdev_io_alignment_with_boundary(void) 2022 { 2023 struct spdk_bdev *bdev; 2024 struct spdk_bdev_desc *desc = NULL; 2025 struct spdk_io_channel *io_ch; 2026 struct spdk_bdev_opts bdev_opts = { 2027 .bdev_io_pool_size = 20, 2028 .bdev_io_cache_size = 2, 2029 }; 2030 int rc; 2031 void *buf = NULL; 2032 struct iovec iovs[2]; 2033 int iovcnt; 2034 uint64_t alignment; 2035 2036 rc = spdk_bdev_set_opts(&bdev_opts); 2037 CU_ASSERT(rc == 0); 2038 spdk_bdev_initialize(bdev_init_cb, NULL); 2039 2040 fn_table.submit_request = stub_submit_request_get_buf; 2041 bdev = allocate_bdev("bdev0"); 2042 2043 rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc); 2044 CU_ASSERT(rc == 0); 2045 CU_ASSERT(desc != NULL); 2046 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 2047 io_ch = spdk_bdev_get_io_channel(desc); 2048 CU_ASSERT(io_ch != NULL); 2049 2050 /* Create aligned buffer */ 2051 rc = posix_memalign(&buf, 4096, 131072); 2052 SPDK_CU_ASSERT_FATAL(rc == 0); 2053 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 2054 2055 /* 512 * 3 with 2 IO boundary, allocate small data buffer from bdev layer */ 2056 alignment = 512; 2057 bdev->required_alignment = spdk_u32log2(alignment); 2058 bdev->optimal_io_boundary = 2; 2059 bdev->split_on_optimal_io_boundary = true; 2060 2061 iovcnt = 1; 2062 iovs[0].iov_base = NULL; 2063 iovs[0].iov_len = 512 * 3; 2064 2065 rc = spdk_bdev_readv_blocks(desc, io_ch, iovs, iovcnt, 1, 3, io_done, NULL); 2066 CU_ASSERT(rc == 0); 2067 
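/* Reading 3 blocks starting at block 1 with a 2-block boundary splits into two children: block 1 alone (up to the boundary) and blocks 2-3. With iov_base left NULL, the bdev layer must also allocate the data buffers itself. */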
CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 2068 stub_complete_io(2); 2069 2070 /* 8KiB with 16 IO boundary, allocate large data buffer from bdev layer */ 2071 alignment = 512; 2072 bdev->required_alignment = spdk_u32log2(alignment); 2073 bdev->optimal_io_boundary = 16; 2074 bdev->split_on_optimal_io_boundary = true; 2075 2076 iovcnt = 1; 2077 iovs[0].iov_base = NULL; 2078 iovs[0].iov_len = 512 * 16; 2079 2080 rc = spdk_bdev_readv_blocks(desc, io_ch, iovs, iovcnt, 1, 16, io_done, NULL); 2081 CU_ASSERT(rc == 0); 2082 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 2083 stub_complete_io(2); 2084 2085 /* 512 * 160 with 128 IO boundary, 63.5KiB + 16.5KiB for the two children requests */ 2086 alignment = 512; 2087 bdev->required_alignment = spdk_u32log2(alignment); 2088 bdev->optimal_io_boundary = 128; 2089 bdev->split_on_optimal_io_boundary = true; 2090 2091 iovcnt = 1; 2092 iovs[0].iov_base = buf + 16; 2093 iovs[0].iov_len = 512 * 160; 2094 rc = spdk_bdev_readv_blocks(desc, io_ch, iovs, iovcnt, 1, 160, io_done, NULL); 2095 CU_ASSERT(rc == 0); 2096 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 2097 stub_complete_io(2); 2098 2099 /* 512 * 3 with 2 IO boundary */ 2100 alignment = 512; 2101 bdev->required_alignment = spdk_u32log2(alignment); 2102 bdev->optimal_io_boundary = 2; 2103 bdev->split_on_optimal_io_boundary = true; 2104 2105 iovcnt = 2; 2106 iovs[0].iov_base = buf + 16; 2107 iovs[0].iov_len = 512; 2108 iovs[1].iov_base = buf + 16 + 512 + 32; 2109 iovs[1].iov_len = 1024; 2110 2111 rc = spdk_bdev_writev_blocks(desc, io_ch, iovs, iovcnt, 1, 3, io_done, NULL); 2112 CU_ASSERT(rc == 0); 2113 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 2114 stub_complete_io(2); 2115 2116 rc = spdk_bdev_readv_blocks(desc, io_ch, iovs, iovcnt, 1, 3, io_done, NULL); 2117 CU_ASSERT(rc == 0); 2118 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 2119 stub_complete_io(2); 2120 2121 /* 512 * 64 with 32 IO boundary */ 2122 bdev->optimal_io_boundary = 32; 2123 iovcnt = 2; 2124 iovs[0].iov_base = buf + 16; 2125 iovs[0].iov_len = 16384; 2126 iovs[1].iov_base = buf + 16 + 16384 + 32; 2127 iovs[1].iov_len = 16384; 2128 2129 rc = spdk_bdev_writev_blocks(desc, io_ch, iovs, iovcnt, 1, 64, io_done, NULL); 2130 CU_ASSERT(rc == 0); 2131 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 3); 2132 stub_complete_io(3); 2133 2134 rc = spdk_bdev_readv_blocks(desc, io_ch, iovs, iovcnt, 1, 64, io_done, NULL); 2135 CU_ASSERT(rc == 0); 2136 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 3); 2137 stub_complete_io(3); 2138 2139 /* 512 * 160 with 32 IO boundary */ 2140 iovcnt = 1; 2141 iovs[0].iov_base = buf + 16; 2142 iovs[0].iov_len = 16384 + 65536; 2143 2144 rc = spdk_bdev_writev_blocks(desc, io_ch, iovs, iovcnt, 1, 160, io_done, NULL); 2145 CU_ASSERT(rc == 0); 2146 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 6); 2147 stub_complete_io(6); 2148 2149 spdk_put_io_channel(io_ch); 2150 spdk_bdev_close(desc); 2151 free_bdev(bdev); 2152 fn_table.submit_request = stub_submit_request; 2153 spdk_bdev_finish(bdev_fini_cb, NULL); 2154 poll_threads(); 2155 2156 free(buf); 2157 } 2158 2159 static void 2160 histogram_status_cb(void *cb_arg, int status) 2161 { 2162 g_status = status; 2163 } 2164 2165 static void 2166 histogram_data_cb(void *cb_arg, int status, struct spdk_histogram_data *histogram) 2167 { 2168 g_status = status; 2169 g_histogram = histogram; 2170 } 2171 2172 static void 2173 histogram_io_count(void *ctx, uint64_t start, uint64_t end, uint64_t count, 2174 uint64_t 
total, uint64_t so_far) 2175 { 2176 g_count += count; 2177 } 2178 2179 static void 2180 bdev_histograms(void) 2181 { 2182 struct spdk_bdev *bdev; 2183 struct spdk_bdev_desc *desc = NULL; 2184 struct spdk_io_channel *ch; 2185 struct spdk_histogram_data *histogram; 2186 uint8_t buf[4096]; 2187 int rc; 2188 2189 spdk_bdev_initialize(bdev_init_cb, NULL); 2190 2191 bdev = allocate_bdev("bdev"); 2192 2193 rc = spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc); 2194 CU_ASSERT(rc == 0); 2195 CU_ASSERT(desc != NULL); 2196 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 2197 2198 ch = spdk_bdev_get_io_channel(desc); 2199 CU_ASSERT(ch != NULL); 2200 2201 /* Enable histogram */ 2202 g_status = -1; 2203 spdk_bdev_histogram_enable(bdev, histogram_status_cb, NULL, true); 2204 poll_threads(); 2205 CU_ASSERT(g_status == 0); 2206 CU_ASSERT(bdev->internal.histogram_enabled == true); 2207 2208 /* Allocate histogram */ 2209 histogram = spdk_histogram_data_alloc(); 2210 SPDK_CU_ASSERT_FATAL(histogram != NULL); 2211 2212 /* Check if histogram is zeroed */ 2213 spdk_bdev_histogram_get(bdev, histogram, histogram_data_cb, NULL); 2214 poll_threads(); 2215 CU_ASSERT(g_status == 0); 2216 SPDK_CU_ASSERT_FATAL(g_histogram != NULL); 2217 2218 g_count = 0; 2219 spdk_histogram_data_iterate(g_histogram, histogram_io_count, NULL); 2220 2221 CU_ASSERT(g_count == 0); 2222 2223 rc = spdk_bdev_write_blocks(desc, ch, buf, 0, 1, io_done, NULL); 2224 CU_ASSERT(rc == 0); 2225 2226 spdk_delay_us(10); 2227 stub_complete_io(1); 2228 poll_threads(); 2229 2230 rc = spdk_bdev_read_blocks(desc, ch, buf, 0, 1, io_done, NULL); 2231 CU_ASSERT(rc == 0); 2232 2233 spdk_delay_us(10); 2234 stub_complete_io(1); 2235 poll_threads(); 2236 2237 /* Check if histogram gathered data from all I/O channels */ 2238 g_histogram = NULL; 2239 spdk_bdev_histogram_get(bdev, histogram, histogram_data_cb, NULL); 2240 poll_threads(); 2241 CU_ASSERT(g_status == 0); 2242 CU_ASSERT(bdev->internal.histogram_enabled == true); 2243 SPDK_CU_ASSERT_FATAL(g_histogram != NULL); 2244 2245 g_count = 0; 2246 spdk_histogram_data_iterate(g_histogram, histogram_io_count, NULL); 2247 CU_ASSERT(g_count == 2); 2248 2249 /* Disable histogram */ 2250 spdk_bdev_histogram_enable(bdev, histogram_status_cb, NULL, false); 2251 poll_threads(); 2252 CU_ASSERT(g_status == 0); 2253 CU_ASSERT(bdev->internal.histogram_enabled == false); 2254 2255 /* Try to run histogram commands on disabled bdev */ 2256 spdk_bdev_histogram_get(bdev, histogram, histogram_data_cb, NULL); 2257 poll_threads(); 2258 CU_ASSERT(g_status == -EFAULT); 2259 2260 spdk_histogram_data_free(histogram); 2261 spdk_put_io_channel(ch); 2262 spdk_bdev_close(desc); 2263 free_bdev(bdev); 2264 spdk_bdev_finish(bdev_fini_cb, NULL); 2265 poll_threads(); 2266 } 2267 2268 static void 2269 _bdev_compare(bool emulated) 2270 { 2271 struct spdk_bdev *bdev; 2272 struct spdk_bdev_desc *desc = NULL; 2273 struct spdk_io_channel *ioch; 2274 struct ut_expected_io *expected_io; 2275 uint64_t offset, num_blocks; 2276 uint32_t num_completed; 2277 char aa_buf[512]; 2278 char bb_buf[512]; 2279 struct iovec compare_iov; 2280 uint8_t io_type; 2281 int rc; 2282 2283 if (emulated) { 2284 io_type = SPDK_BDEV_IO_TYPE_READ; 2285 } else { 2286 io_type = SPDK_BDEV_IO_TYPE_COMPARE; 2287 } 2288 2289 memset(aa_buf, 0xaa, sizeof(aa_buf)); 2290 memset(bb_buf, 0xbb, sizeof(bb_buf)); 2291 2292 g_io_types_supported[SPDK_BDEV_IO_TYPE_COMPARE] = !emulated; 2293 2294 spdk_bdev_initialize(bdev_init_cb, NULL); 2295 fn_table.submit_request = 
stub_submit_request_get_buf; 2296 bdev = allocate_bdev("bdev"); 2297 2298 rc = spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc); 2299 CU_ASSERT_EQUAL(rc, 0); 2300 SPDK_CU_ASSERT_FATAL(desc != NULL); 2301 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 2302 ioch = spdk_bdev_get_io_channel(desc); 2303 SPDK_CU_ASSERT_FATAL(ioch != NULL); 2304 2305 fn_table.submit_request = stub_submit_request_get_buf; 2306 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 2307 2308 offset = 50; 2309 num_blocks = 1; 2310 compare_iov.iov_base = aa_buf; 2311 compare_iov.iov_len = sizeof(aa_buf); 2312 2313 expected_io = ut_alloc_expected_io(io_type, offset, num_blocks, 0); 2314 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2315 2316 g_io_done = false; 2317 g_compare_read_buf = aa_buf; 2318 g_compare_read_buf_len = sizeof(aa_buf); 2319 rc = spdk_bdev_comparev_blocks(desc, ioch, &compare_iov, 1, offset, num_blocks, io_done, NULL); 2320 CU_ASSERT_EQUAL(rc, 0); 2321 num_completed = stub_complete_io(1); 2322 CU_ASSERT_EQUAL(num_completed, 1); 2323 CU_ASSERT(g_io_done == true); 2324 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS); 2325 2326 expected_io = ut_alloc_expected_io(io_type, offset, num_blocks, 0); 2327 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2328 2329 g_io_done = false; 2330 g_compare_read_buf = bb_buf; 2331 g_compare_read_buf_len = sizeof(bb_buf); 2332 rc = spdk_bdev_comparev_blocks(desc, ioch, &compare_iov, 1, offset, num_blocks, io_done, NULL); 2333 CU_ASSERT_EQUAL(rc, 0); 2334 num_completed = stub_complete_io(1); 2335 CU_ASSERT_EQUAL(num_completed, 1); 2336 CU_ASSERT(g_io_done == true); 2337 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_MISCOMPARE); 2338 2339 spdk_put_io_channel(ioch); 2340 spdk_bdev_close(desc); 2341 free_bdev(bdev); 2342 fn_table.submit_request = stub_submit_request; 2343 spdk_bdev_finish(bdev_fini_cb, NULL); 2344 poll_threads(); 2345 2346 g_io_types_supported[SPDK_BDEV_IO_TYPE_COMPARE] = true; 2347 2348 g_compare_read_buf = NULL; 2349 } 2350 2351 static void 2352 bdev_compare(void) 2353 { 2354 _bdev_compare(true); 2355 _bdev_compare(false); 2356 } 2357 2358 static void 2359 bdev_compare_and_write(void) 2360 { 2361 struct spdk_bdev *bdev; 2362 struct spdk_bdev_desc *desc = NULL; 2363 struct spdk_io_channel *ioch; 2364 struct ut_expected_io *expected_io; 2365 uint64_t offset, num_blocks; 2366 uint32_t num_completed; 2367 char aa_buf[512]; 2368 char bb_buf[512]; 2369 char cc_buf[512]; 2370 char write_buf[512]; 2371 struct iovec compare_iov; 2372 struct iovec write_iov; 2373 int rc; 2374 2375 memset(aa_buf, 0xaa, sizeof(aa_buf)); 2376 memset(bb_buf, 0xbb, sizeof(bb_buf)); 2377 memset(cc_buf, 0xcc, sizeof(cc_buf)); 2378 2379 g_io_types_supported[SPDK_BDEV_IO_TYPE_COMPARE] = false; 2380 2381 spdk_bdev_initialize(bdev_init_cb, NULL); 2382 fn_table.submit_request = stub_submit_request_get_buf; 2383 bdev = allocate_bdev("bdev"); 2384 2385 rc = spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc); 2386 CU_ASSERT_EQUAL(rc, 0); 2387 SPDK_CU_ASSERT_FATAL(desc != NULL); 2388 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 2389 ioch = spdk_bdev_get_io_channel(desc); 2390 SPDK_CU_ASSERT_FATAL(ioch != NULL); 2391 2392 fn_table.submit_request = stub_submit_request_get_buf; 2393 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 2394 2395 offset = 50; 2396 num_blocks = 1; 2397 compare_iov.iov_base = aa_buf; 2398 compare_iov.iov_len = sizeof(aa_buf); 2399 write_iov.iov_base = bb_buf; 2400 write_iov.iov_len = 
sizeof(bb_buf); 2401 2402 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, offset, num_blocks, 0); 2403 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2404 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, offset, num_blocks, 0); 2405 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2406 2407 g_io_done = false; 2408 g_compare_read_buf = aa_buf; 2409 g_compare_read_buf_len = sizeof(aa_buf); 2410 memset(write_buf, 0, sizeof(write_buf)); 2411 g_compare_write_buf = write_buf; 2412 g_compare_write_buf_len = sizeof(write_buf); 2413 rc = spdk_bdev_comparev_and_writev_blocks(desc, ioch, &compare_iov, 1, &write_iov, 1, 2414 offset, num_blocks, io_done, NULL); 2415 /* Trigger range locking */ 2416 poll_threads(); 2417 CU_ASSERT_EQUAL(rc, 0); 2418 num_completed = stub_complete_io(1); 2419 CU_ASSERT_EQUAL(num_completed, 1); 2420 CU_ASSERT(g_io_done == false); 2421 num_completed = stub_complete_io(1); 2422 /* Trigger range unlocking */ 2423 poll_threads(); 2424 CU_ASSERT_EQUAL(num_completed, 1); 2425 CU_ASSERT(g_io_done == true); 2426 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS); 2427 CU_ASSERT(memcmp(write_buf, bb_buf, sizeof(write_buf)) == 0); 2428 2429 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, offset, num_blocks, 0); 2430 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2431 2432 g_io_done = false; 2433 g_compare_read_buf = cc_buf; 2434 g_compare_read_buf_len = sizeof(cc_buf); 2435 memset(write_buf, 0, sizeof(write_buf)); 2436 g_compare_write_buf = write_buf; 2437 g_compare_write_buf_len = sizeof(write_buf); 2438 rc = spdk_bdev_comparev_and_writev_blocks(desc, ioch, &compare_iov, 1, &write_iov, 1, 2439 offset, num_blocks, io_done, NULL); 2440 /* Trigger range locking */ 2441 poll_threads(); 2442 CU_ASSERT_EQUAL(rc, 0); 2443 num_completed = stub_complete_io(1); 2444 /* Trigger range unlocking earlier because we expect error here */ 2445 poll_threads(); 2446 CU_ASSERT_EQUAL(num_completed, 1); 2447 CU_ASSERT(g_io_done == true); 2448 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_MISCOMPARE); 2449 num_completed = stub_complete_io(1); 2450 CU_ASSERT_EQUAL(num_completed, 0); 2451 2452 spdk_put_io_channel(ioch); 2453 spdk_bdev_close(desc); 2454 free_bdev(bdev); 2455 fn_table.submit_request = stub_submit_request; 2456 spdk_bdev_finish(bdev_fini_cb, NULL); 2457 poll_threads(); 2458 2459 g_io_types_supported[SPDK_BDEV_IO_TYPE_COMPARE] = true; 2460 2461 g_compare_read_buf = NULL; 2462 g_compare_write_buf = NULL; 2463 } 2464 2465 static void 2466 bdev_write_zeroes(void) 2467 { 2468 struct spdk_bdev *bdev; 2469 struct spdk_bdev_desc *desc = NULL; 2470 struct spdk_io_channel *ioch; 2471 struct ut_expected_io *expected_io; 2472 uint64_t offset, num_io_blocks, num_blocks; 2473 uint32_t num_completed, num_requests; 2474 int rc; 2475 2476 spdk_bdev_initialize(bdev_init_cb, NULL); 2477 bdev = allocate_bdev("bdev"); 2478 2479 rc = spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc); 2480 CU_ASSERT_EQUAL(rc, 0); 2481 SPDK_CU_ASSERT_FATAL(desc != NULL); 2482 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 2483 ioch = spdk_bdev_get_io_channel(desc); 2484 SPDK_CU_ASSERT_FATAL(ioch != NULL); 2485 2486 fn_table.submit_request = stub_submit_request; 2487 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 2488 2489 /* First test that if the bdev supports write_zeroes, the request won't be split */ 2490 bdev->md_len = 0; 2491 bdev->blocklen = 4096; 2492 num_blocks = (ZERO_BUFFER_SIZE / bdev->blocklen) * 2; 
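/* num_blocks is twice what one zero buffer covers, which would force a split if write_zeroes had to be emulated; since the bdev advertises native support here, a single WRITE_ZEROES request spanning the whole range is expected. */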
2493 2494 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, 0, num_blocks, 0); 2495 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2496 rc = spdk_bdev_write_zeroes_blocks(desc, ioch, 0, num_blocks, io_done, NULL); 2497 CU_ASSERT_EQUAL(rc, 0); 2498 num_completed = stub_complete_io(1); 2499 CU_ASSERT_EQUAL(num_completed, 1); 2500 2501 /* Check that if write zeroes is not supported it'll be replaced by regular writes */ 2502 ut_enable_io_type(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, false); 2503 num_io_blocks = ZERO_BUFFER_SIZE / bdev->blocklen; 2504 num_requests = 2; 2505 num_blocks = (ZERO_BUFFER_SIZE / bdev->blocklen) * num_requests; 2506 2507 for (offset = 0; offset < num_requests; ++offset) { 2508 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 2509 offset * num_io_blocks, num_io_blocks, 0); 2510 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2511 } 2512 2513 rc = spdk_bdev_write_zeroes_blocks(desc, ioch, 0, num_blocks, io_done, NULL); 2514 CU_ASSERT_EQUAL(rc, 0); 2515 num_completed = stub_complete_io(num_requests); 2516 CU_ASSERT_EQUAL(num_completed, num_requests); 2517 2518 /* Check that the splitting is correct if bdev has interleaved metadata */ 2519 bdev->md_interleave = true; 2520 bdev->md_len = 64; 2521 bdev->blocklen = 4096 + 64; 2522 num_blocks = (ZERO_BUFFER_SIZE / bdev->blocklen) * 2; 2523 2524 num_requests = offset = 0; 2525 while (offset < num_blocks) { 2526 num_io_blocks = spdk_min(ZERO_BUFFER_SIZE / bdev->blocklen, num_blocks - offset); 2527 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 2528 offset, num_io_blocks, 0); 2529 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2530 offset += num_io_blocks; 2531 num_requests++; 2532 } 2533 2534 rc = spdk_bdev_write_zeroes_blocks(desc, ioch, 0, num_blocks, io_done, NULL); 2535 CU_ASSERT_EQUAL(rc, 0); 2536 num_completed = stub_complete_io(num_requests); 2537 CU_ASSERT_EQUAL(num_completed, num_requests); 2538 num_completed = stub_complete_io(num_requests); 2539 CU_ASSERT_EQUAL(num_completed, 0); 2540 2541 /* Check the same for a separate metadata buffer */ 2542 bdev->md_interleave = false; 2543 bdev->md_len = 64; 2544 bdev->blocklen = 4096; 2545 2546 num_requests = offset = 0; 2547 while (offset < num_blocks) { 2548 num_io_blocks = spdk_min(ZERO_BUFFER_SIZE / (bdev->blocklen + bdev->md_len), num_blocks); 2549 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 2550 offset, num_io_blocks, 0); 2551 expected_io->md_buf = (char *)g_bdev_mgr.zero_buffer + num_io_blocks * bdev->blocklen; 2552 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2553 offset += num_io_blocks; 2554 num_requests++; 2555 } 2556 2557 rc = spdk_bdev_write_zeroes_blocks(desc, ioch, 0, num_blocks, io_done, NULL); 2558 CU_ASSERT_EQUAL(rc, 0); 2559 num_completed = stub_complete_io(num_requests); 2560 CU_ASSERT_EQUAL(num_completed, num_requests); 2561 2562 ut_enable_io_type(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, true); 2563 spdk_put_io_channel(ioch); 2564 spdk_bdev_close(desc); 2565 free_bdev(bdev); 2566 spdk_bdev_finish(bdev_fini_cb, NULL); 2567 poll_threads(); 2568 } 2569 2570 static void 2571 bdev_open_while_hotremove(void) 2572 { 2573 struct spdk_bdev *bdev; 2574 struct spdk_bdev_desc *desc[2] = {}; 2575 int rc; 2576 2577 bdev = allocate_bdev("bdev"); 2578 2579 rc = spdk_bdev_open_ext("bdev", false, bdev_ut_event_cb, NULL, &desc[0]); 2580 CU_ASSERT(rc == 0); 2581 SPDK_CU_ASSERT_FATAL(desc[0] != NULL); 2582 CU_ASSERT(bdev ==
spdk_bdev_desc_get_bdev(desc[0])); 2583 2584 spdk_bdev_unregister(bdev, NULL, NULL); 2585 2586 rc = spdk_bdev_open_ext("bdev", false, bdev_ut_event_cb, NULL, &desc[1]); 2587 CU_ASSERT(rc == -ENODEV); 2588 SPDK_CU_ASSERT_FATAL(desc[1] == NULL); 2589 2590 spdk_bdev_close(desc[0]); 2591 free_bdev(bdev); 2592 } 2593 2594 static void 2595 bdev_close_while_hotremove(void) 2596 { 2597 struct spdk_bdev *bdev; 2598 struct spdk_bdev_desc *desc = NULL; 2599 int rc = 0; 2600 2601 bdev = allocate_bdev("bdev"); 2602 2603 rc = spdk_bdev_open_ext("bdev", true, bdev_open_cb1, &desc, &desc); 2604 CU_ASSERT_EQUAL(rc, 0); 2605 SPDK_CU_ASSERT_FATAL(desc != NULL); 2606 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 2607 2608 /* Simulate hot-unplug by unregistering bdev */ 2609 g_event_type1 = 0xFF; 2610 g_unregister_arg = NULL; 2611 g_unregister_rc = -1; 2612 spdk_bdev_unregister(bdev, bdev_unregister_cb, (void *)0x12345678); 2613 /* Close device while remove event is in flight */ 2614 spdk_bdev_close(desc); 2615 2616 /* Ensure that unregister callback is delayed */ 2617 CU_ASSERT_EQUAL(g_unregister_arg, NULL); 2618 CU_ASSERT_EQUAL(g_unregister_rc, -1); 2619 2620 poll_threads(); 2621 2622 /* Event callback shall not be issued because device was closed */ 2623 CU_ASSERT_EQUAL(g_event_type1, 0xFF); 2624 /* Unregister callback is issued */ 2625 CU_ASSERT_EQUAL(g_unregister_arg, (void *)0x12345678); 2626 CU_ASSERT_EQUAL(g_unregister_rc, 0); 2627 2628 free_bdev(bdev); 2629 } 2630 2631 static void 2632 bdev_open_ext(void) 2633 { 2634 struct spdk_bdev *bdev; 2635 struct spdk_bdev_desc *desc1 = NULL; 2636 struct spdk_bdev_desc *desc2 = NULL; 2637 int rc = 0; 2638 2639 bdev = allocate_bdev("bdev"); 2640 2641 rc = spdk_bdev_open_ext("bdev", true, NULL, NULL, &desc1); 2642 CU_ASSERT_EQUAL(rc, -EINVAL); 2643 2644 rc = spdk_bdev_open_ext("bdev", true, bdev_open_cb1, &desc1, &desc1); 2645 CU_ASSERT_EQUAL(rc, 0); 2646 2647 rc = spdk_bdev_open_ext("bdev", true, bdev_open_cb2, &desc2, &desc2); 2648 CU_ASSERT_EQUAL(rc, 0); 2649 2650 g_event_type1 = 0xFF; 2651 g_event_type2 = 0xFF; 2652 2653 /* Simulate hot-unplug by unregistering bdev */ 2654 spdk_bdev_unregister(bdev, NULL, NULL); 2655 poll_threads(); 2656 2657 /* Check if correct events have been triggered in event callback fn */ 2658 CU_ASSERT_EQUAL(g_event_type1, SPDK_BDEV_EVENT_REMOVE); 2659 CU_ASSERT_EQUAL(g_event_type2, SPDK_BDEV_EVENT_REMOVE); 2660 2661 free_bdev(bdev); 2662 poll_threads(); 2663 } 2664 2665 struct timeout_io_cb_arg { 2666 struct iovec iov; 2667 uint8_t type; 2668 }; 2669 2670 static int 2671 bdev_channel_count_submitted_io(struct spdk_bdev_channel *ch) 2672 { 2673 struct spdk_bdev_io *bdev_io; 2674 int n = 0; 2675 2676 if (!ch) { 2677 return -1; 2678 } 2679 2680 TAILQ_FOREACH(bdev_io, &ch->io_submitted, internal.ch_link) { 2681 n++; 2682 } 2683 2684 return n; 2685 } 2686 2687 static void 2688 bdev_channel_io_timeout_cb(void *cb_arg, struct spdk_bdev_io *bdev_io) 2689 { 2690 struct timeout_io_cb_arg *ctx = cb_arg; 2691 2692 ctx->type = bdev_io->type; 2693 ctx->iov.iov_base = bdev_io->iov.iov_base; 2694 ctx->iov.iov_len = bdev_io->iov.iov_len; 2695 } 2696 2697 static void 2698 bdev_set_io_timeout(void) 2699 { 2700 struct spdk_bdev *bdev; 2701 struct spdk_bdev_desc *desc = NULL; 2702 struct spdk_io_channel *io_ch = NULL; 2703 struct spdk_bdev_channel *bdev_ch = NULL; 2704 struct timeout_io_cb_arg cb_arg; 2705 2706 spdk_bdev_initialize(bdev_init_cb, NULL); 2707 2708 bdev = allocate_bdev("bdev"); 2709 2710 CU_ASSERT(spdk_bdev_open_ext("bdev", true, 
bdev_ut_event_cb, NULL, &desc) == 0); 2711 SPDK_CU_ASSERT_FATAL(desc != NULL); 2712 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 2713 2714 io_ch = spdk_bdev_get_io_channel(desc); 2715 CU_ASSERT(io_ch != NULL); 2716 2717 bdev_ch = spdk_io_channel_get_ctx(io_ch); 2718 CU_ASSERT(TAILQ_EMPTY(&bdev_ch->io_submitted)); 2719 2720 /* This is part 1. 2721 * We will check the bdev_ch->io_submitted list 2722 * to make sure that it links only the user-submitted I/Os. 2723 */ 2724 CU_ASSERT(spdk_bdev_read(desc, io_ch, (void *)0x1000, 0, 4096, io_done, NULL) == 0); 2725 CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 1); 2726 CU_ASSERT(spdk_bdev_write(desc, io_ch, (void *)0x2000, 0, 4096, io_done, NULL) == 0); 2727 CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 2); 2728 stub_complete_io(1); 2729 CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 1); 2730 stub_complete_io(1); 2731 CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 0); 2732 2733 /* Split IO */ 2734 bdev->optimal_io_boundary = 16; 2735 bdev->split_on_optimal_io_boundary = true; 2736 2737 /* Now test that a single-vector command is split correctly. 2738 * Offset 14, length 8, payload 0xF000 2739 * Child - Offset 14, length 2, payload 0xF000 2740 * Child - Offset 16, length 6, payload 0xF000 + 2 * 512 2741 * 2742 * Set up the expected values before calling spdk_bdev_read_blocks 2743 */ 2744 CU_ASSERT(spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 8, io_done, NULL) == 0); 2745 /* We count all submitted IOs including I/Os that are generated by splitting. */ 2746 CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 3); 2747 stub_complete_io(1); 2748 CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 2); 2749 stub_complete_io(1); 2750 CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 0); 2751 2752 /* Also include the reset IO */ 2753 CU_ASSERT(spdk_bdev_reset(desc, io_ch, io_done, NULL) == 0); 2754 CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 1); 2755 poll_threads(); 2756 stub_complete_io(1); 2757 poll_threads(); 2758 CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 0); 2759 2760 /* This is part 2. 2761 * Test the desc timeout poller registration. 2762 */ 2763 2764 /* Successfully set the timeout */ 2765 CU_ASSERT(spdk_bdev_set_timeout(desc, 30, bdev_channel_io_timeout_cb, &cb_arg) == 0); 2766 CU_ASSERT(desc->io_timeout_poller != NULL); 2767 CU_ASSERT(desc->timeout_in_sec == 30); 2768 CU_ASSERT(desc->cb_fn == bdev_channel_io_timeout_cb); 2769 CU_ASSERT(desc->cb_arg == &cb_arg); 2770 2771 /* Change the timeout limit */ 2772 CU_ASSERT(spdk_bdev_set_timeout(desc, 20, bdev_channel_io_timeout_cb, &cb_arg) == 0); 2773 CU_ASSERT(desc->io_timeout_poller != NULL); 2774 CU_ASSERT(desc->timeout_in_sec == 20); 2775 CU_ASSERT(desc->cb_fn == bdev_channel_io_timeout_cb); 2776 CU_ASSERT(desc->cb_arg == &cb_arg); 2777 2778 /* Disable the timeout */ 2779 CU_ASSERT(spdk_bdev_set_timeout(desc, 0, NULL, NULL) == 0); 2780 CU_ASSERT(desc->io_timeout_poller == NULL); 2781 2782 /* This is part 3. 2783 * We will try to catch a timed-out I/O and check whether it is 2784 * the submitted one.
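* The test advances mock time with spdk_delay_us(); once an I/O has been outstanding longer than the timeout_in_sec limit, the registered callback is expected to fire with that I/O's type and payload.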
2785 */ 2786 memset(&cb_arg, 0, sizeof(cb_arg)); 2787 CU_ASSERT(spdk_bdev_set_timeout(desc, 30, bdev_channel_io_timeout_cb, &cb_arg) == 0); 2788 CU_ASSERT(spdk_bdev_write_blocks(desc, io_ch, (void *)0x1000, 0, 1, io_done, NULL) == 0); 2789 2790 /* Don't reach the limit */ 2791 spdk_delay_us(15 * spdk_get_ticks_hz()); 2792 poll_threads(); 2793 CU_ASSERT(cb_arg.type == 0); 2794 CU_ASSERT(cb_arg.iov.iov_base == (void *)0x0); 2795 CU_ASSERT(cb_arg.iov.iov_len == 0); 2796 2797 /* 15 + 15 = 30, reaching the limit */ 2798 spdk_delay_us(15 * spdk_get_ticks_hz()); 2799 poll_threads(); 2800 CU_ASSERT(cb_arg.type == SPDK_BDEV_IO_TYPE_WRITE); 2801 CU_ASSERT(cb_arg.iov.iov_base == (void *)0x1000); 2802 CU_ASSERT(cb_arg.iov.iov_len == 1 * bdev->blocklen); 2803 stub_complete_io(1); 2804 2805 /* Use the same split I/O as above and check the I/O */ 2806 memset(&cb_arg, 0, sizeof(cb_arg)); 2807 CU_ASSERT(spdk_bdev_write_blocks(desc, io_ch, (void *)0xF000, 14, 8, io_done, NULL) == 0); 2808 2809 /* The first child completes in time */ 2810 spdk_delay_us(15 * spdk_get_ticks_hz()); 2811 poll_threads(); 2812 stub_complete_io(1); 2813 CU_ASSERT(cb_arg.type == 0); 2814 CU_ASSERT(cb_arg.iov.iov_base == (void *)0x0); 2815 CU_ASSERT(cb_arg.iov.iov_len == 0); 2816 2817 /* The second child reaches the limit */ 2818 spdk_delay_us(15 * spdk_get_ticks_hz()); 2819 poll_threads(); 2820 CU_ASSERT(cb_arg.type == SPDK_BDEV_IO_TYPE_WRITE); 2821 CU_ASSERT(cb_arg.iov.iov_base == (void *)0xF000); 2822 CU_ASSERT(cb_arg.iov.iov_len == 8 * bdev->blocklen); 2823 stub_complete_io(1); 2824 2825 /* Also include the reset IO */ 2826 memset(&cb_arg, 0, sizeof(cb_arg)); 2827 CU_ASSERT(spdk_bdev_reset(desc, io_ch, io_done, NULL) == 0); 2828 spdk_delay_us(30 * spdk_get_ticks_hz()); 2829 poll_threads(); 2830 CU_ASSERT(cb_arg.type == SPDK_BDEV_IO_TYPE_RESET); 2831 stub_complete_io(1); 2832 poll_threads(); 2833 2834 spdk_put_io_channel(io_ch); 2835 spdk_bdev_close(desc); 2836 free_bdev(bdev); 2837 spdk_bdev_finish(bdev_fini_cb, NULL); 2838 poll_threads(); 2839 } 2840 2841 static void 2842 lba_range_overlap(void) 2843 { 2844 struct lba_range r1, r2; 2845 2846 r1.offset = 100; 2847 r1.length = 50; 2848 2849 r2.offset = 0; 2850 r2.length = 1; 2851 CU_ASSERT(!bdev_lba_range_overlapped(&r1, &r2)); 2852 2853 r2.offset = 0; 2854 r2.length = 100; 2855 CU_ASSERT(!bdev_lba_range_overlapped(&r1, &r2)); 2856 2857 r2.offset = 0; 2858 r2.length = 110; 2859 CU_ASSERT(bdev_lba_range_overlapped(&r1, &r2)); 2860 2861 r2.offset = 100; 2862 r2.length = 10; 2863 CU_ASSERT(bdev_lba_range_overlapped(&r1, &r2)); 2864 2865 r2.offset = 110; 2866 r2.length = 20; 2867 CU_ASSERT(bdev_lba_range_overlapped(&r1, &r2)); 2868 2869 r2.offset = 140; 2870 r2.length = 150; 2871 CU_ASSERT(bdev_lba_range_overlapped(&r1, &r2)); 2872 2873 r2.offset = 130; 2874 r2.length = 200; 2875 CU_ASSERT(bdev_lba_range_overlapped(&r1, &r2)); 2876 2877 r2.offset = 150; 2878 r2.length = 100; 2879 CU_ASSERT(!bdev_lba_range_overlapped(&r1, &r2)); 2880 2881 r2.offset = 110; 2882 r2.length = 0; 2883 CU_ASSERT(!bdev_lba_range_overlapped(&r1, &r2)); 2884 } 2885 2886 static bool g_lock_lba_range_done; 2887 static bool g_unlock_lba_range_done; 2888 2889 static void 2890 lock_lba_range_done(void *ctx, int status) 2891 { 2892 g_lock_lba_range_done = true; 2893 } 2894 2895 static void 2896 unlock_lba_range_done(void *ctx, int status) 2897 { 2898 g_unlock_lba_range_done = true; 2899 } 2900 2901 static void 2902 lock_lba_range_check_ranges(void) 2903 { 2904 struct spdk_bdev *bdev; 2905 struct spdk_bdev_desc *desc = NULL; 2906
struct spdk_io_channel *io_ch; 2907 struct spdk_bdev_channel *channel; 2908 struct lba_range *range; 2909 int ctx1; 2910 int rc; 2911 2912 spdk_bdev_initialize(bdev_init_cb, NULL); 2913 2914 bdev = allocate_bdev("bdev0"); 2915 2916 rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc); 2917 CU_ASSERT(rc == 0); 2918 CU_ASSERT(desc != NULL); 2919 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 2920 io_ch = spdk_bdev_get_io_channel(desc); 2921 CU_ASSERT(io_ch != NULL); 2922 channel = spdk_io_channel_get_ctx(io_ch); 2923 2924 g_lock_lba_range_done = false; 2925 rc = bdev_lock_lba_range(desc, io_ch, 20, 10, lock_lba_range_done, &ctx1); 2926 CU_ASSERT(rc == 0); 2927 poll_threads(); 2928 2929 CU_ASSERT(g_lock_lba_range_done == true); 2930 range = TAILQ_FIRST(&channel->locked_ranges); 2931 SPDK_CU_ASSERT_FATAL(range != NULL); 2932 CU_ASSERT(range->offset == 20); 2933 CU_ASSERT(range->length == 10); 2934 CU_ASSERT(range->owner_ch == channel); 2935 2936 /* Unlocks must exactly match a lock. */ 2937 g_unlock_lba_range_done = false; 2938 rc = bdev_unlock_lba_range(desc, io_ch, 20, 1, unlock_lba_range_done, &ctx1); 2939 CU_ASSERT(rc == -EINVAL); 2940 CU_ASSERT(g_unlock_lba_range_done == false); 2941 2942 rc = bdev_unlock_lba_range(desc, io_ch, 20, 10, unlock_lba_range_done, &ctx1); 2943 CU_ASSERT(rc == 0); 2944 spdk_delay_us(100); 2945 poll_threads(); 2946 2947 CU_ASSERT(g_unlock_lba_range_done == true); 2948 CU_ASSERT(TAILQ_EMPTY(&channel->locked_ranges)); 2949 2950 spdk_put_io_channel(io_ch); 2951 spdk_bdev_close(desc); 2952 free_bdev(bdev); 2953 spdk_bdev_finish(bdev_fini_cb, NULL); 2954 poll_threads(); 2955 } 2956 2957 static void 2958 lock_lba_range_with_io_outstanding(void) 2959 { 2960 struct spdk_bdev *bdev; 2961 struct spdk_bdev_desc *desc = NULL; 2962 struct spdk_io_channel *io_ch; 2963 struct spdk_bdev_channel *channel; 2964 struct lba_range *range; 2965 char buf[4096]; 2966 int ctx1; 2967 int rc; 2968 2969 spdk_bdev_initialize(bdev_init_cb, NULL); 2970 2971 bdev = allocate_bdev("bdev0"); 2972 2973 rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc); 2974 CU_ASSERT(rc == 0); 2975 CU_ASSERT(desc != NULL); 2976 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 2977 io_ch = spdk_bdev_get_io_channel(desc); 2978 CU_ASSERT(io_ch != NULL); 2979 channel = spdk_io_channel_get_ctx(io_ch); 2980 2981 g_io_done = false; 2982 rc = spdk_bdev_read_blocks(desc, io_ch, buf, 20, 1, io_done, &ctx1); 2983 CU_ASSERT(rc == 0); 2984 2985 g_lock_lba_range_done = false; 2986 rc = bdev_lock_lba_range(desc, io_ch, 20, 10, lock_lba_range_done, &ctx1); 2987 CU_ASSERT(rc == 0); 2988 poll_threads(); 2989 2990 /* The lock should immediately become valid, since there are no outstanding 2991 * write I/O. 2992 */ 2993 CU_ASSERT(g_io_done == false); 2994 CU_ASSERT(g_lock_lba_range_done == true); 2995 range = TAILQ_FIRST(&channel->locked_ranges); 2996 SPDK_CU_ASSERT_FATAL(range != NULL); 2997 CU_ASSERT(range->offset == 20); 2998 CU_ASSERT(range->length == 10); 2999 CU_ASSERT(range->owner_ch == channel); 3000 CU_ASSERT(range->locked_ctx == &ctx1); 3001 3002 rc = bdev_unlock_lba_range(desc, io_ch, 20, 10, lock_lba_range_done, &ctx1); 3003 CU_ASSERT(rc == 0); 3004 stub_complete_io(1); 3005 spdk_delay_us(100); 3006 poll_threads(); 3007 3008 CU_ASSERT(TAILQ_EMPTY(&channel->locked_ranges)); 3009 3010 /* Now try again, but with a write I/O. 
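* Unlike the read case above, the lock cannot become valid until the outstanding write completes; reads do not need to be drained.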
*/ 3011 g_io_done = false; 3012 rc = spdk_bdev_write_blocks(desc, io_ch, buf, 20, 1, io_done, &ctx1); 3013 CU_ASSERT(rc == 0); 3014 3015 g_lock_lba_range_done = false; 3016 rc = bdev_lock_lba_range(desc, io_ch, 20, 10, lock_lba_range_done, &ctx1); 3017 CU_ASSERT(rc == 0); 3018 poll_threads(); 3019 3020 /* The lock should not be fully valid yet, since a write I/O is outstanding. 3021 * But note that the range should be on the channel's locked_ranges list, to make sure no 3022 * new write I/Os are started. 3023 */ 3024 CU_ASSERT(g_io_done == false); 3025 CU_ASSERT(g_lock_lba_range_done == false); 3026 range = TAILQ_FIRST(&channel->locked_ranges); 3027 SPDK_CU_ASSERT_FATAL(range != NULL); 3028 CU_ASSERT(range->offset == 20); 3029 CU_ASSERT(range->length == 10); 3030 3031 /* Complete the write I/O. This should make the lock valid (checked by confirming 3032 * our callback was invoked). 3033 */ 3034 stub_complete_io(1); 3035 spdk_delay_us(100); 3036 poll_threads(); 3037 CU_ASSERT(g_io_done == true); 3038 CU_ASSERT(g_lock_lba_range_done == true); 3039 3040 rc = bdev_unlock_lba_range(desc, io_ch, 20, 10, unlock_lba_range_done, &ctx1); 3041 CU_ASSERT(rc == 0); 3042 poll_threads(); 3043 3044 CU_ASSERT(TAILQ_EMPTY(&channel->locked_ranges)); 3045 3046 spdk_put_io_channel(io_ch); 3047 spdk_bdev_close(desc); 3048 free_bdev(bdev); 3049 spdk_bdev_finish(bdev_fini_cb, NULL); 3050 poll_threads(); 3051 } 3052 3053 static void 3054 lock_lba_range_overlapped(void) 3055 { 3056 struct spdk_bdev *bdev; 3057 struct spdk_bdev_desc *desc = NULL; 3058 struct spdk_io_channel *io_ch; 3059 struct spdk_bdev_channel *channel; 3060 struct lba_range *range; 3061 int ctx1; 3062 int rc; 3063 3064 spdk_bdev_initialize(bdev_init_cb, NULL); 3065 3066 bdev = allocate_bdev("bdev0"); 3067 3068 rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc); 3069 CU_ASSERT(rc == 0); 3070 CU_ASSERT(desc != NULL); 3071 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 3072 io_ch = spdk_bdev_get_io_channel(desc); 3073 CU_ASSERT(io_ch != NULL); 3074 channel = spdk_io_channel_get_ctx(io_ch); 3075 3076 /* Lock range 20-29. */ 3077 g_lock_lba_range_done = false; 3078 rc = bdev_lock_lba_range(desc, io_ch, 20, 10, lock_lba_range_done, &ctx1); 3079 CU_ASSERT(rc == 0); 3080 poll_threads(); 3081 3082 CU_ASSERT(g_lock_lba_range_done == true); 3083 range = TAILQ_FIRST(&channel->locked_ranges); 3084 SPDK_CU_ASSERT_FATAL(range != NULL); 3085 CU_ASSERT(range->offset == 20); 3086 CU_ASSERT(range->length == 10); 3087 3088 /* Try to lock range 25-39. It should not lock immediately, since it overlaps with 3089 * 20-29. 3090 */ 3091 g_lock_lba_range_done = false; 3092 rc = bdev_lock_lba_range(desc, io_ch, 25, 15, lock_lba_range_done, &ctx1); 3093 CU_ASSERT(rc == 0); 3094 poll_threads(); 3095 3096 CU_ASSERT(g_lock_lba_range_done == false); 3097 range = TAILQ_FIRST(&bdev->internal.pending_locked_ranges); 3098 SPDK_CU_ASSERT_FATAL(range != NULL); 3099 CU_ASSERT(range->offset == 25); 3100 CU_ASSERT(range->length == 15); 3101 3102 /* Unlock 20-29. This should result in range 25-39 now getting locked since it 3103 * no longer overlaps with an active lock.
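* Pending locks sit on bdev->internal.pending_locked_ranges until every overlapping active lock is released, at which point they are retried.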
3104 */ 3105 g_unlock_lba_range_done = false; 3106 rc = bdev_unlock_lba_range(desc, io_ch, 20, 10, unlock_lba_range_done, &ctx1); 3107 CU_ASSERT(rc == 0); 3108 poll_threads(); 3109 3110 CU_ASSERT(g_unlock_lba_range_done == true); 3111 CU_ASSERT(TAILQ_EMPTY(&bdev->internal.pending_locked_ranges)); 3112 range = TAILQ_FIRST(&channel->locked_ranges); 3113 SPDK_CU_ASSERT_FATAL(range != NULL); 3114 CU_ASSERT(range->offset == 25); 3115 CU_ASSERT(range->length == 15); 3116 3117 /* Lock 40-59. This should immediately lock since it does not overlap with the 3118 * currently active 25-39 lock. 3119 */ 3120 g_lock_lba_range_done = false; 3121 rc = bdev_lock_lba_range(desc, io_ch, 40, 20, lock_lba_range_done, &ctx1); 3122 CU_ASSERT(rc == 0); 3123 poll_threads(); 3124 3125 CU_ASSERT(g_lock_lba_range_done == true); 3126 range = TAILQ_FIRST(&bdev->internal.locked_ranges); 3127 SPDK_CU_ASSERT_FATAL(range != NULL); 3128 range = TAILQ_NEXT(range, tailq); 3129 SPDK_CU_ASSERT_FATAL(range != NULL); 3130 CU_ASSERT(range->offset == 40); 3131 CU_ASSERT(range->length == 20); 3132 3133 /* Try to lock 35-44. Note that this overlaps with both 25-39 and 40-59. */ 3134 g_lock_lba_range_done = false; 3135 rc = bdev_lock_lba_range(desc, io_ch, 35, 10, lock_lba_range_done, &ctx1); 3136 CU_ASSERT(rc == 0); 3137 poll_threads(); 3138 3139 CU_ASSERT(g_lock_lba_range_done == false); 3140 range = TAILQ_FIRST(&bdev->internal.pending_locked_ranges); 3141 SPDK_CU_ASSERT_FATAL(range != NULL); 3142 CU_ASSERT(range->offset == 35); 3143 CU_ASSERT(range->length == 10); 3144 3145 /* Unlock 25-39. Make sure that 35-44 is still in the pending list, since 3146 * the 40-59 lock is still active. 3147 */ 3148 g_unlock_lba_range_done = false; 3149 rc = bdev_unlock_lba_range(desc, io_ch, 25, 15, unlock_lba_range_done, &ctx1); 3150 CU_ASSERT(rc == 0); 3151 poll_threads(); 3152 3153 CU_ASSERT(g_unlock_lba_range_done == true); 3154 CU_ASSERT(g_lock_lba_range_done == false); 3155 range = TAILQ_FIRST(&bdev->internal.pending_locked_ranges); 3156 SPDK_CU_ASSERT_FATAL(range != NULL); 3157 CU_ASSERT(range->offset == 35); 3158 CU_ASSERT(range->length == 10); 3159 3160 /* Unlock 40-59. This should result in 35-44 now getting locked, since there are 3161 * no longer any active overlapping locks. 3162 */ 3163 g_unlock_lba_range_done = false; 3164 rc = bdev_unlock_lba_range(desc, io_ch, 40, 20, unlock_lba_range_done, &ctx1); 3165 CU_ASSERT(rc == 0); 3166 poll_threads(); 3167 3168 CU_ASSERT(g_unlock_lba_range_done == true); 3169 CU_ASSERT(g_lock_lba_range_done == true); 3170 CU_ASSERT(TAILQ_EMPTY(&bdev->internal.pending_locked_ranges)); 3171 range = TAILQ_FIRST(&bdev->internal.locked_ranges); 3172 SPDK_CU_ASSERT_FATAL(range != NULL); 3173 CU_ASSERT(range->offset == 35); 3174 CU_ASSERT(range->length == 10); 3175 3176 /* Finally, unlock 35-44. 
*/ 3177 g_unlock_lba_range_done = false; 3178 rc = bdev_unlock_lba_range(desc, io_ch, 35, 10, unlock_lba_range_done, &ctx1); 3179 CU_ASSERT(rc == 0); 3180 poll_threads(); 3181 3182 CU_ASSERT(g_unlock_lba_range_done == true); 3183 CU_ASSERT(TAILQ_EMPTY(&bdev->internal.locked_ranges)); 3184 3185 spdk_put_io_channel(io_ch); 3186 spdk_bdev_close(desc); 3187 free_bdev(bdev); 3188 spdk_bdev_finish(bdev_fini_cb, NULL); 3189 poll_threads(); 3190 } 3191 3192 static void 3193 abort_done(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg) 3194 { 3195 g_abort_done = true; 3196 g_abort_status = bdev_io->internal.status; 3197 spdk_bdev_free_io(bdev_io); 3198 } 3199 3200 static void 3201 bdev_io_abort(void) 3202 { 3203 struct spdk_bdev *bdev; 3204 struct spdk_bdev_desc *desc = NULL; 3205 struct spdk_io_channel *io_ch; 3206 struct spdk_bdev_channel *channel; 3207 struct spdk_bdev_mgmt_channel *mgmt_ch; 3208 struct spdk_bdev_opts bdev_opts = { 3209 .bdev_io_pool_size = 7, 3210 .bdev_io_cache_size = 2, 3211 }; 3212 struct iovec iov[BDEV_IO_NUM_CHILD_IOV * 2]; 3213 uint64_t io_ctx1 = 0, io_ctx2 = 0, i; 3214 int rc; 3215 3216 rc = spdk_bdev_set_opts(&bdev_opts); 3217 CU_ASSERT(rc == 0); 3218 spdk_bdev_initialize(bdev_init_cb, NULL); 3219 3220 bdev = allocate_bdev("bdev0"); 3221 3222 rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc); 3223 CU_ASSERT(rc == 0); 3224 CU_ASSERT(desc != NULL); 3225 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 3226 io_ch = spdk_bdev_get_io_channel(desc); 3227 CU_ASSERT(io_ch != NULL); 3228 channel = spdk_io_channel_get_ctx(io_ch); 3229 mgmt_ch = channel->shared_resource->mgmt_ch; 3230 3231 g_abort_done = false; 3232 3233 ut_enable_io_type(SPDK_BDEV_IO_TYPE_ABORT, false); 3234 3235 rc = spdk_bdev_abort(desc, io_ch, &io_ctx1, abort_done, NULL); 3236 CU_ASSERT(rc == -ENOTSUP); 3237 3238 ut_enable_io_type(SPDK_BDEV_IO_TYPE_ABORT, true); 3239 3240 rc = spdk_bdev_abort(desc, io_ch, &io_ctx2, abort_done, NULL); 3241 CU_ASSERT(rc == 0); 3242 CU_ASSERT(g_abort_done == true); 3243 CU_ASSERT(g_abort_status == SPDK_BDEV_IO_STATUS_FAILED); 3244 3245 /* Test the case that the target I/O was successfully aborted. */ 3246 g_io_done = false; 3247 3248 rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, &io_ctx1); 3249 CU_ASSERT(rc == 0); 3250 CU_ASSERT(g_io_done == false); 3251 3252 g_abort_done = false; 3253 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 3254 3255 rc = spdk_bdev_abort(desc, io_ch, &io_ctx1, abort_done, NULL); 3256 CU_ASSERT(rc == 0); 3257 CU_ASSERT(g_io_done == true); 3258 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED); 3259 stub_complete_io(1); 3260 CU_ASSERT(g_abort_done == true); 3261 CU_ASSERT(g_abort_status == SPDK_BDEV_IO_STATUS_SUCCESS); 3262 3263 /* Test the case that the target I/O was not aborted because it completed 3264 * in the middle of execution of the abort. 
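* The stub completes the target read with success after the abort has been issued; the abort itself is still expected to report SPDK_BDEV_IO_STATUS_SUCCESS even though it had nothing left to cancel.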
3265 */ 3266 g_io_done = false; 3267 3268 rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, &io_ctx1); 3269 CU_ASSERT(rc == 0); 3270 CU_ASSERT(g_io_done == false); 3271 3272 g_abort_done = false; 3273 g_io_exp_status = SPDK_BDEV_IO_STATUS_FAILED; 3274 3275 rc = spdk_bdev_abort(desc, io_ch, &io_ctx1, abort_done, NULL); 3276 CU_ASSERT(rc == 0); 3277 CU_ASSERT(g_io_done == false); 3278 3279 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 3280 stub_complete_io(1); 3281 CU_ASSERT(g_io_done == true); 3282 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS); 3283 3284 g_io_exp_status = SPDK_BDEV_IO_STATUS_FAILED; 3285 stub_complete_io(1); 3286 CU_ASSERT(g_abort_done == true); 3287 CU_ASSERT(g_abort_status == SPDK_BDEV_IO_STATUS_SUCCESS); 3288 3289 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 3290 3291 bdev->optimal_io_boundary = 16; 3292 bdev->split_on_optimal_io_boundary = true; 3293 3294 /* Test that a single-vector command which is split is aborted correctly. 3295 * Offset 14, length 8, payload 0xF000 3296 * Child - Offset 14, length 2, payload 0xF000 3297 * Child - Offset 16, length 6, payload 0xF000 + 2 * 512 3298 */ 3299 g_io_done = false; 3300 3301 rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 8, io_done, &io_ctx1); 3302 CU_ASSERT(rc == 0); 3303 CU_ASSERT(g_io_done == false); 3304 3305 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 3306 3307 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 3308 3309 rc = spdk_bdev_abort(desc, io_ch, &io_ctx1, abort_done, NULL); 3310 CU_ASSERT(rc == 0); 3311 CU_ASSERT(g_io_done == true); 3312 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED); 3313 stub_complete_io(2); 3314 CU_ASSERT(g_abort_done == true); 3315 CU_ASSERT(g_abort_status == SPDK_BDEV_IO_STATUS_SUCCESS); 3316 3317 /* Test that a multi-vector command that needs to be split by stripe and then 3318 * split further is aborted correctly. Abort is requested before the second 3319 * child I/O was submitted. The parent I/O should complete with failure without 3320 * submitting the second child I/O. 3321 */ 3322 for (i = 0; i < BDEV_IO_NUM_CHILD_IOV * 2; i++) { 3323 iov[i].iov_base = (void *)((i + 1) * 0x10000); 3324 iov[i].iov_len = 512; 3325 } 3326 3327 bdev->optimal_io_boundary = BDEV_IO_NUM_CHILD_IOV; 3328 g_io_done = false; 3329 rc = spdk_bdev_readv_blocks(desc, io_ch, iov, BDEV_IO_NUM_CHILD_IOV * 2, 0, 3330 BDEV_IO_NUM_CHILD_IOV * 2, io_done, &io_ctx1); 3331 CU_ASSERT(rc == 0); 3332 CU_ASSERT(g_io_done == false); 3333 3334 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 3335 3336 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 3337 3338 rc = spdk_bdev_abort(desc, io_ch, &io_ctx1, abort_done, NULL); 3339 CU_ASSERT(rc == 0); 3340 CU_ASSERT(g_io_done == true); 3341 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED); 3342 stub_complete_io(1); 3343 CU_ASSERT(g_abort_done == true); 3344 CU_ASSERT(g_abort_status == SPDK_BDEV_IO_STATUS_SUCCESS); 3345 3346 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 3347 3348 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 3349 3350 bdev->optimal_io_boundary = 16; 3351 g_io_done = false; 3352 3353 /* Test that a single-vector command which is split is aborted correctly. 3354 * Unlike the above, the child abort requests will be submitted 3355 * sequentially due to the capacity of spdk_bdev_io.
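* With bdev_io_pool_size set to 7, the parent read plus its four children consume five spdk_bdev_ios, so the abort's per-child requests must wait on the mgmt channel's io_wait_queue until completions return bdev_ios to the pool.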
3356 */ 3357 rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 50, io_done, &io_ctx1); 3358 CU_ASSERT(rc == 0); 3359 CU_ASSERT(g_io_done == false); 3360 3361 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 4); 3362 3363 g_abort_done = false; 3364 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 3365 3366 rc = spdk_bdev_abort(desc, io_ch, &io_ctx1, abort_done, NULL); 3367 CU_ASSERT(rc == 0); 3368 CU_ASSERT(!TAILQ_EMPTY(&mgmt_ch->io_wait_queue)); 3369 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 4); 3370 3371 stub_complete_io(1); 3372 CU_ASSERT(g_io_done == true); 3373 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED); 3374 stub_complete_io(3); 3375 CU_ASSERT(g_abort_done == true); 3376 CU_ASSERT(g_abort_status == SPDK_BDEV_IO_STATUS_SUCCESS); 3377 3378 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 3379 3380 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 3381 3382 spdk_put_io_channel(io_ch); 3383 spdk_bdev_close(desc); 3384 free_bdev(bdev); 3385 spdk_bdev_finish(bdev_fini_cb, NULL); 3386 poll_threads(); 3387 } 3388 3389 int 3390 main(int argc, char **argv) 3391 { 3392 CU_pSuite suite = NULL; 3393 unsigned int num_failures; 3394 3395 CU_set_error_action(CUEA_ABORT); 3396 CU_initialize_registry(); 3397 3398 suite = CU_add_suite("bdev", null_init, null_clean); 3399 3400 CU_ADD_TEST(suite, bytes_to_blocks_test); 3401 CU_ADD_TEST(suite, num_blocks_test); 3402 CU_ADD_TEST(suite, io_valid_test); 3403 CU_ADD_TEST(suite, open_write_test); 3404 CU_ADD_TEST(suite, alias_add_del_test); 3405 CU_ADD_TEST(suite, get_device_stat_test); 3406 CU_ADD_TEST(suite, bdev_io_types_test); 3407 CU_ADD_TEST(suite, bdev_io_wait_test); 3408 CU_ADD_TEST(suite, bdev_io_spans_boundary_test); 3409 CU_ADD_TEST(suite, bdev_io_split_test); 3410 CU_ADD_TEST(suite, bdev_io_split_with_io_wait); 3411 CU_ADD_TEST(suite, bdev_io_alignment_with_boundary); 3412 CU_ADD_TEST(suite, bdev_io_alignment); 3413 CU_ADD_TEST(suite, bdev_histograms); 3414 CU_ADD_TEST(suite, bdev_write_zeroes); 3415 CU_ADD_TEST(suite, bdev_compare_and_write); 3416 CU_ADD_TEST(suite, bdev_compare); 3417 CU_ADD_TEST(suite, bdev_open_while_hotremove); 3418 CU_ADD_TEST(suite, bdev_close_while_hotremove); 3419 CU_ADD_TEST(suite, bdev_open_ext); 3420 CU_ADD_TEST(suite, bdev_set_io_timeout); 3421 CU_ADD_TEST(suite, lba_range_overlap); 3422 CU_ADD_TEST(suite, lock_lba_range_check_ranges); 3423 CU_ADD_TEST(suite, lock_lba_range_with_io_outstanding); 3424 CU_ADD_TEST(suite, lock_lba_range_overlapped); 3425 CU_ADD_TEST(suite, bdev_io_abort); 3426 3427 allocate_cores(1); 3428 allocate_threads(1); 3429 set_thread(0); 3430 3431 CU_basic_set_mode(CU_BRM_VERBOSE); 3432 CU_basic_run_tests(); 3433 num_failures = CU_get_number_of_failures(); 3434 CU_cleanup_registry(); 3435 3436 free_threads(); 3437 free_cores(); 3438 3439 return num_failures; 3440 } 3441