1 /*- 2 * BSD LICENSE 3 * 4 * Copyright (c) Intel Corporation. All rights reserved. 5 * Copyright (c) 2019 Mellanox Technologies LTD. All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 11 * * Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer. 13 * * Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in 15 * the documentation and/or other materials provided with the 16 * distribution. 17 * * Neither the name of Intel Corporation nor the names of its 18 * contributors may be used to endorse or promote products derived 19 * from this software without specific prior written permission. 20 * 21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 24 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 25 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 27 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 31 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 32 */ 33 34 #include "spdk_cunit.h" 35 36 #include "common/lib/ut_multithread.c" 37 #include "unit/lib/json_mock.c" 38 39 #include "spdk/config.h" 40 /* HACK: disable VTune integration so the unit test doesn't need VTune headers and libs to build */ 41 #undef SPDK_CONFIG_VTUNE 42 43 #include "bdev/bdev.c" 44 45 struct spdk_trace_histories *g_trace_histories; 46 DEFINE_STUB_V(spdk_trace_add_register_fn, (struct spdk_trace_register_fn *reg_fn)); 47 DEFINE_STUB_V(spdk_trace_register_owner, (uint8_t type, char id_prefix)); 48 DEFINE_STUB_V(spdk_trace_register_object, (uint8_t type, char id_prefix)); 49 DEFINE_STUB_V(spdk_trace_register_description, (const char *name, 50 uint16_t tpoint_id, uint8_t owner_type, 51 uint8_t object_type, uint8_t new_object, 52 uint8_t arg1_type, const char *arg1_name)); 53 DEFINE_STUB_V(_spdk_trace_record, (uint64_t tsc, uint16_t tpoint_id, uint16_t poller_id, 54 uint32_t size, uint64_t object_id, uint64_t arg1)); 55 DEFINE_STUB(spdk_notify_send, uint64_t, (const char *type, const char *ctx), 0); 56 DEFINE_STUB(spdk_notify_type_register, struct spdk_notify_type *, (const char *type), NULL); 57 58 59 int g_status; 60 int g_count; 61 enum spdk_bdev_event_type g_event_type1; 62 enum spdk_bdev_event_type g_event_type2; 63 struct spdk_histogram_data *g_histogram; 64 void *g_unregister_arg; 65 int g_unregister_rc; 66 67 void 68 spdk_scsi_nvme_translate(const struct spdk_bdev_io *bdev_io, 69 int *sc, int *sk, int *asc, int *ascq) 70 { 71 } 72 73 static int 74 null_init(void) 75 { 76 return 0; 77 } 78 79 static int 80 null_clean(void) 81 { 82 return 0; 83 } 84 85 static int 86 stub_destruct(void *ctx) 87 { 88 return 0; 89 } 90 91 struct ut_expected_io { 92 uint8_t type; 93 uint64_t offset; 94 uint64_t length; 95 int iovcnt; 96 struct iovec iov[BDEV_IO_NUM_CHILD_IOV]; 97 void *md_buf; 98 TAILQ_ENTRY(ut_expected_io) link; 99 }; 100 101 struct bdev_ut_channel { 102 TAILQ_HEAD(, spdk_bdev_io) outstanding_io; 103 uint32_t outstanding_io_count; 104 TAILQ_HEAD(, ut_expected_io) expected_io; 105 }; 106 107 static bool g_io_done; 108 static struct spdk_bdev_io *g_bdev_io; 109 static enum spdk_bdev_io_status g_io_status; 110 static enum spdk_bdev_io_status g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 111 static uint32_t g_bdev_ut_io_device; 112 static struct bdev_ut_channel *g_bdev_ut_channel; 113 static void *g_compare_read_buf; 114 static uint32_t g_compare_read_buf_len; 115 static void *g_compare_write_buf; 116 static uint32_t g_compare_write_buf_len; 117 static bool g_abort_done; 118 static enum spdk_bdev_io_status g_abort_status; 119 120 static struct ut_expected_io * 121 ut_alloc_expected_io(uint8_t type, uint64_t offset, uint64_t length, int iovcnt) 122 { 123 struct ut_expected_io *expected_io; 124 125 expected_io = calloc(1, sizeof(*expected_io)); 126 SPDK_CU_ASSERT_FATAL(expected_io != NULL); 127 128 expected_io->type = type; 129 expected_io->offset = offset; 130 expected_io->length = length; 131 expected_io->iovcnt = iovcnt; 132 133 return expected_io; 134 } 135 136 static void 137 ut_expected_io_set_iov(struct ut_expected_io *expected_io, int pos, void *base, size_t len) 138 { 139 expected_io->iov[pos].iov_base = base; 140 expected_io->iov[pos].iov_len = len; 141 } 142 143 static void 144 stub_submit_request(struct spdk_io_channel *_ch, struct spdk_bdev_io *bdev_io) 145 { 146 struct bdev_ut_channel *ch = spdk_io_channel_get_ctx(_ch); 147 struct ut_expected_io *expected_io; 148 struct iovec *iov, *expected_iov; 149 struct spdk_bdev_io *bio_to_abort; 150 int i; 151 152 g_bdev_io = bdev_io; 153 154 if (g_compare_read_buf && bdev_io->type == SPDK_BDEV_IO_TYPE_READ) { 155 uint32_t len = bdev_io->u.bdev.iovs[0].iov_len; 156 157 CU_ASSERT(bdev_io->u.bdev.iovcnt == 1); 158 CU_ASSERT(g_compare_read_buf_len == len); 159 memcpy(bdev_io->u.bdev.iovs[0].iov_base, g_compare_read_buf, len); 160 } 161 162 if (g_compare_write_buf && bdev_io->type == SPDK_BDEV_IO_TYPE_WRITE) { 163 uint32_t len = bdev_io->u.bdev.iovs[0].iov_len; 164 165 CU_ASSERT(bdev_io->u.bdev.iovcnt == 1); 166 CU_ASSERT(g_compare_write_buf_len == len); 167 memcpy(g_compare_write_buf, bdev_io->u.bdev.iovs[0].iov_base, len); 168 } 169 170 if (g_compare_read_buf && bdev_io->type == SPDK_BDEV_IO_TYPE_COMPARE) { 171 uint32_t len = bdev_io->u.bdev.iovs[0].iov_len; 172 173 CU_ASSERT(bdev_io->u.bdev.iovcnt == 1); 174 CU_ASSERT(g_compare_read_buf_len == len); 175 if (memcmp(bdev_io->u.bdev.iovs[0].iov_base, g_compare_read_buf, len)) { 176 g_io_exp_status = SPDK_BDEV_IO_STATUS_MISCOMPARE; 177 } 178 } 179 180 if (bdev_io->type == SPDK_BDEV_IO_TYPE_ABORT) { 181 if (g_io_exp_status == SPDK_BDEV_IO_STATUS_SUCCESS) { 182 TAILQ_FOREACH(bio_to_abort, &ch->outstanding_io, module_link) { 183 if (bio_to_abort == bdev_io->u.abort.bio_to_abort) { 184 TAILQ_REMOVE(&ch->outstanding_io, bio_to_abort, module_link); 185 ch->outstanding_io_count--; 186 spdk_bdev_io_complete(bio_to_abort, SPDK_BDEV_IO_STATUS_FAILED); 187 break; 188 } 189 } 190 } 191 } 192 193 TAILQ_INSERT_TAIL(&ch->outstanding_io, bdev_io, module_link); 194 ch->outstanding_io_count++; 195 196 expected_io = TAILQ_FIRST(&ch->expected_io); 197 if (expected_io == NULL) { 198 return; 199 } 200 TAILQ_REMOVE(&ch->expected_io, expected_io, link); 201 202 if (expected_io->type != SPDK_BDEV_IO_TYPE_INVALID) { 203 CU_ASSERT(bdev_io->type == expected_io->type); 204 } 205 206 if (expected_io->md_buf != NULL) { 207 CU_ASSERT(expected_io->md_buf == bdev_io->u.bdev.md_buf); 208 } 209 210 if (expected_io->length == 0) { 211 free(expected_io); 212 return; 213 } 214 215 CU_ASSERT(expected_io->offset == bdev_io->u.bdev.offset_blocks); 216 CU_ASSERT(expected_io->length = bdev_io->u.bdev.num_blocks); 217 218 if (expected_io->iovcnt == 0) { 219 free(expected_io); 220 /* UNMAP, WRITE_ZEROES and FLUSH don't have iovs, so we can just return now. */ 221 return; 222 } 223 224 CU_ASSERT(expected_io->iovcnt == bdev_io->u.bdev.iovcnt); 225 for (i = 0; i < expected_io->iovcnt; i++) { 226 iov = &bdev_io->u.bdev.iovs[i]; 227 expected_iov = &expected_io->iov[i]; 228 CU_ASSERT(iov->iov_len == expected_iov->iov_len); 229 CU_ASSERT(iov->iov_base == expected_iov->iov_base); 230 } 231 232 free(expected_io); 233 } 234 235 static void 236 stub_submit_request_get_buf_cb(struct spdk_io_channel *_ch, 237 struct spdk_bdev_io *bdev_io, bool success) 238 { 239 CU_ASSERT(success == true); 240 241 stub_submit_request(_ch, bdev_io); 242 } 243 244 static void 245 stub_submit_request_get_buf(struct spdk_io_channel *_ch, struct spdk_bdev_io *bdev_io) 246 { 247 spdk_bdev_io_get_buf(bdev_io, stub_submit_request_get_buf_cb, 248 bdev_io->u.bdev.num_blocks * bdev_io->bdev->blocklen); 249 } 250 251 static uint32_t 252 stub_complete_io(uint32_t num_to_complete) 253 { 254 struct bdev_ut_channel *ch = g_bdev_ut_channel; 255 struct spdk_bdev_io *bdev_io; 256 static enum spdk_bdev_io_status io_status; 257 uint32_t num_completed = 0; 258 259 while (num_completed < num_to_complete) { 260 if (TAILQ_EMPTY(&ch->outstanding_io)) { 261 break; 262 } 263 bdev_io = TAILQ_FIRST(&ch->outstanding_io); 264 TAILQ_REMOVE(&ch->outstanding_io, bdev_io, module_link); 265 ch->outstanding_io_count--; 266 io_status = g_io_exp_status == SPDK_BDEV_IO_STATUS_SUCCESS ? SPDK_BDEV_IO_STATUS_SUCCESS : 267 g_io_exp_status; 268 spdk_bdev_io_complete(bdev_io, io_status); 269 num_completed++; 270 } 271 272 return num_completed; 273 } 274 275 static struct spdk_io_channel * 276 bdev_ut_get_io_channel(void *ctx) 277 { 278 return spdk_get_io_channel(&g_bdev_ut_io_device); 279 } 280 281 static bool g_io_types_supported[SPDK_BDEV_NUM_IO_TYPES] = { 282 [SPDK_BDEV_IO_TYPE_READ] = true, 283 [SPDK_BDEV_IO_TYPE_WRITE] = true, 284 [SPDK_BDEV_IO_TYPE_COMPARE] = true, 285 [SPDK_BDEV_IO_TYPE_UNMAP] = true, 286 [SPDK_BDEV_IO_TYPE_FLUSH] = true, 287 [SPDK_BDEV_IO_TYPE_RESET] = true, 288 [SPDK_BDEV_IO_TYPE_NVME_ADMIN] = true, 289 [SPDK_BDEV_IO_TYPE_NVME_IO] = true, 290 [SPDK_BDEV_IO_TYPE_NVME_IO_MD] = true, 291 [SPDK_BDEV_IO_TYPE_WRITE_ZEROES] = true, 292 [SPDK_BDEV_IO_TYPE_ZCOPY] = true, 293 [SPDK_BDEV_IO_TYPE_ABORT] = true, 294 }; 295 296 static void 297 ut_enable_io_type(enum spdk_bdev_io_type io_type, bool enable) 298 { 299 g_io_types_supported[io_type] = enable; 300 } 301 302 static bool 303 stub_io_type_supported(void *_bdev, enum spdk_bdev_io_type io_type) 304 { 305 return g_io_types_supported[io_type]; 306 } 307 308 static struct spdk_bdev_fn_table fn_table = { 309 .destruct = stub_destruct, 310 .submit_request = stub_submit_request, 311 .get_io_channel = bdev_ut_get_io_channel, 312 .io_type_supported = stub_io_type_supported, 313 }; 314 315 static int 316 bdev_ut_create_ch(void *io_device, void *ctx_buf) 317 { 318 struct bdev_ut_channel *ch = ctx_buf; 319 320 CU_ASSERT(g_bdev_ut_channel == NULL); 321 g_bdev_ut_channel = ch; 322 323 TAILQ_INIT(&ch->outstanding_io); 324 ch->outstanding_io_count = 0; 325 TAILQ_INIT(&ch->expected_io); 326 return 0; 327 } 328 329 static void 330 bdev_ut_destroy_ch(void *io_device, void *ctx_buf) 331 { 332 CU_ASSERT(g_bdev_ut_channel != NULL); 333 g_bdev_ut_channel = NULL; 334 } 335 336 struct spdk_bdev_module bdev_ut_if; 337 338 static int 339 bdev_ut_module_init(void) 340 { 341 spdk_io_device_register(&g_bdev_ut_io_device, bdev_ut_create_ch, bdev_ut_destroy_ch, 342 sizeof(struct bdev_ut_channel), NULL); 343 spdk_bdev_module_init_done(&bdev_ut_if); 344 return 0; 345 } 346 347 static void 348 bdev_ut_module_fini(void) 349 { 350 spdk_io_device_unregister(&g_bdev_ut_io_device, NULL); 351 } 352 353 struct spdk_bdev_module bdev_ut_if = { 354 .name = "bdev_ut", 355 .module_init = bdev_ut_module_init, 356 .module_fini = bdev_ut_module_fini, 357 .async_init = true, 358 }; 359 360 static void vbdev_ut_examine(struct spdk_bdev *bdev); 361 362 static int 363 vbdev_ut_module_init(void) 364 { 365 return 0; 366 } 367 368 static void 369 vbdev_ut_module_fini(void) 370 { 371 } 372 373 struct spdk_bdev_module vbdev_ut_if = { 374 .name = "vbdev_ut", 375 .module_init = vbdev_ut_module_init, 376 .module_fini = vbdev_ut_module_fini, 377 .examine_config = vbdev_ut_examine, 378 }; 379 380 SPDK_BDEV_MODULE_REGISTER(bdev_ut, &bdev_ut_if) 381 SPDK_BDEV_MODULE_REGISTER(vbdev_ut, &vbdev_ut_if) 382 383 static void 384 vbdev_ut_examine(struct spdk_bdev *bdev) 385 { 386 spdk_bdev_module_examine_done(&vbdev_ut_if); 387 } 388 389 static struct spdk_bdev * 390 allocate_bdev(char *name) 391 { 392 struct spdk_bdev *bdev; 393 int rc; 394 395 bdev = calloc(1, sizeof(*bdev)); 396 SPDK_CU_ASSERT_FATAL(bdev != NULL); 397 398 bdev->name = name; 399 bdev->fn_table = &fn_table; 400 bdev->module = &bdev_ut_if; 401 bdev->blockcnt = 1024; 402 bdev->blocklen = 512; 403 404 rc = spdk_bdev_register(bdev); 405 CU_ASSERT(rc == 0); 406 407 return bdev; 408 } 409 410 static struct spdk_bdev * 411 allocate_vbdev(char *name) 412 { 413 struct spdk_bdev *bdev; 414 int rc; 415 416 bdev = calloc(1, sizeof(*bdev)); 417 SPDK_CU_ASSERT_FATAL(bdev != NULL); 418 419 bdev->name = name; 420 bdev->fn_table = &fn_table; 421 bdev->module = &vbdev_ut_if; 422 423 rc = spdk_bdev_register(bdev); 424 CU_ASSERT(rc == 0); 425 426 return bdev; 427 } 428 429 static void 430 free_bdev(struct spdk_bdev *bdev) 431 { 432 spdk_bdev_unregister(bdev, NULL, NULL); 433 poll_threads(); 434 memset(bdev, 0xFF, sizeof(*bdev)); 435 free(bdev); 436 } 437 438 static void 439 free_vbdev(struct spdk_bdev *bdev) 440 { 441 spdk_bdev_unregister(bdev, NULL, NULL); 442 poll_threads(); 443 memset(bdev, 0xFF, sizeof(*bdev)); 444 free(bdev); 445 } 446 447 static void 448 get_device_stat_cb(struct spdk_bdev *bdev, struct spdk_bdev_io_stat *stat, void *cb_arg, int rc) 449 { 450 const char *bdev_name; 451 452 CU_ASSERT(bdev != NULL); 453 CU_ASSERT(rc == 0); 454 bdev_name = spdk_bdev_get_name(bdev); 455 CU_ASSERT_STRING_EQUAL(bdev_name, "bdev0"); 456 457 free(stat); 458 free_bdev(bdev); 459 460 *(bool *)cb_arg = true; 461 } 462 463 static void 464 bdev_unregister_cb(void *cb_arg, int rc) 465 { 466 g_unregister_arg = cb_arg; 467 g_unregister_rc = rc; 468 } 469 470 static void 471 bdev_ut_event_cb(enum spdk_bdev_event_type type, struct spdk_bdev *bdev, void *event_ctx) 472 { 473 } 474 475 static void 476 bdev_open_cb1(enum spdk_bdev_event_type type, struct spdk_bdev *bdev, void *event_ctx) 477 { 478 struct spdk_bdev_desc *desc = *(struct spdk_bdev_desc **)event_ctx; 479 480 g_event_type1 = type; 481 if (SPDK_BDEV_EVENT_REMOVE == type) { 482 spdk_bdev_close(desc); 483 } 484 } 485 486 static void 487 bdev_open_cb2(enum spdk_bdev_event_type type, struct spdk_bdev *bdev, void *event_ctx) 488 { 489 struct spdk_bdev_desc *desc = *(struct spdk_bdev_desc **)event_ctx; 490 491 g_event_type2 = type; 492 if (SPDK_BDEV_EVENT_REMOVE == type) { 493 spdk_bdev_close(desc); 494 } 495 } 496 497 static void 498 get_device_stat_test(void) 499 { 500 struct spdk_bdev *bdev; 501 struct spdk_bdev_io_stat *stat; 502 bool done; 503 504 bdev = allocate_bdev("bdev0"); 505 stat = calloc(1, sizeof(struct spdk_bdev_io_stat)); 506 if (stat == NULL) { 507 free_bdev(bdev); 508 return; 509 } 510 511 done = false; 512 spdk_bdev_get_device_stat(bdev, stat, get_device_stat_cb, &done); 513 while (!done) { poll_threads(); } 514 515 516 } 517 518 static void 519 open_write_test(void) 520 { 521 struct spdk_bdev *bdev[9]; 522 struct spdk_bdev_desc *desc[9] = {}; 523 int rc; 524 525 /* 526 * Create a tree of bdevs to test various open w/ write cases. 527 * 528 * bdev0 through bdev3 are physical block devices, such as NVMe 529 * namespaces or Ceph block devices. 530 * 531 * bdev4 is a virtual bdev with multiple base bdevs. This models 532 * caching or RAID use cases. 533 * 534 * bdev5 through bdev7 are all virtual bdevs with the same base 535 * bdev (except bdev7). This models partitioning or logical volume 536 * use cases. 537 * 538 * bdev7 is a virtual bdev with multiple base bdevs. One of base bdevs 539 * (bdev2) is shared with other virtual bdevs: bdev5 and bdev6. This 540 * models caching, RAID, partitioning or logical volumes use cases. 541 * 542 * bdev8 is a virtual bdev with multiple base bdevs, but these 543 * base bdevs are themselves virtual bdevs. 544 * 545 * bdev8 546 * | 547 * +----------+ 548 * | | 549 * bdev4 bdev5 bdev6 bdev7 550 * | | | | 551 * +---+---+ +---+ + +---+---+ 552 * | | \ | / \ 553 * bdev0 bdev1 bdev2 bdev3 554 */ 555 556 bdev[0] = allocate_bdev("bdev0"); 557 rc = spdk_bdev_module_claim_bdev(bdev[0], NULL, &bdev_ut_if); 558 CU_ASSERT(rc == 0); 559 560 bdev[1] = allocate_bdev("bdev1"); 561 rc = spdk_bdev_module_claim_bdev(bdev[1], NULL, &bdev_ut_if); 562 CU_ASSERT(rc == 0); 563 564 bdev[2] = allocate_bdev("bdev2"); 565 rc = spdk_bdev_module_claim_bdev(bdev[2], NULL, &bdev_ut_if); 566 CU_ASSERT(rc == 0); 567 568 bdev[3] = allocate_bdev("bdev3"); 569 rc = spdk_bdev_module_claim_bdev(bdev[3], NULL, &bdev_ut_if); 570 CU_ASSERT(rc == 0); 571 572 bdev[4] = allocate_vbdev("bdev4"); 573 rc = spdk_bdev_module_claim_bdev(bdev[4], NULL, &bdev_ut_if); 574 CU_ASSERT(rc == 0); 575 576 bdev[5] = allocate_vbdev("bdev5"); 577 rc = spdk_bdev_module_claim_bdev(bdev[5], NULL, &bdev_ut_if); 578 CU_ASSERT(rc == 0); 579 580 bdev[6] = allocate_vbdev("bdev6"); 581 582 bdev[7] = allocate_vbdev("bdev7"); 583 584 bdev[8] = allocate_vbdev("bdev8"); 585 586 /* Open bdev0 read-only. This should succeed. */ 587 rc = spdk_bdev_open_ext("bdev0", false, bdev_ut_event_cb, NULL, &desc[0]); 588 CU_ASSERT(rc == 0); 589 SPDK_CU_ASSERT_FATAL(desc[0] != NULL); 590 CU_ASSERT(bdev[0] == spdk_bdev_desc_get_bdev(desc[0])); 591 spdk_bdev_close(desc[0]); 592 593 /* 594 * Open bdev1 read/write. This should fail since bdev1 has been claimed 595 * by a vbdev module. 596 */ 597 rc = spdk_bdev_open_ext("bdev1", true, bdev_ut_event_cb, NULL, &desc[1]); 598 CU_ASSERT(rc == -EPERM); 599 600 /* 601 * Open bdev4 read/write. This should fail since bdev3 has been claimed 602 * by a vbdev module. 603 */ 604 rc = spdk_bdev_open_ext("bdev4", true, bdev_ut_event_cb, NULL, &desc[4]); 605 CU_ASSERT(rc == -EPERM); 606 607 /* Open bdev4 read-only. This should succeed. */ 608 rc = spdk_bdev_open_ext("bdev4", false, bdev_ut_event_cb, NULL, &desc[4]); 609 CU_ASSERT(rc == 0); 610 SPDK_CU_ASSERT_FATAL(desc[4] != NULL); 611 CU_ASSERT(bdev[4] == spdk_bdev_desc_get_bdev(desc[4])); 612 spdk_bdev_close(desc[4]); 613 614 /* 615 * Open bdev8 read/write. This should succeed since it is a leaf 616 * bdev. 617 */ 618 rc = spdk_bdev_open_ext("bdev8", true, bdev_ut_event_cb, NULL, &desc[8]); 619 CU_ASSERT(rc == 0); 620 SPDK_CU_ASSERT_FATAL(desc[8] != NULL); 621 CU_ASSERT(bdev[8] == spdk_bdev_desc_get_bdev(desc[8])); 622 spdk_bdev_close(desc[8]); 623 624 /* 625 * Open bdev5 read/write. This should fail since bdev4 has been claimed 626 * by a vbdev module. 627 */ 628 rc = spdk_bdev_open_ext("bdev5", true, bdev_ut_event_cb, NULL, &desc[5]); 629 CU_ASSERT(rc == -EPERM); 630 631 /* Open bdev4 read-only. This should succeed. */ 632 rc = spdk_bdev_open_ext("bdev5", false, bdev_ut_event_cb, NULL, &desc[5]); 633 CU_ASSERT(rc == 0); 634 SPDK_CU_ASSERT_FATAL(desc[5] != NULL); 635 CU_ASSERT(bdev[5] == spdk_bdev_desc_get_bdev(desc[5])); 636 spdk_bdev_close(desc[5]); 637 638 free_vbdev(bdev[8]); 639 640 free_vbdev(bdev[5]); 641 free_vbdev(bdev[6]); 642 free_vbdev(bdev[7]); 643 644 free_vbdev(bdev[4]); 645 646 free_bdev(bdev[0]); 647 free_bdev(bdev[1]); 648 free_bdev(bdev[2]); 649 free_bdev(bdev[3]); 650 } 651 652 static void 653 bytes_to_blocks_test(void) 654 { 655 struct spdk_bdev bdev; 656 uint64_t offset_blocks, num_blocks; 657 658 memset(&bdev, 0, sizeof(bdev)); 659 660 bdev.blocklen = 512; 661 662 /* All parameters valid */ 663 offset_blocks = 0; 664 num_blocks = 0; 665 CU_ASSERT(bdev_bytes_to_blocks(&bdev, 512, &offset_blocks, 1024, &num_blocks) == 0); 666 CU_ASSERT(offset_blocks == 1); 667 CU_ASSERT(num_blocks == 2); 668 669 /* Offset not a block multiple */ 670 CU_ASSERT(bdev_bytes_to_blocks(&bdev, 3, &offset_blocks, 512, &num_blocks) != 0); 671 672 /* Length not a block multiple */ 673 CU_ASSERT(bdev_bytes_to_blocks(&bdev, 512, &offset_blocks, 3, &num_blocks) != 0); 674 675 /* In case blocklen not the power of two */ 676 bdev.blocklen = 100; 677 CU_ASSERT(bdev_bytes_to_blocks(&bdev, 100, &offset_blocks, 200, &num_blocks) == 0); 678 CU_ASSERT(offset_blocks == 1); 679 CU_ASSERT(num_blocks == 2); 680 681 /* Offset not a block multiple */ 682 CU_ASSERT(bdev_bytes_to_blocks(&bdev, 3, &offset_blocks, 100, &num_blocks) != 0); 683 684 /* Length not a block multiple */ 685 CU_ASSERT(bdev_bytes_to_blocks(&bdev, 100, &offset_blocks, 3, &num_blocks) != 0); 686 } 687 688 static void 689 num_blocks_test(void) 690 { 691 struct spdk_bdev bdev; 692 struct spdk_bdev_desc *desc = NULL; 693 struct spdk_bdev_desc *desc_ext = NULL; 694 int rc; 695 696 memset(&bdev, 0, sizeof(bdev)); 697 bdev.name = "num_blocks"; 698 bdev.fn_table = &fn_table; 699 bdev.module = &bdev_ut_if; 700 spdk_bdev_register(&bdev); 701 spdk_bdev_notify_blockcnt_change(&bdev, 50); 702 703 /* Growing block number */ 704 CU_ASSERT(spdk_bdev_notify_blockcnt_change(&bdev, 70) == 0); 705 /* Shrinking block number */ 706 CU_ASSERT(spdk_bdev_notify_blockcnt_change(&bdev, 30) == 0); 707 708 /* In case bdev opened */ 709 rc = spdk_bdev_open(&bdev, false, NULL, NULL, &desc); 710 CU_ASSERT(rc == 0); 711 SPDK_CU_ASSERT_FATAL(desc != NULL); 712 713 /* Growing block number */ 714 CU_ASSERT(spdk_bdev_notify_blockcnt_change(&bdev, 80) == 0); 715 /* Shrinking block number */ 716 CU_ASSERT(spdk_bdev_notify_blockcnt_change(&bdev, 20) != 0); 717 718 /* In case bdev opened with ext API */ 719 rc = spdk_bdev_open_ext("num_blocks", false, bdev_open_cb1, &desc_ext, &desc_ext); 720 CU_ASSERT(rc == 0); 721 SPDK_CU_ASSERT_FATAL(desc_ext != NULL); 722 CU_ASSERT(&bdev == spdk_bdev_desc_get_bdev(desc_ext)); 723 724 g_event_type1 = 0xFF; 725 /* Growing block number */ 726 CU_ASSERT(spdk_bdev_notify_blockcnt_change(&bdev, 90) == 0); 727 728 poll_threads(); 729 CU_ASSERT_EQUAL(g_event_type1, SPDK_BDEV_EVENT_RESIZE); 730 731 g_event_type1 = 0xFF; 732 /* Growing block number and closing */ 733 CU_ASSERT(spdk_bdev_notify_blockcnt_change(&bdev, 100) == 0); 734 735 spdk_bdev_close(desc); 736 spdk_bdev_close(desc_ext); 737 spdk_bdev_unregister(&bdev, NULL, NULL); 738 739 poll_threads(); 740 741 /* Callback is not called for closed device */ 742 CU_ASSERT_EQUAL(g_event_type1, 0xFF); 743 } 744 745 static void 746 io_valid_test(void) 747 { 748 struct spdk_bdev bdev; 749 750 memset(&bdev, 0, sizeof(bdev)); 751 752 bdev.blocklen = 512; 753 spdk_bdev_notify_blockcnt_change(&bdev, 100); 754 755 /* All parameters valid */ 756 CU_ASSERT(bdev_io_valid_blocks(&bdev, 1, 2) == true); 757 758 /* Last valid block */ 759 CU_ASSERT(bdev_io_valid_blocks(&bdev, 99, 1) == true); 760 761 /* Offset past end of bdev */ 762 CU_ASSERT(bdev_io_valid_blocks(&bdev, 100, 1) == false); 763 764 /* Offset + length past end of bdev */ 765 CU_ASSERT(bdev_io_valid_blocks(&bdev, 99, 2) == false); 766 767 /* Offset near end of uint64_t range (2^64 - 1) */ 768 CU_ASSERT(bdev_io_valid_blocks(&bdev, 18446744073709551615ULL, 1) == false); 769 } 770 771 static void 772 alias_add_del_test(void) 773 { 774 struct spdk_bdev *bdev[3]; 775 int rc; 776 777 /* Creating and registering bdevs */ 778 bdev[0] = allocate_bdev("bdev0"); 779 SPDK_CU_ASSERT_FATAL(bdev[0] != 0); 780 781 bdev[1] = allocate_bdev("bdev1"); 782 SPDK_CU_ASSERT_FATAL(bdev[1] != 0); 783 784 bdev[2] = allocate_bdev("bdev2"); 785 SPDK_CU_ASSERT_FATAL(bdev[2] != 0); 786 787 poll_threads(); 788 789 /* 790 * Trying adding an alias identical to name. 791 * Alias is identical to name, so it can not be added to aliases list 792 */ 793 rc = spdk_bdev_alias_add(bdev[0], bdev[0]->name); 794 CU_ASSERT(rc == -EEXIST); 795 796 /* 797 * Trying to add empty alias, 798 * this one should fail 799 */ 800 rc = spdk_bdev_alias_add(bdev[0], NULL); 801 CU_ASSERT(rc == -EINVAL); 802 803 /* Trying adding same alias to two different registered bdevs */ 804 805 /* Alias is used first time, so this one should pass */ 806 rc = spdk_bdev_alias_add(bdev[0], "proper alias 0"); 807 CU_ASSERT(rc == 0); 808 809 /* Alias was added to another bdev, so this one should fail */ 810 rc = spdk_bdev_alias_add(bdev[1], "proper alias 0"); 811 CU_ASSERT(rc == -EEXIST); 812 813 /* Alias is used first time, so this one should pass */ 814 rc = spdk_bdev_alias_add(bdev[1], "proper alias 1"); 815 CU_ASSERT(rc == 0); 816 817 /* Trying removing an alias from registered bdevs */ 818 819 /* Alias is not on a bdev aliases list, so this one should fail */ 820 rc = spdk_bdev_alias_del(bdev[0], "not existing"); 821 CU_ASSERT(rc == -ENOENT); 822 823 /* Alias is present on a bdev aliases list, so this one should pass */ 824 rc = spdk_bdev_alias_del(bdev[0], "proper alias 0"); 825 CU_ASSERT(rc == 0); 826 827 /* Alias is present on a bdev aliases list, so this one should pass */ 828 rc = spdk_bdev_alias_del(bdev[1], "proper alias 1"); 829 CU_ASSERT(rc == 0); 830 831 /* Trying to remove name instead of alias, so this one should fail, name cannot be changed or removed */ 832 rc = spdk_bdev_alias_del(bdev[0], bdev[0]->name); 833 CU_ASSERT(rc != 0); 834 835 /* Trying to del all alias from empty alias list */ 836 spdk_bdev_alias_del_all(bdev[2]); 837 SPDK_CU_ASSERT_FATAL(TAILQ_EMPTY(&bdev[2]->aliases)); 838 839 /* Trying to del all alias from non-empty alias list */ 840 rc = spdk_bdev_alias_add(bdev[2], "alias0"); 841 CU_ASSERT(rc == 0); 842 rc = spdk_bdev_alias_add(bdev[2], "alias1"); 843 CU_ASSERT(rc == 0); 844 spdk_bdev_alias_del_all(bdev[2]); 845 CU_ASSERT(TAILQ_EMPTY(&bdev[2]->aliases)); 846 847 /* Unregister and free bdevs */ 848 spdk_bdev_unregister(bdev[0], NULL, NULL); 849 spdk_bdev_unregister(bdev[1], NULL, NULL); 850 spdk_bdev_unregister(bdev[2], NULL, NULL); 851 852 poll_threads(); 853 854 free(bdev[0]); 855 free(bdev[1]); 856 free(bdev[2]); 857 } 858 859 static void 860 io_done(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg) 861 { 862 g_io_done = true; 863 g_io_status = bdev_io->internal.status; 864 spdk_bdev_free_io(bdev_io); 865 } 866 867 static void 868 bdev_init_cb(void *arg, int rc) 869 { 870 CU_ASSERT(rc == 0); 871 } 872 873 static void 874 bdev_fini_cb(void *arg) 875 { 876 } 877 878 struct bdev_ut_io_wait_entry { 879 struct spdk_bdev_io_wait_entry entry; 880 struct spdk_io_channel *io_ch; 881 struct spdk_bdev_desc *desc; 882 bool submitted; 883 }; 884 885 static void 886 io_wait_cb(void *arg) 887 { 888 struct bdev_ut_io_wait_entry *entry = arg; 889 int rc; 890 891 rc = spdk_bdev_read_blocks(entry->desc, entry->io_ch, NULL, 0, 1, io_done, NULL); 892 CU_ASSERT(rc == 0); 893 entry->submitted = true; 894 } 895 896 static void 897 bdev_io_types_test(void) 898 { 899 struct spdk_bdev *bdev; 900 struct spdk_bdev_desc *desc = NULL; 901 struct spdk_io_channel *io_ch; 902 struct spdk_bdev_opts bdev_opts = { 903 .bdev_io_pool_size = 4, 904 .bdev_io_cache_size = 2, 905 }; 906 int rc; 907 908 rc = spdk_bdev_set_opts(&bdev_opts); 909 CU_ASSERT(rc == 0); 910 spdk_bdev_initialize(bdev_init_cb, NULL); 911 poll_threads(); 912 913 bdev = allocate_bdev("bdev0"); 914 915 rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc); 916 CU_ASSERT(rc == 0); 917 poll_threads(); 918 SPDK_CU_ASSERT_FATAL(desc != NULL); 919 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 920 io_ch = spdk_bdev_get_io_channel(desc); 921 CU_ASSERT(io_ch != NULL); 922 923 /* WRITE and WRITE ZEROES are not supported */ 924 ut_enable_io_type(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, false); 925 ut_enable_io_type(SPDK_BDEV_IO_TYPE_WRITE, false); 926 rc = spdk_bdev_write_zeroes_blocks(desc, io_ch, 0, 128, io_done, NULL); 927 CU_ASSERT(rc == -ENOTSUP); 928 ut_enable_io_type(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, true); 929 ut_enable_io_type(SPDK_BDEV_IO_TYPE_WRITE, true); 930 931 spdk_put_io_channel(io_ch); 932 spdk_bdev_close(desc); 933 free_bdev(bdev); 934 spdk_bdev_finish(bdev_fini_cb, NULL); 935 poll_threads(); 936 } 937 938 static void 939 bdev_io_wait_test(void) 940 { 941 struct spdk_bdev *bdev; 942 struct spdk_bdev_desc *desc = NULL; 943 struct spdk_io_channel *io_ch; 944 struct spdk_bdev_opts bdev_opts = { 945 .bdev_io_pool_size = 4, 946 .bdev_io_cache_size = 2, 947 }; 948 struct bdev_ut_io_wait_entry io_wait_entry; 949 struct bdev_ut_io_wait_entry io_wait_entry2; 950 int rc; 951 952 rc = spdk_bdev_set_opts(&bdev_opts); 953 CU_ASSERT(rc == 0); 954 spdk_bdev_initialize(bdev_init_cb, NULL); 955 poll_threads(); 956 957 bdev = allocate_bdev("bdev0"); 958 959 rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc); 960 CU_ASSERT(rc == 0); 961 poll_threads(); 962 SPDK_CU_ASSERT_FATAL(desc != NULL); 963 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 964 io_ch = spdk_bdev_get_io_channel(desc); 965 CU_ASSERT(io_ch != NULL); 966 967 rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, NULL); 968 CU_ASSERT(rc == 0); 969 rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, NULL); 970 CU_ASSERT(rc == 0); 971 rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, NULL); 972 CU_ASSERT(rc == 0); 973 rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, NULL); 974 CU_ASSERT(rc == 0); 975 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 4); 976 977 rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, NULL); 978 CU_ASSERT(rc == -ENOMEM); 979 980 io_wait_entry.entry.bdev = bdev; 981 io_wait_entry.entry.cb_fn = io_wait_cb; 982 io_wait_entry.entry.cb_arg = &io_wait_entry; 983 io_wait_entry.io_ch = io_ch; 984 io_wait_entry.desc = desc; 985 io_wait_entry.submitted = false; 986 /* Cannot use the same io_wait_entry for two different calls. */ 987 memcpy(&io_wait_entry2, &io_wait_entry, sizeof(io_wait_entry)); 988 io_wait_entry2.entry.cb_arg = &io_wait_entry2; 989 990 /* Queue two I/O waits. */ 991 rc = spdk_bdev_queue_io_wait(bdev, io_ch, &io_wait_entry.entry); 992 CU_ASSERT(rc == 0); 993 CU_ASSERT(io_wait_entry.submitted == false); 994 rc = spdk_bdev_queue_io_wait(bdev, io_ch, &io_wait_entry2.entry); 995 CU_ASSERT(rc == 0); 996 CU_ASSERT(io_wait_entry2.submitted == false); 997 998 stub_complete_io(1); 999 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 4); 1000 CU_ASSERT(io_wait_entry.submitted == true); 1001 CU_ASSERT(io_wait_entry2.submitted == false); 1002 1003 stub_complete_io(1); 1004 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 4); 1005 CU_ASSERT(io_wait_entry2.submitted == true); 1006 1007 stub_complete_io(4); 1008 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 1009 1010 spdk_put_io_channel(io_ch); 1011 spdk_bdev_close(desc); 1012 free_bdev(bdev); 1013 spdk_bdev_finish(bdev_fini_cb, NULL); 1014 poll_threads(); 1015 } 1016 1017 static void 1018 bdev_io_spans_boundary_test(void) 1019 { 1020 struct spdk_bdev bdev; 1021 struct spdk_bdev_io bdev_io; 1022 1023 memset(&bdev, 0, sizeof(bdev)); 1024 1025 bdev.optimal_io_boundary = 0; 1026 bdev_io.bdev = &bdev; 1027 1028 /* bdev has no optimal_io_boundary set - so this should return false. */ 1029 CU_ASSERT(bdev_io_should_split(&bdev_io) == false); 1030 1031 bdev.optimal_io_boundary = 32; 1032 bdev_io.type = SPDK_BDEV_IO_TYPE_RESET; 1033 1034 /* RESETs are not based on LBAs - so this should return false. */ 1035 CU_ASSERT(bdev_io_should_split(&bdev_io) == false); 1036 1037 bdev_io.type = SPDK_BDEV_IO_TYPE_READ; 1038 bdev_io.u.bdev.offset_blocks = 0; 1039 bdev_io.u.bdev.num_blocks = 32; 1040 1041 /* This I/O run right up to, but does not cross, the boundary - so this should return false. */ 1042 CU_ASSERT(bdev_io_should_split(&bdev_io) == false); 1043 1044 bdev_io.u.bdev.num_blocks = 33; 1045 1046 /* This I/O spans a boundary. */ 1047 CU_ASSERT(bdev_io_should_split(&bdev_io) == true); 1048 } 1049 1050 static void 1051 bdev_io_split_test(void) 1052 { 1053 struct spdk_bdev *bdev; 1054 struct spdk_bdev_desc *desc = NULL; 1055 struct spdk_io_channel *io_ch; 1056 struct spdk_bdev_opts bdev_opts = { 1057 .bdev_io_pool_size = 512, 1058 .bdev_io_cache_size = 64, 1059 }; 1060 struct iovec iov[BDEV_IO_NUM_CHILD_IOV * 2]; 1061 struct ut_expected_io *expected_io; 1062 void *md_buf = (void *)0xFF000000; 1063 uint64_t i; 1064 int rc; 1065 1066 rc = spdk_bdev_set_opts(&bdev_opts); 1067 CU_ASSERT(rc == 0); 1068 spdk_bdev_initialize(bdev_init_cb, NULL); 1069 1070 bdev = allocate_bdev("bdev0"); 1071 1072 rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc); 1073 CU_ASSERT(rc == 0); 1074 SPDK_CU_ASSERT_FATAL(desc != NULL); 1075 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 1076 io_ch = spdk_bdev_get_io_channel(desc); 1077 CU_ASSERT(io_ch != NULL); 1078 1079 bdev->optimal_io_boundary = 16; 1080 bdev->split_on_optimal_io_boundary = false; 1081 1082 g_io_done = false; 1083 1084 /* First test that the I/O does not get split if split_on_optimal_io_boundary == false. */ 1085 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 14, 8, 1); 1086 ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 8 * 512); 1087 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1088 1089 rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 8, io_done, NULL); 1090 CU_ASSERT(rc == 0); 1091 CU_ASSERT(g_io_done == false); 1092 1093 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 1094 stub_complete_io(1); 1095 CU_ASSERT(g_io_done == true); 1096 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 1097 1098 bdev->split_on_optimal_io_boundary = true; 1099 bdev->md_interleave = false; 1100 bdev->md_len = 8; 1101 1102 /* Now test that a single-vector command is split correctly. 1103 * Offset 14, length 8, payload 0xF000 1104 * Child - Offset 14, length 2, payload 0xF000 1105 * Child - Offset 16, length 6, payload 0xF000 + 2 * 512 1106 * 1107 * Set up the expected values before calling spdk_bdev_read_blocks 1108 */ 1109 g_io_done = false; 1110 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 14, 2, 1); 1111 expected_io->md_buf = md_buf; 1112 ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 2 * 512); 1113 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1114 1115 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 16, 6, 1); 1116 expected_io->md_buf = md_buf + 2 * 8; 1117 ut_expected_io_set_iov(expected_io, 0, (void *)(0xF000 + 2 * 512), 6 * 512); 1118 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1119 1120 /* spdk_bdev_read_blocks will submit the first child immediately. */ 1121 rc = spdk_bdev_read_blocks_with_md(desc, io_ch, (void *)0xF000, md_buf, 1122 14, 8, io_done, NULL); 1123 CU_ASSERT(rc == 0); 1124 CU_ASSERT(g_io_done == false); 1125 1126 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 1127 stub_complete_io(2); 1128 CU_ASSERT(g_io_done == true); 1129 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 1130 1131 /* Now set up a more complex, multi-vector command that needs to be split, 1132 * including splitting iovecs. 1133 */ 1134 iov[0].iov_base = (void *)0x10000; 1135 iov[0].iov_len = 512; 1136 iov[1].iov_base = (void *)0x20000; 1137 iov[1].iov_len = 20 * 512; 1138 iov[2].iov_base = (void *)0x30000; 1139 iov[2].iov_len = 11 * 512; 1140 1141 g_io_done = false; 1142 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 14, 2, 2); 1143 expected_io->md_buf = md_buf; 1144 ut_expected_io_set_iov(expected_io, 0, (void *)0x10000, 512); 1145 ut_expected_io_set_iov(expected_io, 1, (void *)0x20000, 512); 1146 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1147 1148 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 16, 16, 1); 1149 expected_io->md_buf = md_buf + 2 * 8; 1150 ut_expected_io_set_iov(expected_io, 0, (void *)(0x20000 + 512), 16 * 512); 1151 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1152 1153 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 32, 14, 2); 1154 expected_io->md_buf = md_buf + 18 * 8; 1155 ut_expected_io_set_iov(expected_io, 0, (void *)(0x20000 + 17 * 512), 3 * 512); 1156 ut_expected_io_set_iov(expected_io, 1, (void *)0x30000, 11 * 512); 1157 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1158 1159 rc = spdk_bdev_writev_blocks_with_md(desc, io_ch, iov, 3, md_buf, 1160 14, 32, io_done, NULL); 1161 CU_ASSERT(rc == 0); 1162 CU_ASSERT(g_io_done == false); 1163 1164 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 3); 1165 stub_complete_io(3); 1166 CU_ASSERT(g_io_done == true); 1167 1168 /* Test multi vector command that needs to be split by strip and then needs to be 1169 * split further due to the capacity of child iovs. 1170 */ 1171 for (i = 0; i < BDEV_IO_NUM_CHILD_IOV * 2; i++) { 1172 iov[i].iov_base = (void *)((i + 1) * 0x10000); 1173 iov[i].iov_len = 512; 1174 } 1175 1176 bdev->optimal_io_boundary = BDEV_IO_NUM_CHILD_IOV; 1177 g_io_done = false; 1178 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0, BDEV_IO_NUM_CHILD_IOV, 1179 BDEV_IO_NUM_CHILD_IOV); 1180 expected_io->md_buf = md_buf; 1181 for (i = 0; i < BDEV_IO_NUM_CHILD_IOV; i++) { 1182 ut_expected_io_set_iov(expected_io, i, (void *)((i + 1) * 0x10000), 512); 1183 } 1184 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1185 1186 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, BDEV_IO_NUM_CHILD_IOV, 1187 BDEV_IO_NUM_CHILD_IOV, BDEV_IO_NUM_CHILD_IOV); 1188 expected_io->md_buf = md_buf + BDEV_IO_NUM_CHILD_IOV * 8; 1189 for (i = 0; i < BDEV_IO_NUM_CHILD_IOV; i++) { 1190 ut_expected_io_set_iov(expected_io, i, 1191 (void *)((i + 1 + BDEV_IO_NUM_CHILD_IOV) * 0x10000), 512); 1192 } 1193 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1194 1195 rc = spdk_bdev_readv_blocks_with_md(desc, io_ch, iov, BDEV_IO_NUM_CHILD_IOV * 2, md_buf, 1196 0, BDEV_IO_NUM_CHILD_IOV * 2, io_done, NULL); 1197 CU_ASSERT(rc == 0); 1198 CU_ASSERT(g_io_done == false); 1199 1200 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 1201 stub_complete_io(1); 1202 CU_ASSERT(g_io_done == false); 1203 1204 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 1205 stub_complete_io(1); 1206 CU_ASSERT(g_io_done == true); 1207 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 1208 1209 /* Test multi vector command that needs to be split by strip and then needs to be 1210 * split further due to the capacity of child iovs. In this case, the length of 1211 * the rest of iovec array with an I/O boundary is the multiple of block size. 1212 */ 1213 1214 /* Fill iovec array for exactly one boundary. The iovec cnt for this boundary 1215 * is BDEV_IO_NUM_CHILD_IOV + 1, which exceeds the capacity of child iovs. 1216 */ 1217 for (i = 0; i < BDEV_IO_NUM_CHILD_IOV - 2; i++) { 1218 iov[i].iov_base = (void *)((i + 1) * 0x10000); 1219 iov[i].iov_len = 512; 1220 } 1221 for (i = BDEV_IO_NUM_CHILD_IOV - 2; i < BDEV_IO_NUM_CHILD_IOV; i++) { 1222 iov[i].iov_base = (void *)((i + 1) * 0x10000); 1223 iov[i].iov_len = 256; 1224 } 1225 iov[BDEV_IO_NUM_CHILD_IOV].iov_base = (void *)((BDEV_IO_NUM_CHILD_IOV + 1) * 0x10000); 1226 iov[BDEV_IO_NUM_CHILD_IOV].iov_len = 512; 1227 1228 /* Add an extra iovec to trigger split */ 1229 iov[BDEV_IO_NUM_CHILD_IOV + 1].iov_base = (void *)((BDEV_IO_NUM_CHILD_IOV + 2) * 0x10000); 1230 iov[BDEV_IO_NUM_CHILD_IOV + 1].iov_len = 512; 1231 1232 bdev->optimal_io_boundary = BDEV_IO_NUM_CHILD_IOV; 1233 g_io_done = false; 1234 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0, 1235 BDEV_IO_NUM_CHILD_IOV - 1, BDEV_IO_NUM_CHILD_IOV); 1236 expected_io->md_buf = md_buf; 1237 for (i = 0; i < BDEV_IO_NUM_CHILD_IOV - 2; i++) { 1238 ut_expected_io_set_iov(expected_io, i, 1239 (void *)((i + 1) * 0x10000), 512); 1240 } 1241 for (i = BDEV_IO_NUM_CHILD_IOV - 2; i < BDEV_IO_NUM_CHILD_IOV; i++) { 1242 ut_expected_io_set_iov(expected_io, i, 1243 (void *)((i + 1) * 0x10000), 256); 1244 } 1245 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1246 1247 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, BDEV_IO_NUM_CHILD_IOV - 1, 1248 1, 1); 1249 expected_io->md_buf = md_buf + (BDEV_IO_NUM_CHILD_IOV - 1) * 8; 1250 ut_expected_io_set_iov(expected_io, 0, 1251 (void *)((BDEV_IO_NUM_CHILD_IOV + 1) * 0x10000), 512); 1252 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1253 1254 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, BDEV_IO_NUM_CHILD_IOV, 1255 1, 1); 1256 expected_io->md_buf = md_buf + BDEV_IO_NUM_CHILD_IOV * 8; 1257 ut_expected_io_set_iov(expected_io, 0, 1258 (void *)((BDEV_IO_NUM_CHILD_IOV + 2) * 0x10000), 512); 1259 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1260 1261 rc = spdk_bdev_readv_blocks_with_md(desc, io_ch, iov, BDEV_IO_NUM_CHILD_IOV + 2, md_buf, 1262 0, BDEV_IO_NUM_CHILD_IOV + 1, io_done, NULL); 1263 CU_ASSERT(rc == 0); 1264 CU_ASSERT(g_io_done == false); 1265 1266 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 1267 stub_complete_io(1); 1268 CU_ASSERT(g_io_done == false); 1269 1270 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 1271 stub_complete_io(2); 1272 CU_ASSERT(g_io_done == true); 1273 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 1274 1275 /* Test multi vector command that needs to be split by strip and then needs to be 1276 * split further due to the capacity of child iovs, the child request offset should 1277 * be rewind to last aligned offset and go success without error. 1278 */ 1279 for (i = 0; i < BDEV_IO_NUM_CHILD_IOV - 1; i++) { 1280 iov[i].iov_base = (void *)((i + 1) * 0x10000); 1281 iov[i].iov_len = 512; 1282 } 1283 iov[BDEV_IO_NUM_CHILD_IOV - 1].iov_base = (void *)(BDEV_IO_NUM_CHILD_IOV * 0x10000); 1284 iov[BDEV_IO_NUM_CHILD_IOV - 1].iov_len = 256; 1285 1286 iov[BDEV_IO_NUM_CHILD_IOV].iov_base = (void *)((BDEV_IO_NUM_CHILD_IOV + 1) * 0x10000); 1287 iov[BDEV_IO_NUM_CHILD_IOV].iov_len = 256; 1288 1289 iov[BDEV_IO_NUM_CHILD_IOV + 1].iov_base = (void *)((BDEV_IO_NUM_CHILD_IOV + 2) * 0x10000); 1290 iov[BDEV_IO_NUM_CHILD_IOV + 1].iov_len = 512; 1291 1292 bdev->optimal_io_boundary = BDEV_IO_NUM_CHILD_IOV; 1293 g_io_done = false; 1294 g_io_status = 0; 1295 /* The first expected io should be start from offset 0 to BDEV_IO_NUM_CHILD_IOV - 1 */ 1296 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0, 1297 BDEV_IO_NUM_CHILD_IOV - 1, BDEV_IO_NUM_CHILD_IOV - 1); 1298 expected_io->md_buf = md_buf; 1299 for (i = 0; i < BDEV_IO_NUM_CHILD_IOV - 1; i++) { 1300 ut_expected_io_set_iov(expected_io, i, 1301 (void *)((i + 1) * 0x10000), 512); 1302 } 1303 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1304 /* The second expected io should be start from offset BDEV_IO_NUM_CHILD_IOV - 1 to BDEV_IO_NUM_CHILD_IOV */ 1305 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, BDEV_IO_NUM_CHILD_IOV - 1, 1306 1, 2); 1307 expected_io->md_buf = md_buf + (BDEV_IO_NUM_CHILD_IOV - 1) * 8; 1308 ut_expected_io_set_iov(expected_io, 0, 1309 (void *)(BDEV_IO_NUM_CHILD_IOV * 0x10000), 256); 1310 ut_expected_io_set_iov(expected_io, 1, 1311 (void *)((BDEV_IO_NUM_CHILD_IOV + 1) * 0x10000), 256); 1312 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1313 /* The third expected io should be start from offset BDEV_IO_NUM_CHILD_IOV to BDEV_IO_NUM_CHILD_IOV + 1 */ 1314 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, BDEV_IO_NUM_CHILD_IOV, 1315 1, 1); 1316 expected_io->md_buf = md_buf + BDEV_IO_NUM_CHILD_IOV * 8; 1317 ut_expected_io_set_iov(expected_io, 0, 1318 (void *)((BDEV_IO_NUM_CHILD_IOV + 2) * 0x10000), 512); 1319 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1320 1321 rc = spdk_bdev_readv_blocks_with_md(desc, io_ch, iov, BDEV_IO_NUM_CHILD_IOV * 2, md_buf, 1322 0, BDEV_IO_NUM_CHILD_IOV + 1, io_done, NULL); 1323 CU_ASSERT(rc == 0); 1324 CU_ASSERT(g_io_done == false); 1325 1326 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 1327 stub_complete_io(1); 1328 CU_ASSERT(g_io_done == false); 1329 1330 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 1331 stub_complete_io(2); 1332 CU_ASSERT(g_io_done == true); 1333 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 1334 1335 /* Test multi vector command that needs to be split due to the IO boundary and 1336 * the capacity of child iovs. Especially test the case when the command is 1337 * split due to the capacity of child iovs, the tail address is not aligned with 1338 * block size and is rewinded to the aligned address. 1339 * 1340 * The iovecs used in read request is complex but is based on the data 1341 * collected in the real issue. We change the base addresses but keep the lengths 1342 * not to loose the credibility of the test. 1343 */ 1344 bdev->optimal_io_boundary = 128; 1345 g_io_done = false; 1346 g_io_status = 0; 1347 1348 for (i = 0; i < 31; i++) { 1349 iov[i].iov_base = (void *)(0xFEED0000000 + (i << 20)); 1350 iov[i].iov_len = 1024; 1351 } 1352 iov[31].iov_base = (void *)0xFEED1F00000; 1353 iov[31].iov_len = 32768; 1354 iov[32].iov_base = (void *)0xFEED2000000; 1355 iov[32].iov_len = 160; 1356 iov[33].iov_base = (void *)0xFEED2100000; 1357 iov[33].iov_len = 4096; 1358 iov[34].iov_base = (void *)0xFEED2200000; 1359 iov[34].iov_len = 4096; 1360 iov[35].iov_base = (void *)0xFEED2300000; 1361 iov[35].iov_len = 4096; 1362 iov[36].iov_base = (void *)0xFEED2400000; 1363 iov[36].iov_len = 4096; 1364 iov[37].iov_base = (void *)0xFEED2500000; 1365 iov[37].iov_len = 4096; 1366 iov[38].iov_base = (void *)0xFEED2600000; 1367 iov[38].iov_len = 4096; 1368 iov[39].iov_base = (void *)0xFEED2700000; 1369 iov[39].iov_len = 4096; 1370 iov[40].iov_base = (void *)0xFEED2800000; 1371 iov[40].iov_len = 4096; 1372 iov[41].iov_base = (void *)0xFEED2900000; 1373 iov[41].iov_len = 4096; 1374 iov[42].iov_base = (void *)0xFEED2A00000; 1375 iov[42].iov_len = 4096; 1376 iov[43].iov_base = (void *)0xFEED2B00000; 1377 iov[43].iov_len = 12288; 1378 iov[44].iov_base = (void *)0xFEED2C00000; 1379 iov[44].iov_len = 8192; 1380 iov[45].iov_base = (void *)0xFEED2F00000; 1381 iov[45].iov_len = 4096; 1382 iov[46].iov_base = (void *)0xFEED3000000; 1383 iov[46].iov_len = 4096; 1384 iov[47].iov_base = (void *)0xFEED3100000; 1385 iov[47].iov_len = 4096; 1386 iov[48].iov_base = (void *)0xFEED3200000; 1387 iov[48].iov_len = 24576; 1388 iov[49].iov_base = (void *)0xFEED3300000; 1389 iov[49].iov_len = 16384; 1390 iov[50].iov_base = (void *)0xFEED3400000; 1391 iov[50].iov_len = 12288; 1392 iov[51].iov_base = (void *)0xFEED3500000; 1393 iov[51].iov_len = 4096; 1394 iov[52].iov_base = (void *)0xFEED3600000; 1395 iov[52].iov_len = 4096; 1396 iov[53].iov_base = (void *)0xFEED3700000; 1397 iov[53].iov_len = 4096; 1398 iov[54].iov_base = (void *)0xFEED3800000; 1399 iov[54].iov_len = 28672; 1400 iov[55].iov_base = (void *)0xFEED3900000; 1401 iov[55].iov_len = 20480; 1402 iov[56].iov_base = (void *)0xFEED3A00000; 1403 iov[56].iov_len = 4096; 1404 iov[57].iov_base = (void *)0xFEED3B00000; 1405 iov[57].iov_len = 12288; 1406 iov[58].iov_base = (void *)0xFEED3C00000; 1407 iov[58].iov_len = 4096; 1408 iov[59].iov_base = (void *)0xFEED3D00000; 1409 iov[59].iov_len = 4096; 1410 iov[60].iov_base = (void *)0xFEED3E00000; 1411 iov[60].iov_len = 352; 1412 1413 /* The 1st child IO must be from iov[0] to iov[31] split by the capacity 1414 * of child iovs, 1415 */ 1416 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0, 126, 32); 1417 expected_io->md_buf = md_buf; 1418 for (i = 0; i < 32; i++) { 1419 ut_expected_io_set_iov(expected_io, i, iov[i].iov_base, iov[i].iov_len); 1420 } 1421 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1422 1423 /* The 2nd child IO must be from iov[32] to the first 864 bytes of iov[33] 1424 * split by the IO boundary requirement. 1425 */ 1426 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 126, 2, 2); 1427 expected_io->md_buf = md_buf + 126 * 8; 1428 ut_expected_io_set_iov(expected_io, 0, iov[32].iov_base, iov[32].iov_len); 1429 ut_expected_io_set_iov(expected_io, 1, iov[33].iov_base, 864); 1430 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1431 1432 /* The 3rd child IO must be from the remaining 3232 bytes of iov[33] to 1433 * the first 864 bytes of iov[46] split by the IO boundary requirement. 1434 */ 1435 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 128, 128, 14); 1436 expected_io->md_buf = md_buf + 128 * 8; 1437 ut_expected_io_set_iov(expected_io, 0, (void *)((uintptr_t)iov[33].iov_base + 864), 1438 iov[33].iov_len - 864); 1439 ut_expected_io_set_iov(expected_io, 1, iov[34].iov_base, iov[34].iov_len); 1440 ut_expected_io_set_iov(expected_io, 2, iov[35].iov_base, iov[35].iov_len); 1441 ut_expected_io_set_iov(expected_io, 3, iov[36].iov_base, iov[36].iov_len); 1442 ut_expected_io_set_iov(expected_io, 4, iov[37].iov_base, iov[37].iov_len); 1443 ut_expected_io_set_iov(expected_io, 5, iov[38].iov_base, iov[38].iov_len); 1444 ut_expected_io_set_iov(expected_io, 6, iov[39].iov_base, iov[39].iov_len); 1445 ut_expected_io_set_iov(expected_io, 7, iov[40].iov_base, iov[40].iov_len); 1446 ut_expected_io_set_iov(expected_io, 8, iov[41].iov_base, iov[41].iov_len); 1447 ut_expected_io_set_iov(expected_io, 9, iov[42].iov_base, iov[42].iov_len); 1448 ut_expected_io_set_iov(expected_io, 10, iov[43].iov_base, iov[43].iov_len); 1449 ut_expected_io_set_iov(expected_io, 11, iov[44].iov_base, iov[44].iov_len); 1450 ut_expected_io_set_iov(expected_io, 12, iov[45].iov_base, iov[45].iov_len); 1451 ut_expected_io_set_iov(expected_io, 13, iov[46].iov_base, 864); 1452 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1453 1454 /* The 4th child IO must be from the remaining 3232 bytes of iov[46] to the 1455 * first 864 bytes of iov[52] split by the IO boundary requirement. 1456 */ 1457 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 256, 128, 7); 1458 expected_io->md_buf = md_buf + 256 * 8; 1459 ut_expected_io_set_iov(expected_io, 0, (void *)((uintptr_t)iov[46].iov_base + 864), 1460 iov[46].iov_len - 864); 1461 ut_expected_io_set_iov(expected_io, 1, iov[47].iov_base, iov[47].iov_len); 1462 ut_expected_io_set_iov(expected_io, 2, iov[48].iov_base, iov[48].iov_len); 1463 ut_expected_io_set_iov(expected_io, 3, iov[49].iov_base, iov[49].iov_len); 1464 ut_expected_io_set_iov(expected_io, 4, iov[50].iov_base, iov[50].iov_len); 1465 ut_expected_io_set_iov(expected_io, 5, iov[51].iov_base, iov[51].iov_len); 1466 ut_expected_io_set_iov(expected_io, 6, iov[52].iov_base, 864); 1467 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1468 1469 /* The 5th child IO must be from the remaining 3232 bytes of iov[52] to 1470 * the first 4096 bytes of iov[57] split by the IO boundary requirement. 1471 */ 1472 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 384, 128, 6); 1473 expected_io->md_buf = md_buf + 384 * 8; 1474 ut_expected_io_set_iov(expected_io, 0, (void *)((uintptr_t)iov[52].iov_base + 864), 1475 iov[52].iov_len - 864); 1476 ut_expected_io_set_iov(expected_io, 1, iov[53].iov_base, iov[53].iov_len); 1477 ut_expected_io_set_iov(expected_io, 2, iov[54].iov_base, iov[54].iov_len); 1478 ut_expected_io_set_iov(expected_io, 3, iov[55].iov_base, iov[55].iov_len); 1479 ut_expected_io_set_iov(expected_io, 4, iov[56].iov_base, iov[56].iov_len); 1480 ut_expected_io_set_iov(expected_io, 5, iov[57].iov_base, 4960); 1481 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1482 1483 /* The 6th child IO must be from the remaining 7328 bytes of iov[57] 1484 * to the first 3936 bytes of iov[58] split by the capacity of child iovs. 1485 */ 1486 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 512, 30, 3); 1487 expected_io->md_buf = md_buf + 512 * 8; 1488 ut_expected_io_set_iov(expected_io, 0, (void *)((uintptr_t)iov[57].iov_base + 4960), 1489 iov[57].iov_len - 4960); 1490 ut_expected_io_set_iov(expected_io, 1, iov[58].iov_base, iov[58].iov_len); 1491 ut_expected_io_set_iov(expected_io, 2, iov[59].iov_base, 3936); 1492 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1493 1494 /* The 7th child IO is from the remaining 160 bytes of iov[59] and iov[60]. */ 1495 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 542, 1, 2); 1496 expected_io->md_buf = md_buf + 542 * 8; 1497 ut_expected_io_set_iov(expected_io, 0, (void *)((uintptr_t)iov[59].iov_base + 3936), 1498 iov[59].iov_len - 3936); 1499 ut_expected_io_set_iov(expected_io, 1, iov[60].iov_base, iov[60].iov_len); 1500 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1501 1502 rc = spdk_bdev_readv_blocks_with_md(desc, io_ch, iov, 61, md_buf, 1503 0, 543, io_done, NULL); 1504 CU_ASSERT(rc == 0); 1505 CU_ASSERT(g_io_done == false); 1506 1507 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 1508 stub_complete_io(1); 1509 CU_ASSERT(g_io_done == false); 1510 1511 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 5); 1512 stub_complete_io(5); 1513 CU_ASSERT(g_io_done == false); 1514 1515 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 1516 stub_complete_io(1); 1517 CU_ASSERT(g_io_done == true); 1518 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 1519 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS); 1520 1521 /* Test a WRITE_ZEROES that would span an I/O boundary. WRITE_ZEROES should not be 1522 * split, so test that. 1523 */ 1524 bdev->optimal_io_boundary = 15; 1525 g_io_done = false; 1526 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, 9, 36, 0); 1527 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1528 1529 rc = spdk_bdev_write_zeroes_blocks(desc, io_ch, 9, 36, io_done, NULL); 1530 CU_ASSERT(rc == 0); 1531 CU_ASSERT(g_io_done == false); 1532 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 1533 stub_complete_io(1); 1534 CU_ASSERT(g_io_done == true); 1535 1536 /* Test an UNMAP. This should also not be split. */ 1537 bdev->optimal_io_boundary = 16; 1538 g_io_done = false; 1539 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_UNMAP, 15, 2, 0); 1540 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1541 1542 rc = spdk_bdev_unmap_blocks(desc, io_ch, 15, 2, io_done, NULL); 1543 CU_ASSERT(rc == 0); 1544 CU_ASSERT(g_io_done == false); 1545 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 1546 stub_complete_io(1); 1547 CU_ASSERT(g_io_done == true); 1548 1549 /* Test a FLUSH. This should also not be split. */ 1550 bdev->optimal_io_boundary = 16; 1551 g_io_done = false; 1552 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_FLUSH, 15, 2, 0); 1553 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1554 1555 rc = spdk_bdev_flush_blocks(desc, io_ch, 15, 2, io_done, NULL); 1556 CU_ASSERT(rc == 0); 1557 CU_ASSERT(g_io_done == false); 1558 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 1559 stub_complete_io(1); 1560 CU_ASSERT(g_io_done == true); 1561 1562 CU_ASSERT(TAILQ_EMPTY(&g_bdev_ut_channel->expected_io)); 1563 1564 /* Children requests return an error status */ 1565 bdev->optimal_io_boundary = 16; 1566 iov[0].iov_base = (void *)0x10000; 1567 iov[0].iov_len = 512 * 64; 1568 g_io_exp_status = SPDK_BDEV_IO_STATUS_FAILED; 1569 g_io_done = false; 1570 g_io_status = SPDK_BDEV_IO_STATUS_SUCCESS; 1571 1572 rc = spdk_bdev_readv_blocks(desc, io_ch, iov, 1, 1, 64, io_done, NULL); 1573 CU_ASSERT(rc == 0); 1574 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 5); 1575 stub_complete_io(4); 1576 CU_ASSERT(g_io_done == false); 1577 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS); 1578 stub_complete_io(1); 1579 CU_ASSERT(g_io_done == true); 1580 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED); 1581 1582 /* Test if a multi vector command terminated with failure before continueing 1583 * splitting process when one of child I/O failed. 1584 * The multi vector command is as same as the above that needs to be split by strip 1585 * and then needs to be split further due to the capacity of child iovs. 1586 */ 1587 for (i = 0; i < BDEV_IO_NUM_CHILD_IOV - 1; i++) { 1588 iov[i].iov_base = (void *)((i + 1) * 0x10000); 1589 iov[i].iov_len = 512; 1590 } 1591 iov[BDEV_IO_NUM_CHILD_IOV - 1].iov_base = (void *)(BDEV_IO_NUM_CHILD_IOV * 0x10000); 1592 iov[BDEV_IO_NUM_CHILD_IOV - 1].iov_len = 256; 1593 1594 iov[BDEV_IO_NUM_CHILD_IOV].iov_base = (void *)((BDEV_IO_NUM_CHILD_IOV + 1) * 0x10000); 1595 iov[BDEV_IO_NUM_CHILD_IOV].iov_len = 256; 1596 1597 iov[BDEV_IO_NUM_CHILD_IOV + 1].iov_base = (void *)((BDEV_IO_NUM_CHILD_IOV + 2) * 0x10000); 1598 iov[BDEV_IO_NUM_CHILD_IOV + 1].iov_len = 512; 1599 1600 bdev->optimal_io_boundary = BDEV_IO_NUM_CHILD_IOV; 1601 1602 g_io_exp_status = SPDK_BDEV_IO_STATUS_FAILED; 1603 g_io_done = false; 1604 g_io_status = SPDK_BDEV_IO_STATUS_SUCCESS; 1605 1606 rc = spdk_bdev_readv_blocks(desc, io_ch, iov, BDEV_IO_NUM_CHILD_IOV * 2, 0, 1607 BDEV_IO_NUM_CHILD_IOV + 1, io_done, NULL); 1608 CU_ASSERT(rc == 0); 1609 CU_ASSERT(g_io_done == false); 1610 1611 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 1612 stub_complete_io(1); 1613 CU_ASSERT(g_io_done == true); 1614 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED); 1615 1616 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 1617 1618 /* for this test we will create the following conditions to hit the code path where 1619 * we are trying to send and IO following a split that has no iovs because we had to 1620 * trim them for alignment reasons. 1621 * 1622 * - 16K boundary, our IO will start at offset 0 with a length of 0x4200 1623 * - Our IOVs are 0x212 in size so that we run into the 16K boundary at child IOV 1624 * position 30 and overshoot by 0x2e. 1625 * - That means we'll send the IO and loop back to pick up the remaining bytes at 1626 * child IOV index 31. When we do, we find that we have to shorten index 31 by 0x2e 1627 * which eliniates that vector so we just send the first split IO with 30 vectors 1628 * and let the completion pick up the last 2 vectors. 1629 */ 1630 bdev->optimal_io_boundary = 32; 1631 bdev->split_on_optimal_io_boundary = true; 1632 g_io_done = false; 1633 1634 /* Init all parent IOVs to 0x212 */ 1635 for (i = 0; i < BDEV_IO_NUM_CHILD_IOV + 2; i++) { 1636 iov[i].iov_base = (void *)((i + 1) * 0x10000); 1637 iov[i].iov_len = 0x212; 1638 } 1639 1640 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0, BDEV_IO_NUM_CHILD_IOV, 1641 BDEV_IO_NUM_CHILD_IOV - 1); 1642 /* expect 0-29 to be 1:1 with the parent iov */ 1643 for (i = 0; i < BDEV_IO_NUM_CHILD_IOV - 2; i++) { 1644 ut_expected_io_set_iov(expected_io, i, iov[i].iov_base, iov[i].iov_len); 1645 } 1646 1647 /* expect index 30 to be shortened to 0x1e4 (0x212 - 0x1e) because of the alignment 1648 * where 0x1e is the amount we overshot the 16K boundary 1649 */ 1650 ut_expected_io_set_iov(expected_io, BDEV_IO_NUM_CHILD_IOV - 2, 1651 (void *)(iov[BDEV_IO_NUM_CHILD_IOV - 2].iov_base), 0x1e4); 1652 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1653 1654 /* 2nd child IO will have 2 remaining vectors, one to pick up from the one that was 1655 * shortened that take it to the next boundary and then a final one to get us to 1656 * 0x4200 bytes for the IO. 1657 */ 1658 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, BDEV_IO_NUM_CHILD_IOV, 1659 BDEV_IO_NUM_CHILD_IOV, 2); 1660 /* position 30 picked up the remaining bytes to the next boundary */ 1661 ut_expected_io_set_iov(expected_io, 0, 1662 (void *)(iov[BDEV_IO_NUM_CHILD_IOV - 2].iov_base + 0x1e4), 0x2e); 1663 1664 /* position 31 picked the the rest of the trasnfer to get us to 0x4200 */ 1665 ut_expected_io_set_iov(expected_io, 1, 1666 (void *)(iov[BDEV_IO_NUM_CHILD_IOV - 1].iov_base), 0x1d2); 1667 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1668 1669 rc = spdk_bdev_readv_blocks(desc, io_ch, iov, BDEV_IO_NUM_CHILD_IOV + 1, 0, 1670 BDEV_IO_NUM_CHILD_IOV + 1, io_done, NULL); 1671 CU_ASSERT(rc == 0); 1672 CU_ASSERT(g_io_done == false); 1673 1674 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 1675 stub_complete_io(1); 1676 CU_ASSERT(g_io_done == false); 1677 1678 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 1679 stub_complete_io(1); 1680 CU_ASSERT(g_io_done == true); 1681 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 1682 1683 spdk_put_io_channel(io_ch); 1684 spdk_bdev_close(desc); 1685 free_bdev(bdev); 1686 spdk_bdev_finish(bdev_fini_cb, NULL); 1687 poll_threads(); 1688 } 1689 1690 static void 1691 bdev_io_split_with_io_wait(void) 1692 { 1693 struct spdk_bdev *bdev; 1694 struct spdk_bdev_desc *desc = NULL; 1695 struct spdk_io_channel *io_ch; 1696 struct spdk_bdev_channel *channel; 1697 struct spdk_bdev_mgmt_channel *mgmt_ch; 1698 struct spdk_bdev_opts bdev_opts = { 1699 .bdev_io_pool_size = 2, 1700 .bdev_io_cache_size = 1, 1701 }; 1702 struct iovec iov[3]; 1703 struct ut_expected_io *expected_io; 1704 int rc; 1705 1706 rc = spdk_bdev_set_opts(&bdev_opts); 1707 CU_ASSERT(rc == 0); 1708 spdk_bdev_initialize(bdev_init_cb, NULL); 1709 1710 bdev = allocate_bdev("bdev0"); 1711 1712 rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc); 1713 CU_ASSERT(rc == 0); 1714 CU_ASSERT(desc != NULL); 1715 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 1716 io_ch = spdk_bdev_get_io_channel(desc); 1717 CU_ASSERT(io_ch != NULL); 1718 channel = spdk_io_channel_get_ctx(io_ch); 1719 mgmt_ch = channel->shared_resource->mgmt_ch; 1720 1721 bdev->optimal_io_boundary = 16; 1722 bdev->split_on_optimal_io_boundary = true; 1723 1724 rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, NULL); 1725 CU_ASSERT(rc == 0); 1726 1727 /* Now test that a single-vector command is split correctly. 1728 * Offset 14, length 8, payload 0xF000 1729 * Child - Offset 14, length 2, payload 0xF000 1730 * Child - Offset 16, length 6, payload 0xF000 + 2 * 512 1731 * 1732 * Set up the expected values before calling spdk_bdev_read_blocks 1733 */ 1734 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 14, 2, 1); 1735 ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 2 * 512); 1736 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1737 1738 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 16, 6, 1); 1739 ut_expected_io_set_iov(expected_io, 0, (void *)(0xF000 + 2 * 512), 6 * 512); 1740 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1741 1742 /* The following children will be submitted sequentially due to the capacity of 1743 * spdk_bdev_io. 1744 */ 1745 1746 /* The first child I/O will be queued to wait until an spdk_bdev_io becomes available */ 1747 rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 8, io_done, NULL); 1748 CU_ASSERT(rc == 0); 1749 CU_ASSERT(!TAILQ_EMPTY(&mgmt_ch->io_wait_queue)); 1750 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 1751 1752 /* Completing the first read I/O will submit the first child */ 1753 stub_complete_io(1); 1754 CU_ASSERT(TAILQ_EMPTY(&mgmt_ch->io_wait_queue)); 1755 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 1756 1757 /* Completing the first child will submit the second child */ 1758 stub_complete_io(1); 1759 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 1760 1761 /* Complete the second child I/O. This should result in our callback getting 1762 * invoked since the parent I/O is now complete. 1763 */ 1764 stub_complete_io(1); 1765 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 1766 1767 /* Now set up a more complex, multi-vector command that needs to be split, 1768 * including splitting iovecs. 1769 */ 1770 iov[0].iov_base = (void *)0x10000; 1771 iov[0].iov_len = 512; 1772 iov[1].iov_base = (void *)0x20000; 1773 iov[1].iov_len = 20 * 512; 1774 iov[2].iov_base = (void *)0x30000; 1775 iov[2].iov_len = 11 * 512; 1776 1777 g_io_done = false; 1778 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 14, 2, 2); 1779 ut_expected_io_set_iov(expected_io, 0, (void *)0x10000, 512); 1780 ut_expected_io_set_iov(expected_io, 1, (void *)0x20000, 512); 1781 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1782 1783 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 16, 16, 1); 1784 ut_expected_io_set_iov(expected_io, 0, (void *)(0x20000 + 512), 16 * 512); 1785 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1786 1787 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 32, 14, 2); 1788 ut_expected_io_set_iov(expected_io, 0, (void *)(0x20000 + 17 * 512), 3 * 512); 1789 ut_expected_io_set_iov(expected_io, 1, (void *)0x30000, 11 * 512); 1790 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1791 1792 rc = spdk_bdev_writev_blocks(desc, io_ch, iov, 3, 14, 32, io_done, NULL); 1793 CU_ASSERT(rc == 0); 1794 CU_ASSERT(g_io_done == false); 1795 1796 /* The following children will be submitted sequentially due to the capacity of 1797 * spdk_bdev_io. 1798 */ 1799 1800 /* Completing the first child will submit the second child */ 1801 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 1802 stub_complete_io(1); 1803 CU_ASSERT(g_io_done == false); 1804 1805 /* Completing the second child will submit the third child */ 1806 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 1807 stub_complete_io(1); 1808 CU_ASSERT(g_io_done == false); 1809 1810 /* Completing the third child will result in our callback getting invoked 1811 * since the parent I/O is now complete. 1812 */ 1813 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 1814 stub_complete_io(1); 1815 CU_ASSERT(g_io_done == true); 1816 1817 CU_ASSERT(TAILQ_EMPTY(&g_bdev_ut_channel->expected_io)); 1818 1819 spdk_put_io_channel(io_ch); 1820 spdk_bdev_close(desc); 1821 free_bdev(bdev); 1822 spdk_bdev_finish(bdev_fini_cb, NULL); 1823 poll_threads(); 1824 } 1825 1826 static void 1827 bdev_io_alignment(void) 1828 { 1829 struct spdk_bdev *bdev; 1830 struct spdk_bdev_desc *desc = NULL; 1831 struct spdk_io_channel *io_ch; 1832 struct spdk_bdev_opts bdev_opts = { 1833 .bdev_io_pool_size = 20, 1834 .bdev_io_cache_size = 2, 1835 }; 1836 int rc; 1837 void *buf = NULL; 1838 struct iovec iovs[2]; 1839 int iovcnt; 1840 uint64_t alignment; 1841 1842 rc = spdk_bdev_set_opts(&bdev_opts); 1843 CU_ASSERT(rc == 0); 1844 spdk_bdev_initialize(bdev_init_cb, NULL); 1845 1846 fn_table.submit_request = stub_submit_request_get_buf; 1847 bdev = allocate_bdev("bdev0"); 1848 1849 rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc); 1850 CU_ASSERT(rc == 0); 1851 CU_ASSERT(desc != NULL); 1852 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 1853 io_ch = spdk_bdev_get_io_channel(desc); 1854 CU_ASSERT(io_ch != NULL); 1855 1856 /* Create aligned buffer */ 1857 rc = posix_memalign(&buf, 4096, 8192); 1858 SPDK_CU_ASSERT_FATAL(rc == 0); 1859 1860 /* Pass aligned single buffer with no alignment required */ 1861 alignment = 1; 1862 bdev->required_alignment = spdk_u32log2(alignment); 1863 1864 rc = spdk_bdev_write_blocks(desc, io_ch, buf, 0, 1, io_done, NULL); 1865 CU_ASSERT(rc == 0); 1866 stub_complete_io(1); 1867 CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt, 1868 alignment)); 1869 1870 rc = spdk_bdev_read_blocks(desc, io_ch, buf, 0, 1, io_done, NULL); 1871 CU_ASSERT(rc == 0); 1872 stub_complete_io(1); 1873 CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt, 1874 alignment)); 1875 1876 /* Pass unaligned single buffer with no alignment required */ 1877 alignment = 1; 1878 bdev->required_alignment = spdk_u32log2(alignment); 1879 1880 rc = spdk_bdev_write_blocks(desc, io_ch, buf + 4, 0, 1, io_done, NULL); 1881 CU_ASSERT(rc == 0); 1882 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0); 1883 CU_ASSERT(g_bdev_io->u.bdev.iovs[0].iov_base == buf + 4); 1884 stub_complete_io(1); 1885 1886 rc = spdk_bdev_read_blocks(desc, io_ch, buf + 4, 0, 1, io_done, NULL); 1887 CU_ASSERT(rc == 0); 1888 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0); 1889 CU_ASSERT(g_bdev_io->u.bdev.iovs[0].iov_base == buf + 4); 1890 stub_complete_io(1); 1891 1892 /* Pass unaligned single buffer with 512 alignment required */ 1893 alignment = 512; 1894 bdev->required_alignment = spdk_u32log2(alignment); 1895 1896 rc = spdk_bdev_write_blocks(desc, io_ch, buf + 4, 0, 1, io_done, NULL); 1897 CU_ASSERT(rc == 0); 1898 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 1); 1899 CU_ASSERT(g_bdev_io->u.bdev.iovs == &g_bdev_io->internal.bounce_iov); 1900 CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt, 1901 alignment)); 1902 stub_complete_io(1); 1903 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0); 1904 1905 rc = spdk_bdev_read_blocks(desc, io_ch, buf + 4, 0, 1, io_done, NULL); 1906 CU_ASSERT(rc == 0); 1907 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 1); 1908 CU_ASSERT(g_bdev_io->u.bdev.iovs == &g_bdev_io->internal.bounce_iov); 1909 CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt, 1910 alignment)); 1911 stub_complete_io(1); 1912 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0); 1913 1914 /* Pass unaligned single buffer with 4096 alignment required */ 1915 alignment = 4096; 1916 bdev->required_alignment = spdk_u32log2(alignment); 1917 1918 rc = spdk_bdev_write_blocks(desc, io_ch, buf + 8, 0, 1, io_done, NULL); 1919 CU_ASSERT(rc == 0); 1920 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 1); 1921 CU_ASSERT(g_bdev_io->u.bdev.iovs == &g_bdev_io->internal.bounce_iov); 1922 CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt, 1923 alignment)); 1924 stub_complete_io(1); 1925 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0); 1926 1927 rc = spdk_bdev_read_blocks(desc, io_ch, buf + 8, 0, 1, io_done, NULL); 1928 CU_ASSERT(rc == 0); 1929 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 1); 1930 CU_ASSERT(g_bdev_io->u.bdev.iovs == &g_bdev_io->internal.bounce_iov); 1931 CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt, 1932 alignment)); 1933 stub_complete_io(1); 1934 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0); 1935 1936 /* Pass aligned iovs with no alignment required */ 1937 alignment = 1; 1938 bdev->required_alignment = spdk_u32log2(alignment); 1939 1940 iovcnt = 1; 1941 iovs[0].iov_base = buf; 1942 iovs[0].iov_len = 512; 1943 1944 rc = spdk_bdev_writev(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL); 1945 CU_ASSERT(rc == 0); 1946 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0); 1947 stub_complete_io(1); 1948 CU_ASSERT(g_bdev_io->u.bdev.iovs[0].iov_base == iovs[0].iov_base); 1949 1950 rc = spdk_bdev_readv(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL); 1951 CU_ASSERT(rc == 0); 1952 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0); 1953 stub_complete_io(1); 1954 CU_ASSERT(g_bdev_io->u.bdev.iovs[0].iov_base == iovs[0].iov_base); 1955 1956 /* Pass unaligned iovs with no alignment required */ 1957 alignment = 1; 1958 bdev->required_alignment = spdk_u32log2(alignment); 1959 1960 iovcnt = 2; 1961 iovs[0].iov_base = buf + 16; 1962 iovs[0].iov_len = 256; 1963 iovs[1].iov_base = buf + 16 + 256 + 32; 1964 iovs[1].iov_len = 256; 1965 1966 rc = spdk_bdev_writev(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL); 1967 CU_ASSERT(rc == 0); 1968 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0); 1969 stub_complete_io(1); 1970 CU_ASSERT(g_bdev_io->u.bdev.iovs[0].iov_base == iovs[0].iov_base); 1971 1972 rc = spdk_bdev_readv(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL); 1973 CU_ASSERT(rc == 0); 1974 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0); 1975 stub_complete_io(1); 1976 CU_ASSERT(g_bdev_io->u.bdev.iovs[0].iov_base == iovs[0].iov_base); 1977 1978 /* Pass unaligned iov with 2048 alignment required */ 1979 alignment = 2048; 1980 bdev->required_alignment = spdk_u32log2(alignment); 1981 1982 iovcnt = 2; 1983 iovs[0].iov_base = buf + 16; 1984 iovs[0].iov_len = 256; 1985 iovs[1].iov_base = buf + 16 + 256 + 32; 1986 iovs[1].iov_len = 256; 1987 1988 rc = spdk_bdev_writev(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL); 1989 CU_ASSERT(rc == 0); 1990 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == iovcnt); 1991 CU_ASSERT(g_bdev_io->u.bdev.iovs == &g_bdev_io->internal.bounce_iov); 1992 CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt, 1993 alignment)); 1994 stub_complete_io(1); 1995 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0); 1996 1997 rc = spdk_bdev_readv(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL); 1998 CU_ASSERT(rc == 0); 1999 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == iovcnt); 2000 CU_ASSERT(g_bdev_io->u.bdev.iovs == &g_bdev_io->internal.bounce_iov); 2001 CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt, 2002 alignment)); 2003 stub_complete_io(1); 2004 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0); 2005 2006 /* Pass iov without allocated buffer without alignment required */ 2007 alignment = 1; 2008 bdev->required_alignment = spdk_u32log2(alignment); 2009 2010 iovcnt = 1; 2011 iovs[0].iov_base = NULL; 2012 iovs[0].iov_len = 0; 2013 2014 rc = spdk_bdev_readv(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL); 2015 CU_ASSERT(rc == 0); 2016 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0); 2017 CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt, 2018 alignment)); 2019 stub_complete_io(1); 2020 2021 /* Pass iov without allocated buffer with 1024 alignment required */ 2022 alignment = 1024; 2023 bdev->required_alignment = spdk_u32log2(alignment); 2024 2025 iovcnt = 1; 2026 iovs[0].iov_base = NULL; 2027 iovs[0].iov_len = 0; 2028 2029 rc = spdk_bdev_readv(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL); 2030 CU_ASSERT(rc == 0); 2031 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0); 2032 CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt, 2033 alignment)); 2034 stub_complete_io(1); 2035 2036 spdk_put_io_channel(io_ch); 2037 spdk_bdev_close(desc); 2038 free_bdev(bdev); 2039 fn_table.submit_request = stub_submit_request; 2040 spdk_bdev_finish(bdev_fini_cb, NULL); 2041 poll_threads(); 2042 2043 free(buf); 2044 } 2045 2046 static void 2047 bdev_io_alignment_with_boundary(void) 2048 { 2049 struct spdk_bdev *bdev; 2050 struct spdk_bdev_desc *desc = NULL; 2051 struct spdk_io_channel *io_ch; 2052 struct spdk_bdev_opts bdev_opts = { 2053 .bdev_io_pool_size = 20, 2054 .bdev_io_cache_size = 2, 2055 }; 2056 int rc; 2057 void *buf = NULL; 2058 struct iovec iovs[2]; 2059 int iovcnt; 2060 uint64_t alignment; 2061 2062 rc = spdk_bdev_set_opts(&bdev_opts); 2063 CU_ASSERT(rc == 0); 2064 spdk_bdev_initialize(bdev_init_cb, NULL); 2065 2066 fn_table.submit_request = stub_submit_request_get_buf; 2067 bdev = allocate_bdev("bdev0"); 2068 2069 rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc); 2070 CU_ASSERT(rc == 0); 2071 CU_ASSERT(desc != NULL); 2072 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 2073 io_ch = spdk_bdev_get_io_channel(desc); 2074 CU_ASSERT(io_ch != NULL); 2075 2076 /* Create aligned buffer */ 2077 rc = posix_memalign(&buf, 4096, 131072); 2078 SPDK_CU_ASSERT_FATAL(rc == 0); 2079 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 2080 2081 /* 512 * 3 with 2 IO boundary, allocate small data buffer from bdev layer */ 2082 alignment = 512; 2083 bdev->required_alignment = spdk_u32log2(alignment); 2084 bdev->optimal_io_boundary = 2; 2085 bdev->split_on_optimal_io_boundary = true; 2086 2087 iovcnt = 1; 2088 iovs[0].iov_base = NULL; 2089 iovs[0].iov_len = 512 * 3; 2090 2091 rc = spdk_bdev_readv_blocks(desc, io_ch, iovs, iovcnt, 1, 3, io_done, NULL); 2092 CU_ASSERT(rc == 0); 2093 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 2094 stub_complete_io(2); 2095 2096 /* 8KiB with 16 IO boundary, allocate large data buffer from bdev layer */ 2097 alignment = 512; 2098 bdev->required_alignment = spdk_u32log2(alignment); 2099 bdev->optimal_io_boundary = 16; 2100 bdev->split_on_optimal_io_boundary = true; 2101 2102 iovcnt = 1; 2103 iovs[0].iov_base = NULL; 2104 iovs[0].iov_len = 512 * 16; 2105 2106 rc = spdk_bdev_readv_blocks(desc, io_ch, iovs, iovcnt, 1, 16, io_done, NULL); 2107 CU_ASSERT(rc == 0); 2108 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 2109 stub_complete_io(2); 2110 2111 /* 512 * 160 with 128 IO boundary, 63.5KiB + 16.5KiB for the two children requests */ 2112 alignment = 512; 2113 bdev->required_alignment = spdk_u32log2(alignment); 2114 bdev->optimal_io_boundary = 128; 2115 bdev->split_on_optimal_io_boundary = true; 2116 2117 iovcnt = 1; 2118 iovs[0].iov_base = buf + 16; 2119 iovs[0].iov_len = 512 * 160; 2120 rc = spdk_bdev_readv_blocks(desc, io_ch, iovs, iovcnt, 1, 160, io_done, NULL); 2121 CU_ASSERT(rc == 0); 2122 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 2123 stub_complete_io(2); 2124 2125 /* 512 * 3 with 2 IO boundary */ 2126 alignment = 512; 2127 bdev->required_alignment = spdk_u32log2(alignment); 2128 bdev->optimal_io_boundary = 2; 2129 bdev->split_on_optimal_io_boundary = true; 2130 2131 iovcnt = 2; 2132 iovs[0].iov_base = buf + 16; 2133 iovs[0].iov_len = 512; 2134 iovs[1].iov_base = buf + 16 + 512 + 32; 2135 iovs[1].iov_len = 1024; 2136 2137 rc = spdk_bdev_writev_blocks(desc, io_ch, iovs, iovcnt, 1, 3, io_done, NULL); 2138 CU_ASSERT(rc == 0); 2139 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 2140 stub_complete_io(2); 2141 2142 rc = spdk_bdev_readv_blocks(desc, io_ch, iovs, iovcnt, 1, 3, io_done, NULL); 2143 CU_ASSERT(rc == 0); 2144 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 2145 stub_complete_io(2); 2146 2147 /* 512 * 64 with 32 IO boundary */ 2148 bdev->optimal_io_boundary = 32; 2149 iovcnt = 2; 2150 iovs[0].iov_base = buf + 16; 2151 iovs[0].iov_len = 16384; 2152 iovs[1].iov_base = buf + 16 + 16384 + 32; 2153 iovs[1].iov_len = 16384; 2154 2155 rc = spdk_bdev_writev_blocks(desc, io_ch, iovs, iovcnt, 1, 64, io_done, NULL); 2156 CU_ASSERT(rc == 0); 2157 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 3); 2158 stub_complete_io(3); 2159 2160 rc = spdk_bdev_readv_blocks(desc, io_ch, iovs, iovcnt, 1, 64, io_done, NULL); 2161 CU_ASSERT(rc == 0); 2162 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 3); 2163 stub_complete_io(3); 2164 2165 /* 512 * 160 with 32 IO boundary */ 2166 iovcnt = 1; 2167 iovs[0].iov_base = buf + 16; 2168 iovs[0].iov_len = 16384 + 65536; 2169 2170 rc = spdk_bdev_writev_blocks(desc, io_ch, iovs, iovcnt, 1, 160, io_done, NULL); 2171 CU_ASSERT(rc == 0); 2172 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 6); 2173 stub_complete_io(6); 2174 2175 spdk_put_io_channel(io_ch); 2176 spdk_bdev_close(desc); 2177 free_bdev(bdev); 2178 fn_table.submit_request = stub_submit_request; 2179 spdk_bdev_finish(bdev_fini_cb, NULL); 2180 poll_threads(); 2181 2182 free(buf); 2183 } 2184 2185 static void 2186 histogram_status_cb(void *cb_arg, int status) 2187 { 2188 g_status = status; 2189 } 2190 2191 static void 2192 histogram_data_cb(void *cb_arg, int status, struct spdk_histogram_data *histogram) 2193 { 2194 g_status = status; 2195 g_histogram = histogram; 2196 } 2197 2198 static void 2199 histogram_io_count(void *ctx, uint64_t start, uint64_t end, uint64_t count, 2200 uint64_t total, uint64_t so_far) 2201 { 2202 g_count += count; 2203 } 2204 2205 static void 2206 bdev_histograms(void) 2207 { 2208 struct spdk_bdev *bdev; 2209 struct spdk_bdev_desc *desc = NULL; 2210 struct spdk_io_channel *ch; 2211 struct spdk_histogram_data *histogram; 2212 uint8_t buf[4096]; 2213 int rc; 2214 2215 spdk_bdev_initialize(bdev_init_cb, NULL); 2216 2217 bdev = allocate_bdev("bdev"); 2218 2219 rc = spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc); 2220 CU_ASSERT(rc == 0); 2221 CU_ASSERT(desc != NULL); 2222 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 2223 2224 ch = spdk_bdev_get_io_channel(desc); 2225 CU_ASSERT(ch != NULL); 2226 2227 /* Enable histogram */ 2228 g_status = -1; 2229 spdk_bdev_histogram_enable(bdev, histogram_status_cb, NULL, true); 2230 poll_threads(); 2231 CU_ASSERT(g_status == 0); 2232 CU_ASSERT(bdev->internal.histogram_enabled == true); 2233 2234 /* Allocate histogram */ 2235 histogram = spdk_histogram_data_alloc(); 2236 SPDK_CU_ASSERT_FATAL(histogram != NULL); 2237 2238 /* Check if histogram is zeroed */ 2239 spdk_bdev_histogram_get(bdev, histogram, histogram_data_cb, NULL); 2240 poll_threads(); 2241 CU_ASSERT(g_status == 0); 2242 SPDK_CU_ASSERT_FATAL(g_histogram != NULL); 2243 2244 g_count = 0; 2245 spdk_histogram_data_iterate(g_histogram, histogram_io_count, NULL); 2246 2247 CU_ASSERT(g_count == 0); 2248 2249 rc = spdk_bdev_write_blocks(desc, ch, buf, 0, 1, io_done, NULL); 2250 CU_ASSERT(rc == 0); 2251 2252 spdk_delay_us(10); 2253 stub_complete_io(1); 2254 poll_threads(); 2255 2256 rc = spdk_bdev_read_blocks(desc, ch, buf, 0, 1, io_done, NULL); 2257 CU_ASSERT(rc == 0); 2258 2259 spdk_delay_us(10); 2260 stub_complete_io(1); 2261 poll_threads(); 2262 2263 /* Check if histogram gathered data from all I/O channels */ 2264 g_histogram = NULL; 2265 spdk_bdev_histogram_get(bdev, histogram, histogram_data_cb, NULL); 2266 poll_threads(); 2267 CU_ASSERT(g_status == 0); 2268 CU_ASSERT(bdev->internal.histogram_enabled == true); 2269 SPDK_CU_ASSERT_FATAL(g_histogram != NULL); 2270 2271 g_count = 0; 2272 spdk_histogram_data_iterate(g_histogram, histogram_io_count, NULL); 2273 CU_ASSERT(g_count == 2); 2274 2275 /* Disable histogram */ 2276 spdk_bdev_histogram_enable(bdev, histogram_status_cb, NULL, false); 2277 poll_threads(); 2278 CU_ASSERT(g_status == 0); 2279 CU_ASSERT(bdev->internal.histogram_enabled == false); 2280 2281 /* Try to run histogram commands on disabled bdev */ 2282 spdk_bdev_histogram_get(bdev, histogram, histogram_data_cb, NULL); 2283 poll_threads(); 2284 CU_ASSERT(g_status == -EFAULT); 2285 2286 spdk_histogram_data_free(histogram); 2287 spdk_put_io_channel(ch); 2288 spdk_bdev_close(desc); 2289 free_bdev(bdev); 2290 spdk_bdev_finish(bdev_fini_cb, NULL); 2291 poll_threads(); 2292 } 2293 2294 static void 2295 _bdev_compare(bool emulated) 2296 { 2297 struct spdk_bdev *bdev; 2298 struct spdk_bdev_desc *desc = NULL; 2299 struct spdk_io_channel *ioch; 2300 struct ut_expected_io *expected_io; 2301 uint64_t offset, num_blocks; 2302 uint32_t num_completed; 2303 char aa_buf[512]; 2304 char bb_buf[512]; 2305 struct iovec compare_iov; 2306 uint8_t io_type; 2307 int rc; 2308 2309 if (emulated) { 2310 io_type = SPDK_BDEV_IO_TYPE_READ; 2311 } else { 2312 io_type = SPDK_BDEV_IO_TYPE_COMPARE; 2313 } 2314 2315 memset(aa_buf, 0xaa, sizeof(aa_buf)); 2316 memset(bb_buf, 0xbb, sizeof(bb_buf)); 2317 2318 g_io_types_supported[SPDK_BDEV_IO_TYPE_COMPARE] = !emulated; 2319 2320 spdk_bdev_initialize(bdev_init_cb, NULL); 2321 fn_table.submit_request = stub_submit_request_get_buf; 2322 bdev = allocate_bdev("bdev"); 2323 2324 rc = spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc); 2325 CU_ASSERT_EQUAL(rc, 0); 2326 SPDK_CU_ASSERT_FATAL(desc != NULL); 2327 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 2328 ioch = spdk_bdev_get_io_channel(desc); 2329 SPDK_CU_ASSERT_FATAL(ioch != NULL); 2330 2331 fn_table.submit_request = stub_submit_request_get_buf; 2332 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 2333 2334 offset = 50; 2335 num_blocks = 1; 2336 compare_iov.iov_base = aa_buf; 2337 compare_iov.iov_len = sizeof(aa_buf); 2338 2339 expected_io = ut_alloc_expected_io(io_type, offset, num_blocks, 0); 2340 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2341 2342 g_io_done = false; 2343 g_compare_read_buf = aa_buf; 2344 g_compare_read_buf_len = sizeof(aa_buf); 2345 rc = spdk_bdev_comparev_blocks(desc, ioch, &compare_iov, 1, offset, num_blocks, io_done, NULL); 2346 CU_ASSERT_EQUAL(rc, 0); 2347 num_completed = stub_complete_io(1); 2348 CU_ASSERT_EQUAL(num_completed, 1); 2349 CU_ASSERT(g_io_done == true); 2350 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS); 2351 2352 expected_io = ut_alloc_expected_io(io_type, offset, num_blocks, 0); 2353 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2354 2355 g_io_done = false; 2356 g_compare_read_buf = bb_buf; 2357 g_compare_read_buf_len = sizeof(bb_buf); 2358 rc = spdk_bdev_comparev_blocks(desc, ioch, &compare_iov, 1, offset, num_blocks, io_done, NULL); 2359 CU_ASSERT_EQUAL(rc, 0); 2360 num_completed = stub_complete_io(1); 2361 CU_ASSERT_EQUAL(num_completed, 1); 2362 CU_ASSERT(g_io_done == true); 2363 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_MISCOMPARE); 2364 2365 spdk_put_io_channel(ioch); 2366 spdk_bdev_close(desc); 2367 free_bdev(bdev); 2368 fn_table.submit_request = stub_submit_request; 2369 spdk_bdev_finish(bdev_fini_cb, NULL); 2370 poll_threads(); 2371 2372 g_io_types_supported[SPDK_BDEV_IO_TYPE_COMPARE] = true; 2373 2374 g_compare_read_buf = NULL; 2375 } 2376 2377 static void 2378 bdev_compare(void) 2379 { 2380 _bdev_compare(true); 2381 _bdev_compare(false); 2382 } 2383 2384 static void 2385 bdev_compare_and_write(void) 2386 { 2387 struct spdk_bdev *bdev; 2388 struct spdk_bdev_desc *desc = NULL; 2389 struct spdk_io_channel *ioch; 2390 struct ut_expected_io *expected_io; 2391 uint64_t offset, num_blocks; 2392 uint32_t num_completed; 2393 char aa_buf[512]; 2394 char bb_buf[512]; 2395 char cc_buf[512]; 2396 char write_buf[512]; 2397 struct iovec compare_iov; 2398 struct iovec write_iov; 2399 int rc; 2400 2401 memset(aa_buf, 0xaa, sizeof(aa_buf)); 2402 memset(bb_buf, 0xbb, sizeof(bb_buf)); 2403 memset(cc_buf, 0xcc, sizeof(cc_buf)); 2404 2405 g_io_types_supported[SPDK_BDEV_IO_TYPE_COMPARE] = false; 2406 2407 spdk_bdev_initialize(bdev_init_cb, NULL); 2408 fn_table.submit_request = stub_submit_request_get_buf; 2409 bdev = allocate_bdev("bdev"); 2410 2411 rc = spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc); 2412 CU_ASSERT_EQUAL(rc, 0); 2413 SPDK_CU_ASSERT_FATAL(desc != NULL); 2414 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 2415 ioch = spdk_bdev_get_io_channel(desc); 2416 SPDK_CU_ASSERT_FATAL(ioch != NULL); 2417 2418 fn_table.submit_request = stub_submit_request_get_buf; 2419 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 2420 2421 offset = 50; 2422 num_blocks = 1; 2423 compare_iov.iov_base = aa_buf; 2424 compare_iov.iov_len = sizeof(aa_buf); 2425 write_iov.iov_base = bb_buf; 2426 write_iov.iov_len = sizeof(bb_buf); 2427 2428 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, offset, num_blocks, 0); 2429 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2430 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, offset, num_blocks, 0); 2431 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2432 2433 g_io_done = false; 2434 g_compare_read_buf = aa_buf; 2435 g_compare_read_buf_len = sizeof(aa_buf); 2436 memset(write_buf, 0, sizeof(write_buf)); 2437 g_compare_write_buf = write_buf; 2438 g_compare_write_buf_len = sizeof(write_buf); 2439 rc = spdk_bdev_comparev_and_writev_blocks(desc, ioch, &compare_iov, 1, &write_iov, 1, 2440 offset, num_blocks, io_done, NULL); 2441 /* Trigger range locking */ 2442 poll_threads(); 2443 CU_ASSERT_EQUAL(rc, 0); 2444 num_completed = stub_complete_io(1); 2445 CU_ASSERT_EQUAL(num_completed, 1); 2446 CU_ASSERT(g_io_done == false); 2447 num_completed = stub_complete_io(1); 2448 /* Trigger range unlocking */ 2449 poll_threads(); 2450 CU_ASSERT_EQUAL(num_completed, 1); 2451 CU_ASSERT(g_io_done == true); 2452 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS); 2453 CU_ASSERT(memcmp(write_buf, bb_buf, sizeof(write_buf)) == 0); 2454 2455 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, offset, num_blocks, 0); 2456 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2457 2458 g_io_done = false; 2459 g_compare_read_buf = cc_buf; 2460 g_compare_read_buf_len = sizeof(cc_buf); 2461 memset(write_buf, 0, sizeof(write_buf)); 2462 g_compare_write_buf = write_buf; 2463 g_compare_write_buf_len = sizeof(write_buf); 2464 rc = spdk_bdev_comparev_and_writev_blocks(desc, ioch, &compare_iov, 1, &write_iov, 1, 2465 offset, num_blocks, io_done, NULL); 2466 /* Trigger range locking */ 2467 poll_threads(); 2468 CU_ASSERT_EQUAL(rc, 0); 2469 num_completed = stub_complete_io(1); 2470 /* Trigger range unlocking earlier because we expect error here */ 2471 poll_threads(); 2472 CU_ASSERT_EQUAL(num_completed, 1); 2473 CU_ASSERT(g_io_done == true); 2474 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_MISCOMPARE); 2475 num_completed = stub_complete_io(1); 2476 CU_ASSERT_EQUAL(num_completed, 0); 2477 2478 spdk_put_io_channel(ioch); 2479 spdk_bdev_close(desc); 2480 free_bdev(bdev); 2481 fn_table.submit_request = stub_submit_request; 2482 spdk_bdev_finish(bdev_fini_cb, NULL); 2483 poll_threads(); 2484 2485 g_io_types_supported[SPDK_BDEV_IO_TYPE_COMPARE] = true; 2486 2487 g_compare_read_buf = NULL; 2488 g_compare_write_buf = NULL; 2489 } 2490 2491 static void 2492 bdev_write_zeroes(void) 2493 { 2494 struct spdk_bdev *bdev; 2495 struct spdk_bdev_desc *desc = NULL; 2496 struct spdk_io_channel *ioch; 2497 struct ut_expected_io *expected_io; 2498 uint64_t offset, num_io_blocks, num_blocks; 2499 uint32_t num_completed, num_requests; 2500 int rc; 2501 2502 spdk_bdev_initialize(bdev_init_cb, NULL); 2503 bdev = allocate_bdev("bdev"); 2504 2505 rc = spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc); 2506 CU_ASSERT_EQUAL(rc, 0); 2507 SPDK_CU_ASSERT_FATAL(desc != NULL); 2508 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 2509 ioch = spdk_bdev_get_io_channel(desc); 2510 SPDK_CU_ASSERT_FATAL(ioch != NULL); 2511 2512 fn_table.submit_request = stub_submit_request; 2513 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 2514 2515 /* First test that if the bdev supports write_zeroes, the request won't be split */ 2516 bdev->md_len = 0; 2517 bdev->blocklen = 4096; 2518 num_blocks = (ZERO_BUFFER_SIZE / bdev->blocklen) * 2; 2519 2520 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, 0, num_blocks, 0); 2521 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2522 rc = spdk_bdev_write_zeroes_blocks(desc, ioch, 0, num_blocks, io_done, NULL); 2523 CU_ASSERT_EQUAL(rc, 0); 2524 num_completed = stub_complete_io(1); 2525 CU_ASSERT_EQUAL(num_completed, 1); 2526 2527 /* Check that if write zeroes is not supported it'll be replaced by regular writes */ 2528 ut_enable_io_type(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, false); 2529 num_io_blocks = ZERO_BUFFER_SIZE / bdev->blocklen; 2530 num_requests = 2; 2531 num_blocks = (ZERO_BUFFER_SIZE / bdev->blocklen) * num_requests; 2532 2533 for (offset = 0; offset < num_requests; ++offset) { 2534 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 2535 offset * num_io_blocks, num_io_blocks, 0); 2536 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2537 } 2538 2539 rc = spdk_bdev_write_zeroes_blocks(desc, ioch, 0, num_blocks, io_done, NULL); 2540 CU_ASSERT_EQUAL(rc, 0); 2541 num_completed = stub_complete_io(num_requests); 2542 CU_ASSERT_EQUAL(num_completed, num_requests); 2543 2544 /* Check that the splitting is correct if bdev has interleaved metadata */ 2545 bdev->md_interleave = true; 2546 bdev->md_len = 64; 2547 bdev->blocklen = 4096 + 64; 2548 num_blocks = (ZERO_BUFFER_SIZE / bdev->blocklen) * 2; 2549 2550 num_requests = offset = 0; 2551 while (offset < num_blocks) { 2552 num_io_blocks = spdk_min(ZERO_BUFFER_SIZE / bdev->blocklen, num_blocks - offset); 2553 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 2554 offset, num_io_blocks, 0); 2555 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2556 offset += num_io_blocks; 2557 num_requests++; 2558 } 2559 2560 rc = spdk_bdev_write_zeroes_blocks(desc, ioch, 0, num_blocks, io_done, NULL); 2561 CU_ASSERT_EQUAL(rc, 0); 2562 num_completed = stub_complete_io(num_requests); 2563 CU_ASSERT_EQUAL(num_completed, num_requests); 2564 num_completed = stub_complete_io(num_requests); 2565 assert(num_completed == 0); 2566 2567 /* Check the the same for separate metadata buffer */ 2568 bdev->md_interleave = false; 2569 bdev->md_len = 64; 2570 bdev->blocklen = 4096; 2571 2572 num_requests = offset = 0; 2573 while (offset < num_blocks) { 2574 num_io_blocks = spdk_min(ZERO_BUFFER_SIZE / (bdev->blocklen + bdev->md_len), num_blocks); 2575 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 2576 offset, num_io_blocks, 0); 2577 expected_io->md_buf = (char *)g_bdev_mgr.zero_buffer + num_io_blocks * bdev->blocklen; 2578 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2579 offset += num_io_blocks; 2580 num_requests++; 2581 } 2582 2583 rc = spdk_bdev_write_zeroes_blocks(desc, ioch, 0, num_blocks, io_done, NULL); 2584 CU_ASSERT_EQUAL(rc, 0); 2585 num_completed = stub_complete_io(num_requests); 2586 CU_ASSERT_EQUAL(num_completed, num_requests); 2587 2588 ut_enable_io_type(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, true); 2589 spdk_put_io_channel(ioch); 2590 spdk_bdev_close(desc); 2591 free_bdev(bdev); 2592 spdk_bdev_finish(bdev_fini_cb, NULL); 2593 poll_threads(); 2594 } 2595 2596 static void 2597 bdev_open_while_hotremove(void) 2598 { 2599 struct spdk_bdev *bdev; 2600 struct spdk_bdev_desc *desc[2] = {}; 2601 int rc; 2602 2603 bdev = allocate_bdev("bdev"); 2604 2605 rc = spdk_bdev_open_ext("bdev", false, bdev_ut_event_cb, NULL, &desc[0]); 2606 CU_ASSERT(rc == 0); 2607 SPDK_CU_ASSERT_FATAL(desc[0] != NULL); 2608 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc[0])); 2609 2610 spdk_bdev_unregister(bdev, NULL, NULL); 2611 2612 rc = spdk_bdev_open_ext("bdev", false, bdev_ut_event_cb, NULL, &desc[1]); 2613 CU_ASSERT(rc == -ENODEV); 2614 SPDK_CU_ASSERT_FATAL(desc[1] == NULL); 2615 2616 spdk_bdev_close(desc[0]); 2617 free_bdev(bdev); 2618 } 2619 2620 static void 2621 bdev_close_while_hotremove(void) 2622 { 2623 struct spdk_bdev *bdev; 2624 struct spdk_bdev_desc *desc = NULL; 2625 int rc = 0; 2626 2627 bdev = allocate_bdev("bdev"); 2628 2629 rc = spdk_bdev_open_ext("bdev", true, bdev_open_cb1, &desc, &desc); 2630 CU_ASSERT_EQUAL(rc, 0); 2631 SPDK_CU_ASSERT_FATAL(desc != NULL); 2632 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 2633 2634 /* Simulate hot-unplug by unregistering bdev */ 2635 g_event_type1 = 0xFF; 2636 g_unregister_arg = NULL; 2637 g_unregister_rc = -1; 2638 spdk_bdev_unregister(bdev, bdev_unregister_cb, (void *)0x12345678); 2639 /* Close device while remove event is in flight */ 2640 spdk_bdev_close(desc); 2641 2642 /* Ensure that unregister callback is delayed */ 2643 CU_ASSERT_EQUAL(g_unregister_arg, NULL); 2644 CU_ASSERT_EQUAL(g_unregister_rc, -1); 2645 2646 poll_threads(); 2647 2648 /* Event callback shall not be issued because device was closed */ 2649 CU_ASSERT_EQUAL(g_event_type1, 0xFF); 2650 /* Unregister callback is issued */ 2651 CU_ASSERT_EQUAL(g_unregister_arg, (void *)0x12345678); 2652 CU_ASSERT_EQUAL(g_unregister_rc, 0); 2653 2654 free_bdev(bdev); 2655 } 2656 2657 static void 2658 bdev_open_ext(void) 2659 { 2660 struct spdk_bdev *bdev; 2661 struct spdk_bdev_desc *desc1 = NULL; 2662 struct spdk_bdev_desc *desc2 = NULL; 2663 int rc = 0; 2664 2665 bdev = allocate_bdev("bdev"); 2666 2667 rc = spdk_bdev_open_ext("bdev", true, NULL, NULL, &desc1); 2668 CU_ASSERT_EQUAL(rc, -EINVAL); 2669 2670 rc = spdk_bdev_open_ext("bdev", true, bdev_open_cb1, &desc1, &desc1); 2671 CU_ASSERT_EQUAL(rc, 0); 2672 2673 rc = spdk_bdev_open_ext("bdev", true, bdev_open_cb2, &desc2, &desc2); 2674 CU_ASSERT_EQUAL(rc, 0); 2675 2676 g_event_type1 = 0xFF; 2677 g_event_type2 = 0xFF; 2678 2679 /* Simulate hot-unplug by unregistering bdev */ 2680 spdk_bdev_unregister(bdev, NULL, NULL); 2681 poll_threads(); 2682 2683 /* Check if correct events have been triggered in event callback fn */ 2684 CU_ASSERT_EQUAL(g_event_type1, SPDK_BDEV_EVENT_REMOVE); 2685 CU_ASSERT_EQUAL(g_event_type2, SPDK_BDEV_EVENT_REMOVE); 2686 2687 free_bdev(bdev); 2688 poll_threads(); 2689 } 2690 2691 struct timeout_io_cb_arg { 2692 struct iovec iov; 2693 uint8_t type; 2694 }; 2695 2696 static int 2697 bdev_channel_count_submitted_io(struct spdk_bdev_channel *ch) 2698 { 2699 struct spdk_bdev_io *bdev_io; 2700 int n = 0; 2701 2702 if (!ch) { 2703 return -1; 2704 } 2705 2706 TAILQ_FOREACH(bdev_io, &ch->io_submitted, internal.ch_link) { 2707 n++; 2708 } 2709 2710 return n; 2711 } 2712 2713 static void 2714 bdev_channel_io_timeout_cb(void *cb_arg, struct spdk_bdev_io *bdev_io) 2715 { 2716 struct timeout_io_cb_arg *ctx = cb_arg; 2717 2718 ctx->type = bdev_io->type; 2719 ctx->iov.iov_base = bdev_io->iov.iov_base; 2720 ctx->iov.iov_len = bdev_io->iov.iov_len; 2721 } 2722 2723 static void 2724 bdev_set_io_timeout(void) 2725 { 2726 struct spdk_bdev *bdev; 2727 struct spdk_bdev_desc *desc = NULL; 2728 struct spdk_io_channel *io_ch = NULL; 2729 struct spdk_bdev_channel *bdev_ch = NULL; 2730 struct timeout_io_cb_arg cb_arg; 2731 2732 spdk_bdev_initialize(bdev_init_cb, NULL); 2733 2734 bdev = allocate_bdev("bdev"); 2735 2736 CU_ASSERT(spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc) == 0); 2737 SPDK_CU_ASSERT_FATAL(desc != NULL); 2738 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 2739 2740 io_ch = spdk_bdev_get_io_channel(desc); 2741 CU_ASSERT(io_ch != NULL); 2742 2743 bdev_ch = spdk_io_channel_get_ctx(io_ch); 2744 CU_ASSERT(TAILQ_EMPTY(&bdev_ch->io_submitted)); 2745 2746 /* This is the part1. 2747 * We will check the bdev_ch->io_submitted list 2748 * TO make sure that it can link IOs and only the user submitted IOs 2749 */ 2750 CU_ASSERT(spdk_bdev_read(desc, io_ch, (void *)0x1000, 0, 4096, io_done, NULL) == 0); 2751 CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 1); 2752 CU_ASSERT(spdk_bdev_write(desc, io_ch, (void *)0x2000, 0, 4096, io_done, NULL) == 0); 2753 CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 2); 2754 stub_complete_io(1); 2755 CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 1); 2756 stub_complete_io(1); 2757 CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 0); 2758 2759 /* Split IO */ 2760 bdev->optimal_io_boundary = 16; 2761 bdev->split_on_optimal_io_boundary = true; 2762 2763 /* Now test that a single-vector command is split correctly. 2764 * Offset 14, length 8, payload 0xF000 2765 * Child - Offset 14, length 2, payload 0xF000 2766 * Child - Offset 16, length 6, payload 0xF000 + 2 * 512 2767 * 2768 * Set up the expected values before calling spdk_bdev_read_blocks 2769 */ 2770 CU_ASSERT(spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 8, io_done, NULL) == 0); 2771 /* We count all submitted IOs including IO that are generated by splitting. */ 2772 CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 3); 2773 stub_complete_io(1); 2774 CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 2); 2775 stub_complete_io(1); 2776 CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 0); 2777 2778 /* Also include the reset IO */ 2779 CU_ASSERT(spdk_bdev_reset(desc, io_ch, io_done, NULL) == 0); 2780 CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 1); 2781 poll_threads(); 2782 stub_complete_io(1); 2783 poll_threads(); 2784 CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 0); 2785 2786 /* This is part2 2787 * Test the desc timeout poller register 2788 */ 2789 2790 /* Successfully set the timeout */ 2791 CU_ASSERT(spdk_bdev_set_timeout(desc, 30, bdev_channel_io_timeout_cb, &cb_arg) == 0); 2792 CU_ASSERT(desc->io_timeout_poller != NULL); 2793 CU_ASSERT(desc->timeout_in_sec == 30); 2794 CU_ASSERT(desc->cb_fn == bdev_channel_io_timeout_cb); 2795 CU_ASSERT(desc->cb_arg == &cb_arg); 2796 2797 /* Change the timeout limit */ 2798 CU_ASSERT(spdk_bdev_set_timeout(desc, 20, bdev_channel_io_timeout_cb, &cb_arg) == 0); 2799 CU_ASSERT(desc->io_timeout_poller != NULL); 2800 CU_ASSERT(desc->timeout_in_sec == 20); 2801 CU_ASSERT(desc->cb_fn == bdev_channel_io_timeout_cb); 2802 CU_ASSERT(desc->cb_arg == &cb_arg); 2803 2804 /* Disable the timeout */ 2805 CU_ASSERT(spdk_bdev_set_timeout(desc, 0, NULL, NULL) == 0); 2806 CU_ASSERT(desc->io_timeout_poller == NULL); 2807 2808 /* This the part3 2809 * We will test to catch timeout IO and check whether the IO is 2810 * the submitted one. 2811 */ 2812 memset(&cb_arg, 0, sizeof(cb_arg)); 2813 CU_ASSERT(spdk_bdev_set_timeout(desc, 30, bdev_channel_io_timeout_cb, &cb_arg) == 0); 2814 CU_ASSERT(spdk_bdev_write_blocks(desc, io_ch, (void *)0x1000, 0, 1, io_done, NULL) == 0); 2815 2816 /* Don't reach the limit */ 2817 spdk_delay_us(15 * spdk_get_ticks_hz()); 2818 poll_threads(); 2819 CU_ASSERT(cb_arg.type == 0); 2820 CU_ASSERT(cb_arg.iov.iov_base == (void *)0x0); 2821 CU_ASSERT(cb_arg.iov.iov_len == 0); 2822 2823 /* 15 + 15 = 30 reach the limit */ 2824 spdk_delay_us(15 * spdk_get_ticks_hz()); 2825 poll_threads(); 2826 CU_ASSERT(cb_arg.type == SPDK_BDEV_IO_TYPE_WRITE); 2827 CU_ASSERT(cb_arg.iov.iov_base == (void *)0x1000); 2828 CU_ASSERT(cb_arg.iov.iov_len == 1 * bdev->blocklen); 2829 stub_complete_io(1); 2830 2831 /* Use the same split IO above and check the IO */ 2832 memset(&cb_arg, 0, sizeof(cb_arg)); 2833 CU_ASSERT(spdk_bdev_write_blocks(desc, io_ch, (void *)0xF000, 14, 8, io_done, NULL) == 0); 2834 2835 /* The first child complete in time */ 2836 spdk_delay_us(15 * spdk_get_ticks_hz()); 2837 poll_threads(); 2838 stub_complete_io(1); 2839 CU_ASSERT(cb_arg.type == 0); 2840 CU_ASSERT(cb_arg.iov.iov_base == (void *)0x0); 2841 CU_ASSERT(cb_arg.iov.iov_len == 0); 2842 2843 /* The second child reach the limit */ 2844 spdk_delay_us(15 * spdk_get_ticks_hz()); 2845 poll_threads(); 2846 CU_ASSERT(cb_arg.type == SPDK_BDEV_IO_TYPE_WRITE); 2847 CU_ASSERT(cb_arg.iov.iov_base == (void *)0xF000); 2848 CU_ASSERT(cb_arg.iov.iov_len == 8 * bdev->blocklen); 2849 stub_complete_io(1); 2850 2851 /* Also include the reset IO */ 2852 memset(&cb_arg, 0, sizeof(cb_arg)); 2853 CU_ASSERT(spdk_bdev_reset(desc, io_ch, io_done, NULL) == 0); 2854 spdk_delay_us(30 * spdk_get_ticks_hz()); 2855 poll_threads(); 2856 CU_ASSERT(cb_arg.type == SPDK_BDEV_IO_TYPE_RESET); 2857 stub_complete_io(1); 2858 poll_threads(); 2859 2860 spdk_put_io_channel(io_ch); 2861 spdk_bdev_close(desc); 2862 free_bdev(bdev); 2863 spdk_bdev_finish(bdev_fini_cb, NULL); 2864 poll_threads(); 2865 } 2866 2867 static void 2868 lba_range_overlap(void) 2869 { 2870 struct lba_range r1, r2; 2871 2872 r1.offset = 100; 2873 r1.length = 50; 2874 2875 r2.offset = 0; 2876 r2.length = 1; 2877 CU_ASSERT(!bdev_lba_range_overlapped(&r1, &r2)); 2878 2879 r2.offset = 0; 2880 r2.length = 100; 2881 CU_ASSERT(!bdev_lba_range_overlapped(&r1, &r2)); 2882 2883 r2.offset = 0; 2884 r2.length = 110; 2885 CU_ASSERT(bdev_lba_range_overlapped(&r1, &r2)); 2886 2887 r2.offset = 100; 2888 r2.length = 10; 2889 CU_ASSERT(bdev_lba_range_overlapped(&r1, &r2)); 2890 2891 r2.offset = 110; 2892 r2.length = 20; 2893 CU_ASSERT(bdev_lba_range_overlapped(&r1, &r2)); 2894 2895 r2.offset = 140; 2896 r2.length = 150; 2897 CU_ASSERT(bdev_lba_range_overlapped(&r1, &r2)); 2898 2899 r2.offset = 130; 2900 r2.length = 200; 2901 CU_ASSERT(bdev_lba_range_overlapped(&r1, &r2)); 2902 2903 r2.offset = 150; 2904 r2.length = 100; 2905 CU_ASSERT(!bdev_lba_range_overlapped(&r1, &r2)); 2906 2907 r2.offset = 110; 2908 r2.length = 0; 2909 CU_ASSERT(!bdev_lba_range_overlapped(&r1, &r2)); 2910 } 2911 2912 static bool g_lock_lba_range_done; 2913 static bool g_unlock_lba_range_done; 2914 2915 static void 2916 lock_lba_range_done(void *ctx, int status) 2917 { 2918 g_lock_lba_range_done = true; 2919 } 2920 2921 static void 2922 unlock_lba_range_done(void *ctx, int status) 2923 { 2924 g_unlock_lba_range_done = true; 2925 } 2926 2927 static void 2928 lock_lba_range_check_ranges(void) 2929 { 2930 struct spdk_bdev *bdev; 2931 struct spdk_bdev_desc *desc = NULL; 2932 struct spdk_io_channel *io_ch; 2933 struct spdk_bdev_channel *channel; 2934 struct lba_range *range; 2935 int ctx1; 2936 int rc; 2937 2938 spdk_bdev_initialize(bdev_init_cb, NULL); 2939 2940 bdev = allocate_bdev("bdev0"); 2941 2942 rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc); 2943 CU_ASSERT(rc == 0); 2944 CU_ASSERT(desc != NULL); 2945 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 2946 io_ch = spdk_bdev_get_io_channel(desc); 2947 CU_ASSERT(io_ch != NULL); 2948 channel = spdk_io_channel_get_ctx(io_ch); 2949 2950 g_lock_lba_range_done = false; 2951 rc = bdev_lock_lba_range(desc, io_ch, 20, 10, lock_lba_range_done, &ctx1); 2952 CU_ASSERT(rc == 0); 2953 poll_threads(); 2954 2955 CU_ASSERT(g_lock_lba_range_done == true); 2956 range = TAILQ_FIRST(&channel->locked_ranges); 2957 SPDK_CU_ASSERT_FATAL(range != NULL); 2958 CU_ASSERT(range->offset == 20); 2959 CU_ASSERT(range->length == 10); 2960 CU_ASSERT(range->owner_ch == channel); 2961 2962 /* Unlocks must exactly match a lock. */ 2963 g_unlock_lba_range_done = false; 2964 rc = bdev_unlock_lba_range(desc, io_ch, 20, 1, unlock_lba_range_done, &ctx1); 2965 CU_ASSERT(rc == -EINVAL); 2966 CU_ASSERT(g_unlock_lba_range_done == false); 2967 2968 rc = bdev_unlock_lba_range(desc, io_ch, 20, 10, unlock_lba_range_done, &ctx1); 2969 CU_ASSERT(rc == 0); 2970 spdk_delay_us(100); 2971 poll_threads(); 2972 2973 CU_ASSERT(g_unlock_lba_range_done == true); 2974 CU_ASSERT(TAILQ_EMPTY(&channel->locked_ranges)); 2975 2976 spdk_put_io_channel(io_ch); 2977 spdk_bdev_close(desc); 2978 free_bdev(bdev); 2979 spdk_bdev_finish(bdev_fini_cb, NULL); 2980 poll_threads(); 2981 } 2982 2983 static void 2984 lock_lba_range_with_io_outstanding(void) 2985 { 2986 struct spdk_bdev *bdev; 2987 struct spdk_bdev_desc *desc = NULL; 2988 struct spdk_io_channel *io_ch; 2989 struct spdk_bdev_channel *channel; 2990 struct lba_range *range; 2991 char buf[4096]; 2992 int ctx1; 2993 int rc; 2994 2995 spdk_bdev_initialize(bdev_init_cb, NULL); 2996 2997 bdev = allocate_bdev("bdev0"); 2998 2999 rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc); 3000 CU_ASSERT(rc == 0); 3001 CU_ASSERT(desc != NULL); 3002 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 3003 io_ch = spdk_bdev_get_io_channel(desc); 3004 CU_ASSERT(io_ch != NULL); 3005 channel = spdk_io_channel_get_ctx(io_ch); 3006 3007 g_io_done = false; 3008 rc = spdk_bdev_read_blocks(desc, io_ch, buf, 20, 1, io_done, &ctx1); 3009 CU_ASSERT(rc == 0); 3010 3011 g_lock_lba_range_done = false; 3012 rc = bdev_lock_lba_range(desc, io_ch, 20, 10, lock_lba_range_done, &ctx1); 3013 CU_ASSERT(rc == 0); 3014 poll_threads(); 3015 3016 /* The lock should immediately become valid, since there are no outstanding 3017 * write I/O. 3018 */ 3019 CU_ASSERT(g_io_done == false); 3020 CU_ASSERT(g_lock_lba_range_done == true); 3021 range = TAILQ_FIRST(&channel->locked_ranges); 3022 SPDK_CU_ASSERT_FATAL(range != NULL); 3023 CU_ASSERT(range->offset == 20); 3024 CU_ASSERT(range->length == 10); 3025 CU_ASSERT(range->owner_ch == channel); 3026 CU_ASSERT(range->locked_ctx == &ctx1); 3027 3028 rc = bdev_unlock_lba_range(desc, io_ch, 20, 10, lock_lba_range_done, &ctx1); 3029 CU_ASSERT(rc == 0); 3030 stub_complete_io(1); 3031 spdk_delay_us(100); 3032 poll_threads(); 3033 3034 CU_ASSERT(TAILQ_EMPTY(&channel->locked_ranges)); 3035 3036 /* Now try again, but with a write I/O. */ 3037 g_io_done = false; 3038 rc = spdk_bdev_write_blocks(desc, io_ch, buf, 20, 1, io_done, &ctx1); 3039 CU_ASSERT(rc == 0); 3040 3041 g_lock_lba_range_done = false; 3042 rc = bdev_lock_lba_range(desc, io_ch, 20, 10, lock_lba_range_done, &ctx1); 3043 CU_ASSERT(rc == 0); 3044 poll_threads(); 3045 3046 /* The lock should not be fully valid yet, since a write I/O is outstanding. 3047 * But note that the range should be on the channel's locked_list, to make sure no 3048 * new write I/O are started. 3049 */ 3050 CU_ASSERT(g_io_done == false); 3051 CU_ASSERT(g_lock_lba_range_done == false); 3052 range = TAILQ_FIRST(&channel->locked_ranges); 3053 SPDK_CU_ASSERT_FATAL(range != NULL); 3054 CU_ASSERT(range->offset == 20); 3055 CU_ASSERT(range->length == 10); 3056 3057 /* Complete the write I/O. This should make the lock valid (checked by confirming 3058 * our callback was invoked). 3059 */ 3060 stub_complete_io(1); 3061 spdk_delay_us(100); 3062 poll_threads(); 3063 CU_ASSERT(g_io_done == true); 3064 CU_ASSERT(g_lock_lba_range_done == true); 3065 3066 rc = bdev_unlock_lba_range(desc, io_ch, 20, 10, unlock_lba_range_done, &ctx1); 3067 CU_ASSERT(rc == 0); 3068 poll_threads(); 3069 3070 CU_ASSERT(TAILQ_EMPTY(&channel->locked_ranges)); 3071 3072 spdk_put_io_channel(io_ch); 3073 spdk_bdev_close(desc); 3074 free_bdev(bdev); 3075 spdk_bdev_finish(bdev_fini_cb, NULL); 3076 poll_threads(); 3077 } 3078 3079 static void 3080 lock_lba_range_overlapped(void) 3081 { 3082 struct spdk_bdev *bdev; 3083 struct spdk_bdev_desc *desc = NULL; 3084 struct spdk_io_channel *io_ch; 3085 struct spdk_bdev_channel *channel; 3086 struct lba_range *range; 3087 int ctx1; 3088 int rc; 3089 3090 spdk_bdev_initialize(bdev_init_cb, NULL); 3091 3092 bdev = allocate_bdev("bdev0"); 3093 3094 rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc); 3095 CU_ASSERT(rc == 0); 3096 CU_ASSERT(desc != NULL); 3097 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 3098 io_ch = spdk_bdev_get_io_channel(desc); 3099 CU_ASSERT(io_ch != NULL); 3100 channel = spdk_io_channel_get_ctx(io_ch); 3101 3102 /* Lock range 20-29. */ 3103 g_lock_lba_range_done = false; 3104 rc = bdev_lock_lba_range(desc, io_ch, 20, 10, lock_lba_range_done, &ctx1); 3105 CU_ASSERT(rc == 0); 3106 poll_threads(); 3107 3108 CU_ASSERT(g_lock_lba_range_done == true); 3109 range = TAILQ_FIRST(&channel->locked_ranges); 3110 SPDK_CU_ASSERT_FATAL(range != NULL); 3111 CU_ASSERT(range->offset == 20); 3112 CU_ASSERT(range->length == 10); 3113 3114 /* Try to lock range 25-39. It should not lock immediately, since it overlaps with 3115 * 20-29. 3116 */ 3117 g_lock_lba_range_done = false; 3118 rc = bdev_lock_lba_range(desc, io_ch, 25, 15, lock_lba_range_done, &ctx1); 3119 CU_ASSERT(rc == 0); 3120 poll_threads(); 3121 3122 CU_ASSERT(g_lock_lba_range_done == false); 3123 range = TAILQ_FIRST(&bdev->internal.pending_locked_ranges); 3124 SPDK_CU_ASSERT_FATAL(range != NULL); 3125 CU_ASSERT(range->offset == 25); 3126 CU_ASSERT(range->length == 15); 3127 3128 /* Unlock 20-29. This should result in range 25-39 now getting locked since it 3129 * no longer overlaps with an active lock. 3130 */ 3131 g_unlock_lba_range_done = false; 3132 rc = bdev_unlock_lba_range(desc, io_ch, 20, 10, unlock_lba_range_done, &ctx1); 3133 CU_ASSERT(rc == 0); 3134 poll_threads(); 3135 3136 CU_ASSERT(g_unlock_lba_range_done == true); 3137 CU_ASSERT(TAILQ_EMPTY(&bdev->internal.pending_locked_ranges)); 3138 range = TAILQ_FIRST(&channel->locked_ranges); 3139 SPDK_CU_ASSERT_FATAL(range != NULL); 3140 CU_ASSERT(range->offset == 25); 3141 CU_ASSERT(range->length == 15); 3142 3143 /* Lock 40-59. This should immediately lock since it does not overlap with the 3144 * currently active 25-39 lock. 3145 */ 3146 g_lock_lba_range_done = false; 3147 rc = bdev_lock_lba_range(desc, io_ch, 40, 20, lock_lba_range_done, &ctx1); 3148 CU_ASSERT(rc == 0); 3149 poll_threads(); 3150 3151 CU_ASSERT(g_lock_lba_range_done == true); 3152 range = TAILQ_FIRST(&bdev->internal.locked_ranges); 3153 SPDK_CU_ASSERT_FATAL(range != NULL); 3154 range = TAILQ_NEXT(range, tailq); 3155 SPDK_CU_ASSERT_FATAL(range != NULL); 3156 CU_ASSERT(range->offset == 40); 3157 CU_ASSERT(range->length == 20); 3158 3159 /* Try to lock 35-44. Note that this overlaps with both 25-39 and 40-59. */ 3160 g_lock_lba_range_done = false; 3161 rc = bdev_lock_lba_range(desc, io_ch, 35, 10, lock_lba_range_done, &ctx1); 3162 CU_ASSERT(rc == 0); 3163 poll_threads(); 3164 3165 CU_ASSERT(g_lock_lba_range_done == false); 3166 range = TAILQ_FIRST(&bdev->internal.pending_locked_ranges); 3167 SPDK_CU_ASSERT_FATAL(range != NULL); 3168 CU_ASSERT(range->offset == 35); 3169 CU_ASSERT(range->length == 10); 3170 3171 /* Unlock 25-39. Make sure that 35-44 is still in the pending list, since 3172 * the 40-59 lock is still active. 3173 */ 3174 g_unlock_lba_range_done = false; 3175 rc = bdev_unlock_lba_range(desc, io_ch, 25, 15, unlock_lba_range_done, &ctx1); 3176 CU_ASSERT(rc == 0); 3177 poll_threads(); 3178 3179 CU_ASSERT(g_unlock_lba_range_done == true); 3180 CU_ASSERT(g_lock_lba_range_done == false); 3181 range = TAILQ_FIRST(&bdev->internal.pending_locked_ranges); 3182 SPDK_CU_ASSERT_FATAL(range != NULL); 3183 CU_ASSERT(range->offset == 35); 3184 CU_ASSERT(range->length == 10); 3185 3186 /* Unlock 40-59. This should result in 35-44 now getting locked, since there are 3187 * no longer any active overlapping locks. 3188 */ 3189 g_unlock_lba_range_done = false; 3190 rc = bdev_unlock_lba_range(desc, io_ch, 40, 20, unlock_lba_range_done, &ctx1); 3191 CU_ASSERT(rc == 0); 3192 poll_threads(); 3193 3194 CU_ASSERT(g_unlock_lba_range_done == true); 3195 CU_ASSERT(g_lock_lba_range_done == true); 3196 CU_ASSERT(TAILQ_EMPTY(&bdev->internal.pending_locked_ranges)); 3197 range = TAILQ_FIRST(&bdev->internal.locked_ranges); 3198 SPDK_CU_ASSERT_FATAL(range != NULL); 3199 CU_ASSERT(range->offset == 35); 3200 CU_ASSERT(range->length == 10); 3201 3202 /* Finally, unlock 35-44. */ 3203 g_unlock_lba_range_done = false; 3204 rc = bdev_unlock_lba_range(desc, io_ch, 35, 10, unlock_lba_range_done, &ctx1); 3205 CU_ASSERT(rc == 0); 3206 poll_threads(); 3207 3208 CU_ASSERT(g_unlock_lba_range_done == true); 3209 CU_ASSERT(TAILQ_EMPTY(&bdev->internal.locked_ranges)); 3210 3211 spdk_put_io_channel(io_ch); 3212 spdk_bdev_close(desc); 3213 free_bdev(bdev); 3214 spdk_bdev_finish(bdev_fini_cb, NULL); 3215 poll_threads(); 3216 } 3217 3218 static void 3219 abort_done(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg) 3220 { 3221 g_abort_done = true; 3222 g_abort_status = bdev_io->internal.status; 3223 spdk_bdev_free_io(bdev_io); 3224 } 3225 3226 static void 3227 bdev_io_abort(void) 3228 { 3229 struct spdk_bdev *bdev; 3230 struct spdk_bdev_desc *desc = NULL; 3231 struct spdk_io_channel *io_ch; 3232 struct spdk_bdev_channel *channel; 3233 struct spdk_bdev_mgmt_channel *mgmt_ch; 3234 struct spdk_bdev_opts bdev_opts = { 3235 .bdev_io_pool_size = 7, 3236 .bdev_io_cache_size = 2, 3237 }; 3238 struct iovec iov[BDEV_IO_NUM_CHILD_IOV * 2]; 3239 uint64_t io_ctx1 = 0, io_ctx2 = 0, i; 3240 int rc; 3241 3242 rc = spdk_bdev_set_opts(&bdev_opts); 3243 CU_ASSERT(rc == 0); 3244 spdk_bdev_initialize(bdev_init_cb, NULL); 3245 3246 bdev = allocate_bdev("bdev0"); 3247 3248 rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc); 3249 CU_ASSERT(rc == 0); 3250 CU_ASSERT(desc != NULL); 3251 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 3252 io_ch = spdk_bdev_get_io_channel(desc); 3253 CU_ASSERT(io_ch != NULL); 3254 channel = spdk_io_channel_get_ctx(io_ch); 3255 mgmt_ch = channel->shared_resource->mgmt_ch; 3256 3257 g_abort_done = false; 3258 3259 ut_enable_io_type(SPDK_BDEV_IO_TYPE_ABORT, false); 3260 3261 rc = spdk_bdev_abort(desc, io_ch, &io_ctx1, abort_done, NULL); 3262 CU_ASSERT(rc == -ENOTSUP); 3263 3264 ut_enable_io_type(SPDK_BDEV_IO_TYPE_ABORT, true); 3265 3266 rc = spdk_bdev_abort(desc, io_ch, &io_ctx2, abort_done, NULL); 3267 CU_ASSERT(rc == 0); 3268 CU_ASSERT(g_abort_done == true); 3269 CU_ASSERT(g_abort_status == SPDK_BDEV_IO_STATUS_FAILED); 3270 3271 /* Test the case that the target I/O was successfully aborted. */ 3272 g_io_done = false; 3273 3274 rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, &io_ctx1); 3275 CU_ASSERT(rc == 0); 3276 CU_ASSERT(g_io_done == false); 3277 3278 g_abort_done = false; 3279 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 3280 3281 rc = spdk_bdev_abort(desc, io_ch, &io_ctx1, abort_done, NULL); 3282 CU_ASSERT(rc == 0); 3283 CU_ASSERT(g_io_done == true); 3284 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED); 3285 stub_complete_io(1); 3286 CU_ASSERT(g_abort_done == true); 3287 CU_ASSERT(g_abort_status == SPDK_BDEV_IO_STATUS_SUCCESS); 3288 3289 /* Test the case that the target I/O was not aborted because it completed 3290 * in the middle of execution of the abort. 3291 */ 3292 g_io_done = false; 3293 3294 rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, &io_ctx1); 3295 CU_ASSERT(rc == 0); 3296 CU_ASSERT(g_io_done == false); 3297 3298 g_abort_done = false; 3299 g_io_exp_status = SPDK_BDEV_IO_STATUS_FAILED; 3300 3301 rc = spdk_bdev_abort(desc, io_ch, &io_ctx1, abort_done, NULL); 3302 CU_ASSERT(rc == 0); 3303 CU_ASSERT(g_io_done == false); 3304 3305 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 3306 stub_complete_io(1); 3307 CU_ASSERT(g_io_done == true); 3308 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS); 3309 3310 g_io_exp_status = SPDK_BDEV_IO_STATUS_FAILED; 3311 stub_complete_io(1); 3312 CU_ASSERT(g_abort_done == true); 3313 CU_ASSERT(g_abort_status == SPDK_BDEV_IO_STATUS_SUCCESS); 3314 3315 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 3316 3317 bdev->optimal_io_boundary = 16; 3318 bdev->split_on_optimal_io_boundary = true; 3319 3320 /* Test that a single-vector command which is split is aborted correctly. 3321 * Offset 14, length 8, payload 0xF000 3322 * Child - Offset 14, length 2, payload 0xF000 3323 * Child - Offset 16, length 6, payload 0xF000 + 2 * 512 3324 */ 3325 g_io_done = false; 3326 3327 rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 8, io_done, &io_ctx1); 3328 CU_ASSERT(rc == 0); 3329 CU_ASSERT(g_io_done == false); 3330 3331 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 3332 3333 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 3334 3335 rc = spdk_bdev_abort(desc, io_ch, &io_ctx1, abort_done, NULL); 3336 CU_ASSERT(rc == 0); 3337 CU_ASSERT(g_io_done == true); 3338 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED); 3339 stub_complete_io(2); 3340 CU_ASSERT(g_abort_done == true); 3341 CU_ASSERT(g_abort_status == SPDK_BDEV_IO_STATUS_SUCCESS); 3342 3343 /* Test that a multi-vector command that needs to be split by strip and then 3344 * needs to be split is aborted correctly. Abort is requested before the second 3345 * child I/O was submitted. The parent I/O should complete with failure without 3346 * submitting the second child I/O. 3347 */ 3348 for (i = 0; i < BDEV_IO_NUM_CHILD_IOV * 2; i++) { 3349 iov[i].iov_base = (void *)((i + 1) * 0x10000); 3350 iov[i].iov_len = 512; 3351 } 3352 3353 bdev->optimal_io_boundary = BDEV_IO_NUM_CHILD_IOV; 3354 g_io_done = false; 3355 rc = spdk_bdev_readv_blocks(desc, io_ch, iov, BDEV_IO_NUM_CHILD_IOV * 2, 0, 3356 BDEV_IO_NUM_CHILD_IOV * 2, io_done, &io_ctx1); 3357 CU_ASSERT(rc == 0); 3358 CU_ASSERT(g_io_done == false); 3359 3360 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 3361 3362 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 3363 3364 rc = spdk_bdev_abort(desc, io_ch, &io_ctx1, abort_done, NULL); 3365 CU_ASSERT(rc == 0); 3366 CU_ASSERT(g_io_done == true); 3367 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED); 3368 stub_complete_io(1); 3369 CU_ASSERT(g_abort_done == true); 3370 CU_ASSERT(g_abort_status == SPDK_BDEV_IO_STATUS_SUCCESS); 3371 3372 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 3373 3374 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 3375 3376 bdev->optimal_io_boundary = 16; 3377 g_io_done = false; 3378 3379 /* Test that a ingle-vector command which is split is aborted correctly. 3380 * Differently from the above, the child abort request will be submitted 3381 * sequentially due to the capacity of spdk_bdev_io. 3382 */ 3383 rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 50, io_done, &io_ctx1); 3384 CU_ASSERT(rc == 0); 3385 CU_ASSERT(g_io_done == false); 3386 3387 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 4); 3388 3389 g_abort_done = false; 3390 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 3391 3392 rc = spdk_bdev_abort(desc, io_ch, &io_ctx1, abort_done, NULL); 3393 CU_ASSERT(rc == 0); 3394 CU_ASSERT(!TAILQ_EMPTY(&mgmt_ch->io_wait_queue)); 3395 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 4); 3396 3397 stub_complete_io(1); 3398 CU_ASSERT(g_io_done == true); 3399 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED); 3400 stub_complete_io(3); 3401 CU_ASSERT(g_abort_done == true); 3402 CU_ASSERT(g_abort_status == SPDK_BDEV_IO_STATUS_SUCCESS); 3403 3404 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 3405 3406 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 3407 3408 spdk_put_io_channel(io_ch); 3409 spdk_bdev_close(desc); 3410 free_bdev(bdev); 3411 spdk_bdev_finish(bdev_fini_cb, NULL); 3412 poll_threads(); 3413 } 3414 3415 int 3416 main(int argc, char **argv) 3417 { 3418 CU_pSuite suite = NULL; 3419 unsigned int num_failures; 3420 3421 CU_set_error_action(CUEA_ABORT); 3422 CU_initialize_registry(); 3423 3424 suite = CU_add_suite("bdev", null_init, null_clean); 3425 3426 CU_ADD_TEST(suite, bytes_to_blocks_test); 3427 CU_ADD_TEST(suite, num_blocks_test); 3428 CU_ADD_TEST(suite, io_valid_test); 3429 CU_ADD_TEST(suite, open_write_test); 3430 CU_ADD_TEST(suite, alias_add_del_test); 3431 CU_ADD_TEST(suite, get_device_stat_test); 3432 CU_ADD_TEST(suite, bdev_io_types_test); 3433 CU_ADD_TEST(suite, bdev_io_wait_test); 3434 CU_ADD_TEST(suite, bdev_io_spans_boundary_test); 3435 CU_ADD_TEST(suite, bdev_io_split_test); 3436 CU_ADD_TEST(suite, bdev_io_split_with_io_wait); 3437 CU_ADD_TEST(suite, bdev_io_alignment_with_boundary); 3438 CU_ADD_TEST(suite, bdev_io_alignment); 3439 CU_ADD_TEST(suite, bdev_histograms); 3440 CU_ADD_TEST(suite, bdev_write_zeroes); 3441 CU_ADD_TEST(suite, bdev_compare_and_write); 3442 CU_ADD_TEST(suite, bdev_compare); 3443 CU_ADD_TEST(suite, bdev_open_while_hotremove); 3444 CU_ADD_TEST(suite, bdev_close_while_hotremove); 3445 CU_ADD_TEST(suite, bdev_open_ext); 3446 CU_ADD_TEST(suite, bdev_set_io_timeout); 3447 CU_ADD_TEST(suite, lba_range_overlap); 3448 CU_ADD_TEST(suite, lock_lba_range_check_ranges); 3449 CU_ADD_TEST(suite, lock_lba_range_with_io_outstanding); 3450 CU_ADD_TEST(suite, lock_lba_range_overlapped); 3451 CU_ADD_TEST(suite, bdev_io_abort); 3452 3453 allocate_cores(1); 3454 allocate_threads(1); 3455 set_thread(0); 3456 3457 CU_basic_set_mode(CU_BRM_VERBOSE); 3458 CU_basic_run_tests(); 3459 num_failures = CU_get_number_of_failures(); 3460 CU_cleanup_registry(); 3461 3462 free_threads(); 3463 free_cores(); 3464 3465 return num_failures; 3466 } 3467