/*-
 *   BSD LICENSE
 *
 *   Copyright (c) Intel Corporation. All rights reserved.
 *   Copyright (c) 2019 Mellanox Technologies LTD. All rights reserved.
 *   Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include "spdk_cunit.h"

#include "common/lib/ut_multithread.c"
#include "unit/lib/json_mock.c"

#include "spdk/config.h"
/* HACK: disable VTune integration so the unit test doesn't need VTune headers and libs to build */
#undef SPDK_CONFIG_VTUNE

#include "bdev/bdev.c"

DEFINE_STUB(spdk_notify_send, uint64_t, (const char *type, const char *ctx), 0);
DEFINE_STUB(spdk_notify_type_register, struct spdk_notify_type *, (const char *type), NULL);
DEFINE_STUB(spdk_memory_domain_get_dma_device_id, const char *, (struct spdk_memory_domain *domain),
	    "test_domain");
DEFINE_STUB(spdk_memory_domain_get_dma_device_type, enum spdk_dma_device_type,
	    (struct spdk_memory_domain *domain), 0);

static bool g_memory_domain_pull_data_called;
static bool g_memory_domain_push_data_called;

DEFINE_RETURN_MOCK(spdk_memory_domain_pull_data, int);
int
spdk_memory_domain_pull_data(struct spdk_memory_domain *src_domain, void *src_domain_ctx,
			     struct iovec *src_iov, uint32_t src_iov_cnt, struct iovec *dst_iov, uint32_t dst_iov_cnt,
			     spdk_memory_domain_data_cpl_cb cpl_cb, void *cpl_cb_arg)
{
	g_memory_domain_pull_data_called = true;
	HANDLE_RETURN_MOCK(spdk_memory_domain_pull_data);
	cpl_cb(cpl_cb_arg, 0);
	return 0;
}

DEFINE_RETURN_MOCK(spdk_memory_domain_push_data, int);
int
spdk_memory_domain_push_data(struct spdk_memory_domain *dst_domain, void *dst_domain_ctx,
			     struct iovec *dst_iov, uint32_t dst_iovcnt, struct iovec *src_iov, uint32_t src_iovcnt,
			     spdk_memory_domain_data_cpl_cb cpl_cb, void *cpl_cb_arg)
{
	g_memory_domain_push_data_called = true;
	HANDLE_RETURN_MOCK(spdk_memory_domain_push_data);
	cpl_cb(cpl_cb_arg, 0);
	return 0;
}

int g_status;
int g_count;
enum spdk_bdev_event_type g_event_type1;
enum spdk_bdev_event_type g_event_type2;
enum spdk_bdev_event_type g_event_type3;
enum spdk_bdev_event_type g_event_type4;
struct spdk_histogram_data *g_histogram;
void *g_unregister_arg;
int g_unregister_rc;

void
spdk_scsi_nvme_translate(const struct spdk_bdev_io *bdev_io,
			 int *sc, int *sk, int *asc, int *ascq)
{
}

static int
null_init(void)
{
	return 0;
}

static int
null_clean(void)
{
	return 0;
}

static int
stub_destruct(void *ctx)
{
	return 0;
}

struct ut_expected_io {
	uint8_t type;
	uint64_t offset;
	uint64_t length;
	int iovcnt;
	struct iovec iov[BDEV_IO_NUM_CHILD_IOV];
	void *md_buf;
	struct spdk_bdev_ext_io_opts *ext_io_opts;
	TAILQ_ENTRY(ut_expected_io) link;
};

struct bdev_ut_channel {
	TAILQ_HEAD(, spdk_bdev_io) outstanding_io;
	uint32_t outstanding_io_count;
	TAILQ_HEAD(, ut_expected_io) expected_io;
};

static bool g_io_done;
static struct spdk_bdev_io *g_bdev_io;
static enum spdk_bdev_io_status g_io_status;
static enum spdk_bdev_io_status g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS;
static uint32_t g_bdev_ut_io_device;
static struct bdev_ut_channel *g_bdev_ut_channel;
static void *g_compare_read_buf;
static uint32_t g_compare_read_buf_len;
static void *g_compare_write_buf;
static uint32_t g_compare_write_buf_len;
static bool g_abort_done;
static enum spdk_bdev_io_status g_abort_status;
static void *g_zcopy_read_buf;
static uint32_t g_zcopy_read_buf_len;
static void *g_zcopy_write_buf;
static uint32_t g_zcopy_write_buf_len;
static struct spdk_bdev_io *g_zcopy_bdev_io;
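/*
 * Note on the harness (editor's summary of the stubs below): tests queue
 * ut_expected_io records on the channel's expected_io list, then issue bdev
 * I/O against the stub module.  stub_submit_request() pops the head of that
 * list and CU_ASSERTs that the submitted bdev_io matches the expected type,
 * offset, length, iovecs and metadata pointer.  A minimal usage sketch,
 * using only helpers defined in this file:
 *
 *	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0, 8, 1);
 *	ut_expected_io_set_iov(expected_io, 0, buf, 8 * 512);
 *	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
 *	rc = spdk_bdev_read_blocks(desc, io_ch, buf, 0, 8, io_done, NULL);
 *	stub_complete_io(1);
 */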
static struct ut_expected_io *
ut_alloc_expected_io(uint8_t type, uint64_t offset, uint64_t length, int iovcnt)
{
	struct ut_expected_io *expected_io;

	expected_io = calloc(1, sizeof(*expected_io));
	SPDK_CU_ASSERT_FATAL(expected_io != NULL);

	expected_io->type = type;
	expected_io->offset = offset;
	expected_io->length = length;
	expected_io->iovcnt = iovcnt;

	return expected_io;
}

static void
ut_expected_io_set_iov(struct ut_expected_io *expected_io, int pos, void *base, size_t len)
{
	expected_io->iov[pos].iov_base = base;
	expected_io->iov[pos].iov_len = len;
}

static void
stub_submit_request(struct spdk_io_channel *_ch, struct spdk_bdev_io *bdev_io)
{
	struct bdev_ut_channel *ch = spdk_io_channel_get_ctx(_ch);
	struct ut_expected_io *expected_io;
	struct iovec *iov, *expected_iov;
	struct spdk_bdev_io *bio_to_abort;
	int i;

	g_bdev_io = bdev_io;

	if (g_compare_read_buf && bdev_io->type == SPDK_BDEV_IO_TYPE_READ) {
		uint32_t len = bdev_io->u.bdev.iovs[0].iov_len;

		CU_ASSERT(bdev_io->u.bdev.iovcnt == 1);
		CU_ASSERT(g_compare_read_buf_len == len);
		memcpy(bdev_io->u.bdev.iovs[0].iov_base, g_compare_read_buf, len);
	}

	if (g_compare_write_buf && bdev_io->type == SPDK_BDEV_IO_TYPE_WRITE) {
		uint32_t len = bdev_io->u.bdev.iovs[0].iov_len;

		CU_ASSERT(bdev_io->u.bdev.iovcnt == 1);
		CU_ASSERT(g_compare_write_buf_len == len);
		memcpy(g_compare_write_buf, bdev_io->u.bdev.iovs[0].iov_base, len);
	}

	if (g_compare_read_buf && bdev_io->type == SPDK_BDEV_IO_TYPE_COMPARE) {
		uint32_t len = bdev_io->u.bdev.iovs[0].iov_len;

		CU_ASSERT(bdev_io->u.bdev.iovcnt == 1);
		CU_ASSERT(g_compare_read_buf_len == len);
		if (memcmp(bdev_io->u.bdev.iovs[0].iov_base, g_compare_read_buf, len)) {
			g_io_exp_status = SPDK_BDEV_IO_STATUS_MISCOMPARE;
		}
	}

	if (bdev_io->type == SPDK_BDEV_IO_TYPE_ABORT) {
		if (g_io_exp_status == SPDK_BDEV_IO_STATUS_SUCCESS) {
			TAILQ_FOREACH(bio_to_abort, &ch->outstanding_io, module_link) {
				if (bio_to_abort == bdev_io->u.abort.bio_to_abort) {
					TAILQ_REMOVE(&ch->outstanding_io, bio_to_abort, module_link);
					ch->outstanding_io_count--;
					spdk_bdev_io_complete(bio_to_abort, SPDK_BDEV_IO_STATUS_FAILED);
					break;
				}
			}
		}
	}

	if (bdev_io->type == SPDK_BDEV_IO_TYPE_ZCOPY) {
		if (bdev_io->u.bdev.zcopy.start) {
			g_zcopy_bdev_io = bdev_io;
			if (bdev_io->u.bdev.zcopy.populate) {
				/* Start of a read */
				CU_ASSERT(g_zcopy_read_buf != NULL);
				CU_ASSERT(g_zcopy_read_buf_len > 0);
				bdev_io->u.bdev.iovs[0].iov_base = g_zcopy_read_buf;
				bdev_io->u.bdev.iovs[0].iov_len = g_zcopy_read_buf_len;
				bdev_io->u.bdev.iovcnt = 1;
			} else {
				/* Start of a write */
				CU_ASSERT(g_zcopy_write_buf != NULL);
				CU_ASSERT(g_zcopy_write_buf_len > 0);
				bdev_io->u.bdev.iovs[0].iov_base = g_zcopy_write_buf;
				bdev_io->u.bdev.iovs[0].iov_len = g_zcopy_write_buf_len;
				bdev_io->u.bdev.iovcnt = 1;
			}
		} else {
			if (bdev_io->u.bdev.zcopy.commit) {
				/* End of write */
				CU_ASSERT(bdev_io->u.bdev.iovs[0].iov_base == g_zcopy_write_buf);
				CU_ASSERT(bdev_io->u.bdev.iovs[0].iov_len == g_zcopy_write_buf_len);
				CU_ASSERT(bdev_io->u.bdev.iovcnt == 1);
				g_zcopy_write_buf = NULL;
				g_zcopy_write_buf_len = 0;
			} else {
				/* End of read */
				CU_ASSERT(bdev_io->u.bdev.iovs[0].iov_base == g_zcopy_read_buf);
				CU_ASSERT(bdev_io->u.bdev.iovs[0].iov_len == g_zcopy_read_buf_len);
				CU_ASSERT(bdev_io->u.bdev.iovcnt == 1);
				g_zcopy_read_buf = NULL;
				g_zcopy_read_buf_len = 0;
			}
		}
	}

	TAILQ_INSERT_TAIL(&ch->outstanding_io, bdev_io, module_link);
	ch->outstanding_io_count++;

	expected_io = TAILQ_FIRST(&ch->expected_io);
	if (expected_io == NULL) {
		return;
	}
	TAILQ_REMOVE(&ch->expected_io, expected_io, link);

	if (expected_io->type != SPDK_BDEV_IO_TYPE_INVALID) {
		CU_ASSERT(bdev_io->type == expected_io->type);
	}

	if (expected_io->md_buf != NULL) {
		CU_ASSERT(expected_io->md_buf == bdev_io->u.bdev.md_buf);
		if (bdev_io->u.bdev.ext_opts) {
			CU_ASSERT(expected_io->md_buf == bdev_io->u.bdev.ext_opts->metadata);
		}
	}

	if (expected_io->length == 0) {
		free(expected_io);
		return;
	}

	CU_ASSERT(expected_io->offset == bdev_io->u.bdev.offset_blocks);
	CU_ASSERT(expected_io->length == bdev_io->u.bdev.num_blocks);

	if (expected_io->iovcnt == 0) {
		free(expected_io);
		/* UNMAP, WRITE_ZEROES and FLUSH don't have iovs, so we can just return now. */
		return;
	}

	CU_ASSERT(expected_io->iovcnt == bdev_io->u.bdev.iovcnt);
	for (i = 0; i < expected_io->iovcnt; i++) {
		expected_iov = &expected_io->iov[i];
		if (bdev_io->internal.orig_iovcnt == 0) {
			iov = &bdev_io->u.bdev.iovs[i];
		} else {
			iov = &bdev_io->internal.orig_iovs[i];
		}
		CU_ASSERT(iov->iov_len == expected_iov->iov_len);
		CU_ASSERT(iov->iov_base == expected_iov->iov_base);
	}

	if (expected_io->ext_io_opts) {
		CU_ASSERT(expected_io->ext_io_opts == bdev_io->internal.ext_opts);
	}

	free(expected_io);
}

static void
stub_submit_request_get_buf_cb(struct spdk_io_channel *_ch,
			       struct spdk_bdev_io *bdev_io, bool success)
{
	CU_ASSERT(success == true);

	stub_submit_request(_ch, bdev_io);
}

static void
stub_submit_request_get_buf(struct spdk_io_channel *_ch, struct spdk_bdev_io *bdev_io)
{
	spdk_bdev_io_get_buf(bdev_io, stub_submit_request_get_buf_cb,
			     bdev_io->u.bdev.num_blocks * bdev_io->bdev->blocklen);
}
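/*
 * Completion model (summary, not part of the original harness description):
 * submitted I/Os stay on the channel's outstanding_io list until a test
 * explicitly completes them.  stub_complete_io(n) below completes up to n of
 * them in submission order, using g_io_exp_status as the completion status,
 * which lets tests assert intermediate states while a split parent I/O is
 * only partially complete.
 */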
static uint32_t
stub_complete_io(uint32_t num_to_complete)
{
	struct bdev_ut_channel *ch = g_bdev_ut_channel;
	struct spdk_bdev_io *bdev_io;
	static enum spdk_bdev_io_status io_status;
	uint32_t num_completed = 0;

	while (num_completed < num_to_complete) {
		if (TAILQ_EMPTY(&ch->outstanding_io)) {
			break;
		}
		bdev_io = TAILQ_FIRST(&ch->outstanding_io);
		TAILQ_REMOVE(&ch->outstanding_io, bdev_io, module_link);
		ch->outstanding_io_count--;
		io_status = g_io_exp_status == SPDK_BDEV_IO_STATUS_SUCCESS ? SPDK_BDEV_IO_STATUS_SUCCESS :
			    g_io_exp_status;
		spdk_bdev_io_complete(bdev_io, io_status);
		num_completed++;
	}

	return num_completed;
}

static struct spdk_io_channel *
bdev_ut_get_io_channel(void *ctx)
{
	return spdk_get_io_channel(&g_bdev_ut_io_device);
}

static bool g_io_types_supported[SPDK_BDEV_NUM_IO_TYPES] = {
	[SPDK_BDEV_IO_TYPE_READ] = true,
	[SPDK_BDEV_IO_TYPE_WRITE] = true,
	[SPDK_BDEV_IO_TYPE_COMPARE] = true,
	[SPDK_BDEV_IO_TYPE_UNMAP] = true,
	[SPDK_BDEV_IO_TYPE_FLUSH] = true,
	[SPDK_BDEV_IO_TYPE_RESET] = true,
	[SPDK_BDEV_IO_TYPE_NVME_ADMIN] = true,
	[SPDK_BDEV_IO_TYPE_NVME_IO] = true,
	[SPDK_BDEV_IO_TYPE_NVME_IO_MD] = true,
	[SPDK_BDEV_IO_TYPE_WRITE_ZEROES] = true,
	[SPDK_BDEV_IO_TYPE_ZCOPY] = true,
	[SPDK_BDEV_IO_TYPE_ABORT] = true,
};

static void
ut_enable_io_type(enum spdk_bdev_io_type io_type, bool enable)
{
	g_io_types_supported[io_type] = enable;
}

static bool
stub_io_type_supported(void *_bdev, enum spdk_bdev_io_type io_type)
{
	return g_io_types_supported[io_type];
}

static struct spdk_bdev_fn_table fn_table = {
	.destruct = stub_destruct,
	.submit_request = stub_submit_request,
	.get_io_channel = bdev_ut_get_io_channel,
	.io_type_supported = stub_io_type_supported,
};

static int
bdev_ut_create_ch(void *io_device, void *ctx_buf)
{
	struct bdev_ut_channel *ch = ctx_buf;

	CU_ASSERT(g_bdev_ut_channel == NULL);
	g_bdev_ut_channel = ch;

	TAILQ_INIT(&ch->outstanding_io);
	ch->outstanding_io_count = 0;
	TAILQ_INIT(&ch->expected_io);
	return 0;
}

static void
bdev_ut_destroy_ch(void *io_device, void *ctx_buf)
{
	CU_ASSERT(g_bdev_ut_channel != NULL);
	g_bdev_ut_channel = NULL;
}

struct spdk_bdev_module bdev_ut_if;

static int
bdev_ut_module_init(void)
{
	spdk_io_device_register(&g_bdev_ut_io_device, bdev_ut_create_ch, bdev_ut_destroy_ch,
				sizeof(struct bdev_ut_channel), NULL);
	spdk_bdev_module_init_done(&bdev_ut_if);
	return 0;
}

static void
bdev_ut_module_fini(void)
{
	spdk_io_device_unregister(&g_bdev_ut_io_device, NULL);
}

struct spdk_bdev_module bdev_ut_if = {
	.name = "bdev_ut",
	.module_init = bdev_ut_module_init,
	.module_fini = bdev_ut_module_fini,
	.async_init = true,
};

static void vbdev_ut_examine(struct spdk_bdev *bdev);

static int
vbdev_ut_module_init(void)
{
	return 0;
}

static void
vbdev_ut_module_fini(void)
{
}

struct spdk_bdev_module vbdev_ut_if = {
	.name = "vbdev_ut",
	.module_init = vbdev_ut_module_init,
	.module_fini = vbdev_ut_module_fini,
	.examine_config = vbdev_ut_examine,
};

SPDK_BDEV_MODULE_REGISTER(bdev_ut, &bdev_ut_if)
SPDK_BDEV_MODULE_REGISTER(vbdev_ut, &vbdev_ut_if)

static void
vbdev_ut_examine(struct spdk_bdev *bdev)
{
	spdk_bdev_module_examine_done(&vbdev_ut_if);
}

static struct spdk_bdev *
allocate_bdev(char *name)
{
	struct spdk_bdev *bdev;
	int rc;

	bdev = calloc(1, sizeof(*bdev));
	SPDK_CU_ASSERT_FATAL(bdev != NULL);

	bdev->name = name;
	bdev->fn_table = &fn_table;
	bdev->module = &bdev_ut_if;
	bdev->blockcnt = 1024;
	bdev->blocklen = 512;

	rc = spdk_bdev_register(bdev);
	CU_ASSERT(rc == 0);

	return bdev;
}
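/*
 * Most tests below share the same skeleton.  A minimal sketch (error checks
 * elided) using the helpers above:
 *
 *	spdk_bdev_initialize(bdev_init_cb, NULL);
 *	bdev = allocate_bdev("bdev0");
 *	rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc);
 *	io_ch = spdk_bdev_get_io_channel(desc);
 *	... issue I/O, poll_threads(), stub_complete_io() ...
 *	spdk_put_io_channel(io_ch);
 *	spdk_bdev_close(desc);
 *	free_bdev(bdev);
 *	spdk_bdev_finish(bdev_fini_cb, NULL);
 */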
static struct spdk_bdev *
allocate_vbdev(char *name)
{
	struct spdk_bdev *bdev;
	int rc;

	bdev = calloc(1, sizeof(*bdev));
	SPDK_CU_ASSERT_FATAL(bdev != NULL);

	bdev->name = name;
	bdev->fn_table = &fn_table;
	bdev->module = &vbdev_ut_if;

	rc = spdk_bdev_register(bdev);
	CU_ASSERT(rc == 0);

	return bdev;
}

static void
free_bdev(struct spdk_bdev *bdev)
{
	spdk_bdev_unregister(bdev, NULL, NULL);
	poll_threads();
	memset(bdev, 0xFF, sizeof(*bdev));
	free(bdev);
}

static void
free_vbdev(struct spdk_bdev *bdev)
{
	spdk_bdev_unregister(bdev, NULL, NULL);
	poll_threads();
	memset(bdev, 0xFF, sizeof(*bdev));
	free(bdev);
}

static void
get_device_stat_cb(struct spdk_bdev *bdev, struct spdk_bdev_io_stat *stat, void *cb_arg, int rc)
{
	const char *bdev_name;

	CU_ASSERT(bdev != NULL);
	CU_ASSERT(rc == 0);
	bdev_name = spdk_bdev_get_name(bdev);
	CU_ASSERT_STRING_EQUAL(bdev_name, "bdev0");

	free(stat);

	*(bool *)cb_arg = true;
}

static void
bdev_unregister_cb(void *cb_arg, int rc)
{
	g_unregister_arg = cb_arg;
	g_unregister_rc = rc;
}

static void
bdev_ut_event_cb(enum spdk_bdev_event_type type, struct spdk_bdev *bdev, void *event_ctx)
{
}

static void
bdev_open_cb1(enum spdk_bdev_event_type type, struct spdk_bdev *bdev, void *event_ctx)
{
	struct spdk_bdev_desc *desc = *(struct spdk_bdev_desc **)event_ctx;

	g_event_type1 = type;
	if (SPDK_BDEV_EVENT_REMOVE == type) {
		spdk_bdev_close(desc);
	}
}

static void
bdev_open_cb2(enum spdk_bdev_event_type type, struct spdk_bdev *bdev, void *event_ctx)
{
	struct spdk_bdev_desc *desc = *(struct spdk_bdev_desc **)event_ctx;

	g_event_type2 = type;
	if (SPDK_BDEV_EVENT_REMOVE == type) {
		spdk_bdev_close(desc);
	}
}

static void
bdev_open_cb3(enum spdk_bdev_event_type type, struct spdk_bdev *bdev, void *event_ctx)
{
	g_event_type3 = type;
}

static void
bdev_open_cb4(enum spdk_bdev_event_type type, struct spdk_bdev *bdev, void *event_ctx)
{
	g_event_type4 = type;
}

static void
get_device_stat_test(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_io_stat *stat;
	bool done;

	bdev = allocate_bdev("bdev0");
	stat = calloc(1, sizeof(struct spdk_bdev_io_stat));
	if (stat == NULL) {
		free_bdev(bdev);
		return;
	}

	done = false;
	spdk_bdev_get_device_stat(bdev, stat, get_device_stat_cb, &done);
	while (!done) {
		poll_threads();
	}

	free_bdev(bdev);
}

static void
open_write_test(void)
{
	struct spdk_bdev *bdev[9];
	struct spdk_bdev_desc *desc[9] = {};
	int rc;

	/*
	 * Create a tree of bdevs to test various open w/ write cases.
	 *
	 * bdev0 through bdev3 are physical block devices, such as NVMe
	 * namespaces or Ceph block devices.
	 *
	 * bdev4 is a virtual bdev with multiple base bdevs.  This models
	 * caching or RAID use cases.
	 *
	 * bdev5 through bdev7 are all virtual bdevs with the same base
	 * bdev (except bdev7).  This models partitioning or logical volume
	 * use cases.
	 *
	 * bdev7 is a virtual bdev with multiple base bdevs.  One of its base
	 * bdevs (bdev2) is shared with the other virtual bdevs bdev5 and bdev6.
	 * This models caching, RAID, partitioning or logical volume use cases.
	 *
	 * bdev8 is a virtual bdev with multiple base bdevs, but these
	 * base bdevs are themselves virtual bdevs.
	 *
	 *                bdev8
	 *                  |
	 *            +----------+
	 *            |          |
	 *          bdev4      bdev5   bdev6   bdev7
	 *            |          |       |       |
	 *        +---+---+    +---+     +    +---+---+
	 *        |       |        \     |   /        \
	 *      bdev0   bdev1       bdev2          bdev3
	 */

	bdev[0] = allocate_bdev("bdev0");
	rc = spdk_bdev_module_claim_bdev(bdev[0], NULL, &bdev_ut_if);
	CU_ASSERT(rc == 0);

	bdev[1] = allocate_bdev("bdev1");
	rc = spdk_bdev_module_claim_bdev(bdev[1], NULL, &bdev_ut_if);
	CU_ASSERT(rc == 0);

	bdev[2] = allocate_bdev("bdev2");
	rc = spdk_bdev_module_claim_bdev(bdev[2], NULL, &bdev_ut_if);
	CU_ASSERT(rc == 0);

	bdev[3] = allocate_bdev("bdev3");
	rc = spdk_bdev_module_claim_bdev(bdev[3], NULL, &bdev_ut_if);
	CU_ASSERT(rc == 0);

	bdev[4] = allocate_vbdev("bdev4");
	rc = spdk_bdev_module_claim_bdev(bdev[4], NULL, &bdev_ut_if);
	CU_ASSERT(rc == 0);

	bdev[5] = allocate_vbdev("bdev5");
	rc = spdk_bdev_module_claim_bdev(bdev[5], NULL, &bdev_ut_if);
	CU_ASSERT(rc == 0);

	bdev[6] = allocate_vbdev("bdev6");

	bdev[7] = allocate_vbdev("bdev7");

	bdev[8] = allocate_vbdev("bdev8");

	/* Open bdev0 read-only.  This should succeed. */
	rc = spdk_bdev_open_ext("bdev0", false, bdev_ut_event_cb, NULL, &desc[0]);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(desc[0] != NULL);
	CU_ASSERT(bdev[0] == spdk_bdev_desc_get_bdev(desc[0]));
	spdk_bdev_close(desc[0]);

	/*
	 * Open bdev1 read/write.  This should fail since bdev1 has been claimed
	 * by a vbdev module.
	 */
	rc = spdk_bdev_open_ext("bdev1", true, bdev_ut_event_cb, NULL, &desc[1]);
	CU_ASSERT(rc == -EPERM);

	/*
	 * Open bdev4 read/write.  This should fail since bdev4 has been claimed
	 * by a vbdev module.
	 */
	rc = spdk_bdev_open_ext("bdev4", true, bdev_ut_event_cb, NULL, &desc[4]);
	CU_ASSERT(rc == -EPERM);

	/* Open bdev4 read-only.  This should succeed. */
	rc = spdk_bdev_open_ext("bdev4", false, bdev_ut_event_cb, NULL, &desc[4]);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(desc[4] != NULL);
	CU_ASSERT(bdev[4] == spdk_bdev_desc_get_bdev(desc[4]));
	spdk_bdev_close(desc[4]);

	/*
	 * Open bdev8 read/write.  This should succeed since it is a leaf
	 * bdev.
	 */
	rc = spdk_bdev_open_ext("bdev8", true, bdev_ut_event_cb, NULL, &desc[8]);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(desc[8] != NULL);
	CU_ASSERT(bdev[8] == spdk_bdev_desc_get_bdev(desc[8]));
	spdk_bdev_close(desc[8]);

	/*
	 * Open bdev5 read/write.  This should fail since bdev5 has been claimed
	 * by a vbdev module.
	 */
	rc = spdk_bdev_open_ext("bdev5", true, bdev_ut_event_cb, NULL, &desc[5]);
	CU_ASSERT(rc == -EPERM);

	/* Open bdev5 read-only.  This should succeed. */
	rc = spdk_bdev_open_ext("bdev5", false, bdev_ut_event_cb, NULL, &desc[5]);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(desc[5] != NULL);
	CU_ASSERT(bdev[5] == spdk_bdev_desc_get_bdev(desc[5]));
	spdk_bdev_close(desc[5]);

	free_vbdev(bdev[8]);

	free_vbdev(bdev[5]);
	free_vbdev(bdev[6]);
	free_vbdev(bdev[7]);

	free_vbdev(bdev[4]);

	free_bdev(bdev[0]);
	free_bdev(bdev[1]);
	free_bdev(bdev[2]);
	free_bdev(bdev[3]);
}

static void
claim_test(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc, *open_desc;
	int rc;
	uint32_t count;

	/*
	 * A vbdev that uses a read-only bdev may need it to remain read-only.
	 * To do so, it opens the bdev read-only, then claims it without
	 * passing a spdk_bdev_desc.
	 */
	bdev = allocate_bdev("bdev0");
	rc = spdk_bdev_open_ext("bdev0", false, bdev_ut_event_cb, NULL, &desc);
	CU_ASSERT(rc == 0);
	CU_ASSERT(desc->write == false);

	rc = spdk_bdev_module_claim_bdev(bdev, NULL, &bdev_ut_if);
	CU_ASSERT(rc == 0);
	CU_ASSERT(bdev->internal.claim_module == &bdev_ut_if);

	/* There should be only one open descriptor and it should still be ro */
	count = 0;
	TAILQ_FOREACH(open_desc, &bdev->internal.open_descs, link) {
		CU_ASSERT(open_desc == desc);
		CU_ASSERT(!open_desc->write);
		count++;
	}
	CU_ASSERT(count == 1);

	/* A read-only bdev is upgraded to read-write if desc is passed. */
	spdk_bdev_module_release_bdev(bdev);
	rc = spdk_bdev_module_claim_bdev(bdev, desc, &bdev_ut_if);
	CU_ASSERT(rc == 0);
	CU_ASSERT(bdev->internal.claim_module == &bdev_ut_if);

	/* There should be only one open descriptor and it should be rw */
	count = 0;
	TAILQ_FOREACH(open_desc, &bdev->internal.open_descs, link) {
		CU_ASSERT(open_desc == desc);
		CU_ASSERT(open_desc->write);
		count++;
	}
	CU_ASSERT(count == 1);

	spdk_bdev_close(desc);
	free_bdev(bdev);
}

static void
bytes_to_blocks_test(void)
{
	struct spdk_bdev bdev;
	uint64_t offset_blocks, num_blocks;

	memset(&bdev, 0, sizeof(bdev));

	bdev.blocklen = 512;

	/* All parameters valid */
	offset_blocks = 0;
	num_blocks = 0;
	CU_ASSERT(bdev_bytes_to_blocks(&bdev, 512, &offset_blocks, 1024, &num_blocks) == 0);
	CU_ASSERT(offset_blocks == 1);
	CU_ASSERT(num_blocks == 2);

	/* Offset not a block multiple */
	CU_ASSERT(bdev_bytes_to_blocks(&bdev, 3, &offset_blocks, 512, &num_blocks) != 0);

	/* Length not a block multiple */
	CU_ASSERT(bdev_bytes_to_blocks(&bdev, 512, &offset_blocks, 3, &num_blocks) != 0);

	/* Test the case where blocklen is not a power of two */
	bdev.blocklen = 100;
	CU_ASSERT(bdev_bytes_to_blocks(&bdev, 100, &offset_blocks, 200, &num_blocks) == 0);
	CU_ASSERT(offset_blocks == 1);
	CU_ASSERT(num_blocks == 2);

	/* Offset not a block multiple */
	CU_ASSERT(bdev_bytes_to_blocks(&bdev, 3, &offset_blocks, 100, &num_blocks) != 0);

	/* Length not a block multiple */
	CU_ASSERT(bdev_bytes_to_blocks(&bdev, 100, &offset_blocks, 3, &num_blocks) != 0);
}
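/*
 * Worked example for the checks above: with blocklen 512, byte offset 512 and
 * byte length 1024 map to offset_blocks 1 and num_blocks 2, while offset 3 is
 * rejected because it is not a multiple of the block size.  The blocklen 100
 * case exercises the plain division path in bdev_bytes_to_blocks() rather
 * than the power-of-two shift path.
 */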
static void
num_blocks_test(void)
{
	struct spdk_bdev bdev;
	struct spdk_bdev_desc *desc = NULL;
	int rc;

	memset(&bdev, 0, sizeof(bdev));
	bdev.name = "num_blocks";
	bdev.fn_table = &fn_table;
	bdev.module = &bdev_ut_if;
	spdk_bdev_register(&bdev);
	spdk_bdev_notify_blockcnt_change(&bdev, 50);

	/* Growing block number */
	CU_ASSERT(spdk_bdev_notify_blockcnt_change(&bdev, 70) == 0);
	/* Shrinking block number */
	CU_ASSERT(spdk_bdev_notify_blockcnt_change(&bdev, 30) == 0);

	rc = spdk_bdev_open_ext("num_blocks", false, bdev_open_cb1, &desc, &desc);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(desc != NULL);
	CU_ASSERT(&bdev == spdk_bdev_desc_get_bdev(desc));

	/* Growing block number */
	CU_ASSERT(spdk_bdev_notify_blockcnt_change(&bdev, 80) == 0);
	/* Shrinking block number is not allowed while the bdev is open */
	CU_ASSERT(spdk_bdev_notify_blockcnt_change(&bdev, 20) != 0);

	g_event_type1 = 0xFF;
	/* Growing block number */
	CU_ASSERT(spdk_bdev_notify_blockcnt_change(&bdev, 90) == 0);

	poll_threads();
	CU_ASSERT_EQUAL(g_event_type1, SPDK_BDEV_EVENT_RESIZE);

	g_event_type1 = 0xFF;
	/* Growing block number and closing */
	CU_ASSERT(spdk_bdev_notify_blockcnt_change(&bdev, 100) == 0);

	spdk_bdev_close(desc);
	spdk_bdev_unregister(&bdev, NULL, NULL);

	poll_threads();

	/* Callback is not called for closed device */
	CU_ASSERT_EQUAL(g_event_type1, 0xFF);
}

static void
io_valid_test(void)
{
	struct spdk_bdev bdev;

	memset(&bdev, 0, sizeof(bdev));

	bdev.blocklen = 512;
	CU_ASSERT(pthread_mutex_init(&bdev.internal.mutex, NULL) == 0);

	spdk_bdev_notify_blockcnt_change(&bdev, 100);

	/* All parameters valid */
	CU_ASSERT(bdev_io_valid_blocks(&bdev, 1, 2) == true);

	/* Last valid block */
	CU_ASSERT(bdev_io_valid_blocks(&bdev, 99, 1) == true);

	/* Offset past end of bdev */
	CU_ASSERT(bdev_io_valid_blocks(&bdev, 100, 1) == false);

	/* Offset + length past end of bdev */
	CU_ASSERT(bdev_io_valid_blocks(&bdev, 99, 2) == false);

	/* Offset near end of uint64_t range (2^64 - 1) */
	CU_ASSERT(bdev_io_valid_blocks(&bdev, 18446744073709551615ULL, 1) == false);

	CU_ASSERT(pthread_mutex_destroy(&bdev.internal.mutex) == 0);
}

static void
alias_add_del_test(void)
{
	struct spdk_bdev *bdev[3];
	int rc;

	/* Creating and registering bdevs */
	bdev[0] = allocate_bdev("bdev0");
	SPDK_CU_ASSERT_FATAL(bdev[0] != 0);

	bdev[1] = allocate_bdev("bdev1");
	SPDK_CU_ASSERT_FATAL(bdev[1] != 0);

	bdev[2] = allocate_bdev("bdev2");
	SPDK_CU_ASSERT_FATAL(bdev[2] != 0);

	poll_threads();

	/*
	 * Try adding an alias identical to the bdev's name.  An alias that
	 * duplicates the name cannot be added to the alias list.
	 */
	rc = spdk_bdev_alias_add(bdev[0], bdev[0]->name);
	CU_ASSERT(rc == -EEXIST);

	/*
	 * Try to add an empty alias.
	 * This one should fail.
	 */
	rc = spdk_bdev_alias_add(bdev[0], NULL);
	CU_ASSERT(rc == -EINVAL);

	/* Try adding the same alias to two different registered bdevs */

	/* Alias is used for the first time, so this one should pass */
	rc = spdk_bdev_alias_add(bdev[0], "proper alias 0");
	CU_ASSERT(rc == 0);

	/* Alias was added to another bdev, so this one should fail */
	rc = spdk_bdev_alias_add(bdev[1], "proper alias 0");
	CU_ASSERT(rc == -EEXIST);

	/* Alias is used for the first time, so this one should pass */
	rc = spdk_bdev_alias_add(bdev[1], "proper alias 1");
	CU_ASSERT(rc == 0);

	/* Try removing aliases from registered bdevs */

	/* Alias is not on the bdev's alias list, so this one should fail */
	rc = spdk_bdev_alias_del(bdev[0], "not existing");
	CU_ASSERT(rc == -ENOENT);

	/* Alias is present on the bdev's alias list, so this one should pass */
	rc = spdk_bdev_alias_del(bdev[0], "proper alias 0");
	CU_ASSERT(rc == 0);

	/* Alias is present on the bdev's alias list, so this one should pass */
	rc = spdk_bdev_alias_del(bdev[1], "proper alias 1");
	CU_ASSERT(rc == 0);

	/* Try removing the name instead of an alias.  This should fail since the name cannot be changed or removed */
	rc = spdk_bdev_alias_del(bdev[0], bdev[0]->name);
	CU_ASSERT(rc != 0);

	/* Try deleting all aliases from an empty alias list */
	spdk_bdev_alias_del_all(bdev[2]);
	SPDK_CU_ASSERT_FATAL(TAILQ_EMPTY(&bdev[2]->aliases));

	/* Try deleting all aliases from a non-empty alias list */
	rc = spdk_bdev_alias_add(bdev[2], "alias0");
	CU_ASSERT(rc == 0);
	rc = spdk_bdev_alias_add(bdev[2], "alias1");
	CU_ASSERT(rc == 0);
	spdk_bdev_alias_del_all(bdev[2]);
	CU_ASSERT(TAILQ_EMPTY(&bdev[2]->aliases));

	/* Unregister and free bdevs */
	spdk_bdev_unregister(bdev[0], NULL, NULL);
	spdk_bdev_unregister(bdev[1], NULL, NULL);
	spdk_bdev_unregister(bdev[2], NULL, NULL);

	poll_threads();

	free(bdev[0]);
	free(bdev[1]);
	free(bdev[2]);
}

static void
io_done(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
{
	g_io_done = true;
	g_io_status = bdev_io->internal.status;
	if ((bdev_io->type == SPDK_BDEV_IO_TYPE_ZCOPY) &&
	    (bdev_io->u.bdev.zcopy.start)) {
		g_zcopy_bdev_io = bdev_io;
	} else {
		spdk_bdev_free_io(bdev_io);
		g_zcopy_bdev_io = NULL;
	}
}

static void
bdev_init_cb(void *arg, int rc)
{
	CU_ASSERT(rc == 0);
}

static void
bdev_fini_cb(void *arg)
{
}

struct bdev_ut_io_wait_entry {
	struct spdk_bdev_io_wait_entry entry;
	struct spdk_io_channel *io_ch;
	struct spdk_bdev_desc *desc;
	bool submitted;
};

static void
io_wait_cb(void *arg)
{
	struct bdev_ut_io_wait_entry *entry = arg;
	int rc;

	rc = spdk_bdev_read_blocks(entry->desc, entry->io_ch, NULL, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	entry->submitted = true;
}

static void
bdev_io_types_test(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc = NULL;
	struct spdk_io_channel *io_ch;
	struct spdk_bdev_opts bdev_opts = {};
	int rc;

	spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts));
	bdev_opts.bdev_io_pool_size = 4;
	bdev_opts.bdev_io_cache_size = 2;

	rc = spdk_bdev_set_opts(&bdev_opts);
	CU_ASSERT(rc == 0);
	spdk_bdev_initialize(bdev_init_cb, NULL);
	poll_threads();

	bdev = allocate_bdev("bdev0");

	rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc);
	CU_ASSERT(rc == 0);
	poll_threads();
	SPDK_CU_ASSERT_FATAL(desc != NULL);
	CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc));
	io_ch = spdk_bdev_get_io_channel(desc);
	CU_ASSERT(io_ch != NULL);

	/* WRITE and WRITE ZEROES are not supported */
	ut_enable_io_type(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, false);
	ut_enable_io_type(SPDK_BDEV_IO_TYPE_WRITE, false);
	rc = spdk_bdev_write_zeroes_blocks(desc, io_ch, 0, 128, io_done, NULL);
	CU_ASSERT(rc == -ENOTSUP);
	ut_enable_io_type(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, true);
	ut_enable_io_type(SPDK_BDEV_IO_TYPE_WRITE, true);

	/* NVME_IO, NVME_IO_MD and NVME_ADMIN are not supported */
	ut_enable_io_type(SPDK_BDEV_IO_TYPE_NVME_IO, false);
	ut_enable_io_type(SPDK_BDEV_IO_TYPE_NVME_IO_MD, false);
	ut_enable_io_type(SPDK_BDEV_IO_TYPE_NVME_ADMIN, false);
	rc = spdk_bdev_nvme_io_passthru(desc, io_ch, NULL, NULL, 0, NULL, NULL);
	CU_ASSERT(rc == -ENOTSUP);
	rc = spdk_bdev_nvme_io_passthru_md(desc, io_ch, NULL, NULL, 0, NULL, 0, NULL, NULL);
	CU_ASSERT(rc == -ENOTSUP);
	rc = spdk_bdev_nvme_admin_passthru(desc, io_ch, NULL, NULL, 0, NULL, NULL);
	CU_ASSERT(rc == -ENOTSUP);
	ut_enable_io_type(SPDK_BDEV_IO_TYPE_NVME_IO, true);
	ut_enable_io_type(SPDK_BDEV_IO_TYPE_NVME_IO_MD, true);
	ut_enable_io_type(SPDK_BDEV_IO_TYPE_NVME_ADMIN, true);

	spdk_put_io_channel(io_ch);
	spdk_bdev_close(desc);
	free_bdev(bdev);
	spdk_bdev_finish(bdev_fini_cb, NULL);
	poll_threads();
}
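/*
 * bdev_io_wait_test() below exercises spdk_bdev_queue_io_wait(): with
 * bdev_io_pool_size set to 4, a fifth spdk_bdev_read_blocks() fails with
 * -ENOMEM, and the caller instead registers a spdk_bdev_io_wait_entry whose
 * cb_fn is invoked (in registration order) once a bdev_io is returned to the
 * pool by a completion.
 */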
static void
bdev_io_wait_test(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc = NULL;
	struct spdk_io_channel *io_ch;
	struct spdk_bdev_opts bdev_opts = {};
	struct bdev_ut_io_wait_entry io_wait_entry;
	struct bdev_ut_io_wait_entry io_wait_entry2;
	int rc;

	spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts));
	bdev_opts.bdev_io_pool_size = 4;
	bdev_opts.bdev_io_cache_size = 2;

	rc = spdk_bdev_set_opts(&bdev_opts);
	CU_ASSERT(rc == 0);
	spdk_bdev_initialize(bdev_init_cb, NULL);
	poll_threads();

	bdev = allocate_bdev("bdev0");

	rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc);
	CU_ASSERT(rc == 0);
	poll_threads();
	SPDK_CU_ASSERT_FATAL(desc != NULL);
	CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc));
	io_ch = spdk_bdev_get_io_channel(desc);
	CU_ASSERT(io_ch != NULL);

	rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 4);

	rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, NULL);
	CU_ASSERT(rc == -ENOMEM);

	io_wait_entry.entry.bdev = bdev;
	io_wait_entry.entry.cb_fn = io_wait_cb;
	io_wait_entry.entry.cb_arg = &io_wait_entry;
	io_wait_entry.io_ch = io_ch;
	io_wait_entry.desc = desc;
	io_wait_entry.submitted = false;
	/* Cannot use the same io_wait_entry for two different calls. */
	memcpy(&io_wait_entry2, &io_wait_entry, sizeof(io_wait_entry));
	io_wait_entry2.entry.cb_arg = &io_wait_entry2;

	/* Queue two I/O waits. */
	rc = spdk_bdev_queue_io_wait(bdev, io_ch, &io_wait_entry.entry);
	CU_ASSERT(rc == 0);
	CU_ASSERT(io_wait_entry.submitted == false);
	rc = spdk_bdev_queue_io_wait(bdev, io_ch, &io_wait_entry2.entry);
	CU_ASSERT(rc == 0);
	CU_ASSERT(io_wait_entry2.submitted == false);

	stub_complete_io(1);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 4);
	CU_ASSERT(io_wait_entry.submitted == true);
	CU_ASSERT(io_wait_entry2.submitted == false);

	stub_complete_io(1);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 4);
	CU_ASSERT(io_wait_entry2.submitted == true);

	stub_complete_io(4);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);

	spdk_put_io_channel(io_ch);
	spdk_bdev_close(desc);
	free_bdev(bdev);
	spdk_bdev_finish(bdev_fini_cb, NULL);
	poll_threads();
}
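/*
 * bdev_io_spans_split_test() below drives bdev_io_should_split() directly on
 * a stack-allocated bdev_io.  Per the cases it asserts, an I/O should be
 * split when it crosses an optimal_io_boundary stripe (if
 * split_on_optimal_io_boundary is set), when its iovcnt exceeds
 * max_num_segments, or when an iovec exceeds max_segment_size.
 */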
static void
bdev_io_spans_split_test(void)
{
	struct spdk_bdev bdev;
	struct spdk_bdev_io bdev_io;
	struct iovec iov[BDEV_IO_NUM_CHILD_IOV];

	memset(&bdev, 0, sizeof(bdev));
	bdev_io.u.bdev.iovs = iov;

	bdev_io.type = SPDK_BDEV_IO_TYPE_READ;
	bdev.optimal_io_boundary = 0;
	bdev.max_segment_size = 0;
	bdev.max_num_segments = 0;
	bdev_io.bdev = &bdev;

	/* bdev has no optimal_io_boundary and no max size limits set, so this should return false. */
	CU_ASSERT(bdev_io_should_split(&bdev_io) == false);

	bdev.split_on_optimal_io_boundary = true;
	bdev.optimal_io_boundary = 32;
	bdev_io.type = SPDK_BDEV_IO_TYPE_RESET;

	/* RESETs are not based on LBAs, so this should return false. */
	CU_ASSERT(bdev_io_should_split(&bdev_io) == false);

	bdev_io.type = SPDK_BDEV_IO_TYPE_READ;
	bdev_io.u.bdev.offset_blocks = 0;
	bdev_io.u.bdev.num_blocks = 32;

	/* This I/O runs right up to, but does not cross, the boundary, so this should return false. */
	CU_ASSERT(bdev_io_should_split(&bdev_io) == false);

	bdev_io.u.bdev.num_blocks = 33;

	/* This I/O spans a boundary. */
	CU_ASSERT(bdev_io_should_split(&bdev_io) == true);

	bdev_io.u.bdev.num_blocks = 32;
	bdev.max_segment_size = 512 * 32;
	bdev.max_num_segments = 1;
	bdev_io.u.bdev.iovcnt = 1;
	iov[0].iov_len = 512;

	/* Does not cross a boundary and does not exceed max_segment_size or max_num_segments */
	CU_ASSERT(bdev_io_should_split(&bdev_io) == false);

	bdev.split_on_optimal_io_boundary = false;
	bdev.max_segment_size = 512;
	bdev.max_num_segments = 1;
	bdev_io.u.bdev.iovcnt = 2;

	/* Exceeds max_num_segments */
	CU_ASSERT(bdev_io_should_split(&bdev_io) == true);

	bdev.max_num_segments = 2;
	iov[0].iov_len = 513;
	iov[1].iov_len = 512;

	/* Exceeds max_segment_size */
	CU_ASSERT(bdev_io_should_split(&bdev_io) == true);
}

static void
bdev_io_boundary_split_test(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc = NULL;
	struct spdk_io_channel *io_ch;
	struct spdk_bdev_opts bdev_opts = {};
	struct iovec iov[BDEV_IO_NUM_CHILD_IOV * 2];
	struct ut_expected_io *expected_io;
	void *md_buf = (void *)0xFF000000;
	uint64_t i;
	int rc;

	spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts));
	bdev_opts.bdev_io_pool_size = 512;
	bdev_opts.bdev_io_cache_size = 64;

	rc = spdk_bdev_set_opts(&bdev_opts);
	CU_ASSERT(rc == 0);
	spdk_bdev_initialize(bdev_init_cb, NULL);

	bdev = allocate_bdev("bdev0");

	rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(desc != NULL);
	CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc));
	io_ch = spdk_bdev_get_io_channel(desc);
	CU_ASSERT(io_ch != NULL);

	bdev->optimal_io_boundary = 16;
	bdev->split_on_optimal_io_boundary = false;

	g_io_done = false;

	/* First test that the I/O does not get split if split_on_optimal_io_boundary == false. */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 14, 8, 1);
	ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 8 * 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 8, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);

	bdev->split_on_optimal_io_boundary = true;
	bdev->md_interleave = false;
	bdev->md_len = 8;

	/* Now test that a single-vector command is split correctly.
	 * Offset 14, length 8, payload 0xF000
	 *  Child - Offset 14, length 2, payload 0xF000
	 *  Child - Offset 16, length 6, payload 0xF000 + 2 * 512
	 *
	 * Set up the expected values before calling spdk_bdev_read_blocks
	 */
	g_io_done = false;
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 14, 2, 1);
	expected_io->md_buf = md_buf;
	ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 2 * 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 16, 6, 1);
	expected_io->md_buf = md_buf + 2 * 8;
	ut_expected_io_set_iov(expected_io, 0, (void *)(0xF000 + 2 * 512), 6 * 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	/* spdk_bdev_read_blocks will submit the first child immediately. */
	rc = spdk_bdev_read_blocks_with_md(desc, io_ch, (void *)0xF000, md_buf,
					   14, 8, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2);
	stub_complete_io(2);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);

	/* Now set up a more complex, multi-vector command that needs to be split,
	 * including splitting iovecs.
	 */
	iov[0].iov_base = (void *)0x10000;
	iov[0].iov_len = 512;
	iov[1].iov_base = (void *)0x20000;
	iov[1].iov_len = 20 * 512;
	iov[2].iov_base = (void *)0x30000;
	iov[2].iov_len = 11 * 512;

	g_io_done = false;
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 14, 2, 2);
	expected_io->md_buf = md_buf;
	ut_expected_io_set_iov(expected_io, 0, (void *)0x10000, 512);
	ut_expected_io_set_iov(expected_io, 1, (void *)0x20000, 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 16, 16, 1);
	expected_io->md_buf = md_buf + 2 * 8;
	ut_expected_io_set_iov(expected_io, 0, (void *)(0x20000 + 512), 16 * 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 32, 14, 2);
	expected_io->md_buf = md_buf + 18 * 8;
	ut_expected_io_set_iov(expected_io, 0, (void *)(0x20000 + 17 * 512), 3 * 512);
	ut_expected_io_set_iov(expected_io, 1, (void *)0x30000, 11 * 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	rc = spdk_bdev_writev_blocks_with_md(desc, io_ch, iov, 3, md_buf,
					     14, 32, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 3);
	stub_complete_io(3);
	CU_ASSERT(g_io_done == true);
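	/*
	 * Arithmetic for the write above: the parent covers blocks 14-45 (32
	 * blocks) with boundary 16, so the children are blocks 14-15 (iov[0]
	 * plus the first 512 bytes of iov[1]), blocks 16-31 (the next 16 * 512
	 * bytes of iov[1]) and blocks 32-45 (the 3 * 512 byte tail of iov[1]
	 * plus iov[2]).  The metadata pointer advances md_len (8) bytes per
	 * block.
	 */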
1357 */ 1358 for (i = 0; i < BDEV_IO_NUM_CHILD_IOV * 2; i++) { 1359 iov[i].iov_base = (void *)((i + 1) * 0x10000); 1360 iov[i].iov_len = 512; 1361 } 1362 1363 bdev->optimal_io_boundary = BDEV_IO_NUM_CHILD_IOV; 1364 g_io_done = false; 1365 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0, BDEV_IO_NUM_CHILD_IOV, 1366 BDEV_IO_NUM_CHILD_IOV); 1367 expected_io->md_buf = md_buf; 1368 for (i = 0; i < BDEV_IO_NUM_CHILD_IOV; i++) { 1369 ut_expected_io_set_iov(expected_io, i, (void *)((i + 1) * 0x10000), 512); 1370 } 1371 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1372 1373 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, BDEV_IO_NUM_CHILD_IOV, 1374 BDEV_IO_NUM_CHILD_IOV, BDEV_IO_NUM_CHILD_IOV); 1375 expected_io->md_buf = md_buf + BDEV_IO_NUM_CHILD_IOV * 8; 1376 for (i = 0; i < BDEV_IO_NUM_CHILD_IOV; i++) { 1377 ut_expected_io_set_iov(expected_io, i, 1378 (void *)((i + 1 + BDEV_IO_NUM_CHILD_IOV) * 0x10000), 512); 1379 } 1380 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1381 1382 rc = spdk_bdev_readv_blocks_with_md(desc, io_ch, iov, BDEV_IO_NUM_CHILD_IOV * 2, md_buf, 1383 0, BDEV_IO_NUM_CHILD_IOV * 2, io_done, NULL); 1384 CU_ASSERT(rc == 0); 1385 CU_ASSERT(g_io_done == false); 1386 1387 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 1388 stub_complete_io(1); 1389 CU_ASSERT(g_io_done == false); 1390 1391 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 1392 stub_complete_io(1); 1393 CU_ASSERT(g_io_done == true); 1394 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 1395 1396 /* Test multi vector command that needs to be split by strip and then needs to be 1397 * split further due to the capacity of child iovs. In this case, the length of 1398 * the rest of iovec array with an I/O boundary is the multiple of block size. 1399 */ 1400 1401 /* Fill iovec array for exactly one boundary. The iovec cnt for this boundary 1402 * is BDEV_IO_NUM_CHILD_IOV + 1, which exceeds the capacity of child iovs. 
1403 */ 1404 for (i = 0; i < BDEV_IO_NUM_CHILD_IOV - 2; i++) { 1405 iov[i].iov_base = (void *)((i + 1) * 0x10000); 1406 iov[i].iov_len = 512; 1407 } 1408 for (i = BDEV_IO_NUM_CHILD_IOV - 2; i < BDEV_IO_NUM_CHILD_IOV; i++) { 1409 iov[i].iov_base = (void *)((i + 1) * 0x10000); 1410 iov[i].iov_len = 256; 1411 } 1412 iov[BDEV_IO_NUM_CHILD_IOV].iov_base = (void *)((BDEV_IO_NUM_CHILD_IOV + 1) * 0x10000); 1413 iov[BDEV_IO_NUM_CHILD_IOV].iov_len = 512; 1414 1415 /* Add an extra iovec to trigger split */ 1416 iov[BDEV_IO_NUM_CHILD_IOV + 1].iov_base = (void *)((BDEV_IO_NUM_CHILD_IOV + 2) * 0x10000); 1417 iov[BDEV_IO_NUM_CHILD_IOV + 1].iov_len = 512; 1418 1419 bdev->optimal_io_boundary = BDEV_IO_NUM_CHILD_IOV; 1420 g_io_done = false; 1421 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0, 1422 BDEV_IO_NUM_CHILD_IOV - 1, BDEV_IO_NUM_CHILD_IOV); 1423 expected_io->md_buf = md_buf; 1424 for (i = 0; i < BDEV_IO_NUM_CHILD_IOV - 2; i++) { 1425 ut_expected_io_set_iov(expected_io, i, 1426 (void *)((i + 1) * 0x10000), 512); 1427 } 1428 for (i = BDEV_IO_NUM_CHILD_IOV - 2; i < BDEV_IO_NUM_CHILD_IOV; i++) { 1429 ut_expected_io_set_iov(expected_io, i, 1430 (void *)((i + 1) * 0x10000), 256); 1431 } 1432 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1433 1434 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, BDEV_IO_NUM_CHILD_IOV - 1, 1435 1, 1); 1436 expected_io->md_buf = md_buf + (BDEV_IO_NUM_CHILD_IOV - 1) * 8; 1437 ut_expected_io_set_iov(expected_io, 0, 1438 (void *)((BDEV_IO_NUM_CHILD_IOV + 1) * 0x10000), 512); 1439 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1440 1441 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, BDEV_IO_NUM_CHILD_IOV, 1442 1, 1); 1443 expected_io->md_buf = md_buf + BDEV_IO_NUM_CHILD_IOV * 8; 1444 ut_expected_io_set_iov(expected_io, 0, 1445 (void *)((BDEV_IO_NUM_CHILD_IOV + 2) * 0x10000), 512); 1446 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1447 1448 rc = spdk_bdev_readv_blocks_with_md(desc, io_ch, iov, BDEV_IO_NUM_CHILD_IOV + 2, md_buf, 1449 0, BDEV_IO_NUM_CHILD_IOV + 1, io_done, NULL); 1450 CU_ASSERT(rc == 0); 1451 CU_ASSERT(g_io_done == false); 1452 1453 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 1454 stub_complete_io(1); 1455 CU_ASSERT(g_io_done == false); 1456 1457 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 1458 stub_complete_io(2); 1459 CU_ASSERT(g_io_done == true); 1460 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 1461 1462 /* Test multi vector command that needs to be split by strip and then needs to be 1463 * split further due to the capacity of child iovs, the child request offset should 1464 * be rewind to last aligned offset and go success without error. 
1465 */ 1466 for (i = 0; i < BDEV_IO_NUM_CHILD_IOV - 1; i++) { 1467 iov[i].iov_base = (void *)((i + 1) * 0x10000); 1468 iov[i].iov_len = 512; 1469 } 1470 iov[BDEV_IO_NUM_CHILD_IOV - 1].iov_base = (void *)(BDEV_IO_NUM_CHILD_IOV * 0x10000); 1471 iov[BDEV_IO_NUM_CHILD_IOV - 1].iov_len = 256; 1472 1473 iov[BDEV_IO_NUM_CHILD_IOV].iov_base = (void *)((BDEV_IO_NUM_CHILD_IOV + 1) * 0x10000); 1474 iov[BDEV_IO_NUM_CHILD_IOV].iov_len = 256; 1475 1476 iov[BDEV_IO_NUM_CHILD_IOV + 1].iov_base = (void *)((BDEV_IO_NUM_CHILD_IOV + 2) * 0x10000); 1477 iov[BDEV_IO_NUM_CHILD_IOV + 1].iov_len = 512; 1478 1479 bdev->optimal_io_boundary = BDEV_IO_NUM_CHILD_IOV; 1480 g_io_done = false; 1481 g_io_status = 0; 1482 /* The first expected io should be start from offset 0 to BDEV_IO_NUM_CHILD_IOV - 1 */ 1483 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0, 1484 BDEV_IO_NUM_CHILD_IOV - 1, BDEV_IO_NUM_CHILD_IOV - 1); 1485 expected_io->md_buf = md_buf; 1486 for (i = 0; i < BDEV_IO_NUM_CHILD_IOV - 1; i++) { 1487 ut_expected_io_set_iov(expected_io, i, 1488 (void *)((i + 1) * 0x10000), 512); 1489 } 1490 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1491 /* The second expected io should be start from offset BDEV_IO_NUM_CHILD_IOV - 1 to BDEV_IO_NUM_CHILD_IOV */ 1492 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, BDEV_IO_NUM_CHILD_IOV - 1, 1493 1, 2); 1494 expected_io->md_buf = md_buf + (BDEV_IO_NUM_CHILD_IOV - 1) * 8; 1495 ut_expected_io_set_iov(expected_io, 0, 1496 (void *)(BDEV_IO_NUM_CHILD_IOV * 0x10000), 256); 1497 ut_expected_io_set_iov(expected_io, 1, 1498 (void *)((BDEV_IO_NUM_CHILD_IOV + 1) * 0x10000), 256); 1499 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1500 /* The third expected io should be start from offset BDEV_IO_NUM_CHILD_IOV to BDEV_IO_NUM_CHILD_IOV + 1 */ 1501 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, BDEV_IO_NUM_CHILD_IOV, 1502 1, 1); 1503 expected_io->md_buf = md_buf + BDEV_IO_NUM_CHILD_IOV * 8; 1504 ut_expected_io_set_iov(expected_io, 0, 1505 (void *)((BDEV_IO_NUM_CHILD_IOV + 2) * 0x10000), 512); 1506 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1507 1508 rc = spdk_bdev_readv_blocks_with_md(desc, io_ch, iov, BDEV_IO_NUM_CHILD_IOV * 2, md_buf, 1509 0, BDEV_IO_NUM_CHILD_IOV + 1, io_done, NULL); 1510 CU_ASSERT(rc == 0); 1511 CU_ASSERT(g_io_done == false); 1512 1513 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 1514 stub_complete_io(1); 1515 CU_ASSERT(g_io_done == false); 1516 1517 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 1518 stub_complete_io(2); 1519 CU_ASSERT(g_io_done == true); 1520 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 1521 1522 /* Test multi vector command that needs to be split due to the IO boundary and 1523 * the capacity of child iovs. Especially test the case when the command is 1524 * split due to the capacity of child iovs, the tail address is not aligned with 1525 * block size and is rewinded to the aligned address. 1526 * 1527 * The iovecs used in read request is complex but is based on the data 1528 * collected in the real issue. We change the base addresses but keep the lengths 1529 * not to loose the credibility of the test. 
1530 */ 1531 bdev->optimal_io_boundary = 128; 1532 g_io_done = false; 1533 g_io_status = 0; 1534 1535 for (i = 0; i < 31; i++) { 1536 iov[i].iov_base = (void *)(0xFEED0000000 + (i << 20)); 1537 iov[i].iov_len = 1024; 1538 } 1539 iov[31].iov_base = (void *)0xFEED1F00000; 1540 iov[31].iov_len = 32768; 1541 iov[32].iov_base = (void *)0xFEED2000000; 1542 iov[32].iov_len = 160; 1543 iov[33].iov_base = (void *)0xFEED2100000; 1544 iov[33].iov_len = 4096; 1545 iov[34].iov_base = (void *)0xFEED2200000; 1546 iov[34].iov_len = 4096; 1547 iov[35].iov_base = (void *)0xFEED2300000; 1548 iov[35].iov_len = 4096; 1549 iov[36].iov_base = (void *)0xFEED2400000; 1550 iov[36].iov_len = 4096; 1551 iov[37].iov_base = (void *)0xFEED2500000; 1552 iov[37].iov_len = 4096; 1553 iov[38].iov_base = (void *)0xFEED2600000; 1554 iov[38].iov_len = 4096; 1555 iov[39].iov_base = (void *)0xFEED2700000; 1556 iov[39].iov_len = 4096; 1557 iov[40].iov_base = (void *)0xFEED2800000; 1558 iov[40].iov_len = 4096; 1559 iov[41].iov_base = (void *)0xFEED2900000; 1560 iov[41].iov_len = 4096; 1561 iov[42].iov_base = (void *)0xFEED2A00000; 1562 iov[42].iov_len = 4096; 1563 iov[43].iov_base = (void *)0xFEED2B00000; 1564 iov[43].iov_len = 12288; 1565 iov[44].iov_base = (void *)0xFEED2C00000; 1566 iov[44].iov_len = 8192; 1567 iov[45].iov_base = (void *)0xFEED2F00000; 1568 iov[45].iov_len = 4096; 1569 iov[46].iov_base = (void *)0xFEED3000000; 1570 iov[46].iov_len = 4096; 1571 iov[47].iov_base = (void *)0xFEED3100000; 1572 iov[47].iov_len = 4096; 1573 iov[48].iov_base = (void *)0xFEED3200000; 1574 iov[48].iov_len = 24576; 1575 iov[49].iov_base = (void *)0xFEED3300000; 1576 iov[49].iov_len = 16384; 1577 iov[50].iov_base = (void *)0xFEED3400000; 1578 iov[50].iov_len = 12288; 1579 iov[51].iov_base = (void *)0xFEED3500000; 1580 iov[51].iov_len = 4096; 1581 iov[52].iov_base = (void *)0xFEED3600000; 1582 iov[52].iov_len = 4096; 1583 iov[53].iov_base = (void *)0xFEED3700000; 1584 iov[53].iov_len = 4096; 1585 iov[54].iov_base = (void *)0xFEED3800000; 1586 iov[54].iov_len = 28672; 1587 iov[55].iov_base = (void *)0xFEED3900000; 1588 iov[55].iov_len = 20480; 1589 iov[56].iov_base = (void *)0xFEED3A00000; 1590 iov[56].iov_len = 4096; 1591 iov[57].iov_base = (void *)0xFEED3B00000; 1592 iov[57].iov_len = 12288; 1593 iov[58].iov_base = (void *)0xFEED3C00000; 1594 iov[58].iov_len = 4096; 1595 iov[59].iov_base = (void *)0xFEED3D00000; 1596 iov[59].iov_len = 4096; 1597 iov[60].iov_base = (void *)0xFEED3E00000; 1598 iov[60].iov_len = 352; 1599 1600 /* The 1st child IO must be from iov[0] to iov[31] split by the capacity 1601 * of child iovs, 1602 */ 1603 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0, 126, 32); 1604 expected_io->md_buf = md_buf; 1605 for (i = 0; i < 32; i++) { 1606 ut_expected_io_set_iov(expected_io, i, iov[i].iov_base, iov[i].iov_len); 1607 } 1608 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1609 1610 /* The 2nd child IO must be from iov[32] to the first 864 bytes of iov[33] 1611 * split by the IO boundary requirement. 
1612 */ 1613 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 126, 2, 2); 1614 expected_io->md_buf = md_buf + 126 * 8; 1615 ut_expected_io_set_iov(expected_io, 0, iov[32].iov_base, iov[32].iov_len); 1616 ut_expected_io_set_iov(expected_io, 1, iov[33].iov_base, 864); 1617 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1618 1619 /* The 3rd child IO must be from the remaining 3232 bytes of iov[33] to 1620 * the first 864 bytes of iov[46] split by the IO boundary requirement. 1621 */ 1622 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 128, 128, 14); 1623 expected_io->md_buf = md_buf + 128 * 8; 1624 ut_expected_io_set_iov(expected_io, 0, (void *)((uintptr_t)iov[33].iov_base + 864), 1625 iov[33].iov_len - 864); 1626 ut_expected_io_set_iov(expected_io, 1, iov[34].iov_base, iov[34].iov_len); 1627 ut_expected_io_set_iov(expected_io, 2, iov[35].iov_base, iov[35].iov_len); 1628 ut_expected_io_set_iov(expected_io, 3, iov[36].iov_base, iov[36].iov_len); 1629 ut_expected_io_set_iov(expected_io, 4, iov[37].iov_base, iov[37].iov_len); 1630 ut_expected_io_set_iov(expected_io, 5, iov[38].iov_base, iov[38].iov_len); 1631 ut_expected_io_set_iov(expected_io, 6, iov[39].iov_base, iov[39].iov_len); 1632 ut_expected_io_set_iov(expected_io, 7, iov[40].iov_base, iov[40].iov_len); 1633 ut_expected_io_set_iov(expected_io, 8, iov[41].iov_base, iov[41].iov_len); 1634 ut_expected_io_set_iov(expected_io, 9, iov[42].iov_base, iov[42].iov_len); 1635 ut_expected_io_set_iov(expected_io, 10, iov[43].iov_base, iov[43].iov_len); 1636 ut_expected_io_set_iov(expected_io, 11, iov[44].iov_base, iov[44].iov_len); 1637 ut_expected_io_set_iov(expected_io, 12, iov[45].iov_base, iov[45].iov_len); 1638 ut_expected_io_set_iov(expected_io, 13, iov[46].iov_base, 864); 1639 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1640 1641 /* The 4th child IO must be from the remaining 3232 bytes of iov[46] to the 1642 * first 864 bytes of iov[52] split by the IO boundary requirement. 1643 */ 1644 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 256, 128, 7); 1645 expected_io->md_buf = md_buf + 256 * 8; 1646 ut_expected_io_set_iov(expected_io, 0, (void *)((uintptr_t)iov[46].iov_base + 864), 1647 iov[46].iov_len - 864); 1648 ut_expected_io_set_iov(expected_io, 1, iov[47].iov_base, iov[47].iov_len); 1649 ut_expected_io_set_iov(expected_io, 2, iov[48].iov_base, iov[48].iov_len); 1650 ut_expected_io_set_iov(expected_io, 3, iov[49].iov_base, iov[49].iov_len); 1651 ut_expected_io_set_iov(expected_io, 4, iov[50].iov_base, iov[50].iov_len); 1652 ut_expected_io_set_iov(expected_io, 5, iov[51].iov_base, iov[51].iov_len); 1653 ut_expected_io_set_iov(expected_io, 6, iov[52].iov_base, 864); 1654 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1655 1656 /* The 5th child IO must be from the remaining 3232 bytes of iov[52] to 1657 * the first 4096 bytes of iov[57] split by the IO boundary requirement. 
     */
    expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 384, 128, 6);
    expected_io->md_buf = md_buf + 384 * 8;
    ut_expected_io_set_iov(expected_io, 0, (void *)((uintptr_t)iov[52].iov_base + 864),
                           iov[52].iov_len - 864);
    ut_expected_io_set_iov(expected_io, 1, iov[53].iov_base, iov[53].iov_len);
    ut_expected_io_set_iov(expected_io, 2, iov[54].iov_base, iov[54].iov_len);
    ut_expected_io_set_iov(expected_io, 3, iov[55].iov_base, iov[55].iov_len);
    ut_expected_io_set_iov(expected_io, 4, iov[56].iov_base, iov[56].iov_len);
    ut_expected_io_set_iov(expected_io, 5, iov[57].iov_base, 4960);
    TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

    /* The 6th child IO must be from the remaining 7328 bytes of iov[57]
     * to the first 3936 bytes of iov[59], split by the capacity of child iovs.
     */
    expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 512, 30, 3);
    expected_io->md_buf = md_buf + 512 * 8;
    ut_expected_io_set_iov(expected_io, 0, (void *)((uintptr_t)iov[57].iov_base + 4960),
                           iov[57].iov_len - 4960);
    ut_expected_io_set_iov(expected_io, 1, iov[58].iov_base, iov[58].iov_len);
    ut_expected_io_set_iov(expected_io, 2, iov[59].iov_base, 3936);
    TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

    /* The 7th child IO is from the remaining 160 bytes of iov[59] and iov[60]. */
    expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 542, 1, 2);
    expected_io->md_buf = md_buf + 542 * 8;
    ut_expected_io_set_iov(expected_io, 0, (void *)((uintptr_t)iov[59].iov_base + 3936),
                           iov[59].iov_len - 3936);
    ut_expected_io_set_iov(expected_io, 1, iov[60].iov_base, iov[60].iov_len);
    TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

    rc = spdk_bdev_readv_blocks_with_md(desc, io_ch, iov, 61, md_buf,
                                        0, 543, io_done, NULL);
    CU_ASSERT(rc == 0);
    CU_ASSERT(g_io_done == false);

    CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
    stub_complete_io(1);
    CU_ASSERT(g_io_done == false);

    CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 5);
    stub_complete_io(5);
    CU_ASSERT(g_io_done == false);

    CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
    stub_complete_io(1);
    CU_ASSERT(g_io_done == true);
    CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
    CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS);
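
    /* A quick recap of the arithmetic driving the seven children above (all
     * values follow from the iov lengths set in this test): the total transfer
     * is 543 blocks * 512 bytes = 278016 bytes, and the children cover
     * 126 + 2 + 128 + 128 + 128 + 30 + 1 = 543 blocks.  Children 1 and 6 end
     * where the 32-entry child iov array fills up; the others end on the
     * 128-block optimal_io_boundary.  The metadata pointer advances by 8 bytes
     * per block, which is why each child's md_buf is offset by (block offset * 8).
     */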

    /* Test a WRITE_ZEROES that would span an I/O boundary.  WRITE_ZEROES should
     * not be split, so test that.
     */
    bdev->optimal_io_boundary = 15;
    g_io_done = false;
    expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, 9, 36, 0);
    TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

    rc = spdk_bdev_write_zeroes_blocks(desc, io_ch, 9, 36, io_done, NULL);
    CU_ASSERT(rc == 0);
    CU_ASSERT(g_io_done == false);
    CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
    stub_complete_io(1);
    CU_ASSERT(g_io_done == true);

    /* Test an UNMAP.  This should also not be split. */
    bdev->optimal_io_boundary = 16;
    g_io_done = false;
    expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_UNMAP, 15, 2, 0);
    TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

    rc = spdk_bdev_unmap_blocks(desc, io_ch, 15, 2, io_done, NULL);
    CU_ASSERT(rc == 0);
    CU_ASSERT(g_io_done == false);
    CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
    stub_complete_io(1);
    CU_ASSERT(g_io_done == true);

    /* Test a FLUSH.  This should also not be split. */
    bdev->optimal_io_boundary = 16;
    g_io_done = false;
    expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_FLUSH, 15, 2, 0);
    TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

    rc = spdk_bdev_flush_blocks(desc, io_ch, 15, 2, io_done, NULL);
    CU_ASSERT(rc == 0);
    CU_ASSERT(g_io_done == false);
    CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
    stub_complete_io(1);
    CU_ASSERT(g_io_done == true);

    CU_ASSERT(TAILQ_EMPTY(&g_bdev_ut_channel->expected_io));

    /* Child requests return an error status */
    bdev->optimal_io_boundary = 16;
    iov[0].iov_base = (void *)0x10000;
    iov[0].iov_len = 512 * 64;
    g_io_exp_status = SPDK_BDEV_IO_STATUS_FAILED;
    g_io_done = false;
    g_io_status = SPDK_BDEV_IO_STATUS_SUCCESS;

    rc = spdk_bdev_readv_blocks(desc, io_ch, iov, 1, 1, 64, io_done, NULL);
    CU_ASSERT(rc == 0);
    CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 5);
    stub_complete_io(4);
    CU_ASSERT(g_io_done == false);
    CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS);
    stub_complete_io(1);
    CU_ASSERT(g_io_done == true);
    CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED);

    /* Test that a multi-vector command is terminated with a failure, rather than
     * continuing the splitting process, when one of its child I/Os fails.
     * The multi-vector command is the same as above: it needs to be split by strip
     * and then split further due to the capacity of child iovs.
     */
    for (i = 0; i < BDEV_IO_NUM_CHILD_IOV - 1; i++) {
        iov[i].iov_base = (void *)((i + 1) * 0x10000);
        iov[i].iov_len = 512;
    }
    iov[BDEV_IO_NUM_CHILD_IOV - 1].iov_base = (void *)(BDEV_IO_NUM_CHILD_IOV * 0x10000);
    iov[BDEV_IO_NUM_CHILD_IOV - 1].iov_len = 256;

    iov[BDEV_IO_NUM_CHILD_IOV].iov_base = (void *)((BDEV_IO_NUM_CHILD_IOV + 1) * 0x10000);
    iov[BDEV_IO_NUM_CHILD_IOV].iov_len = 256;

    iov[BDEV_IO_NUM_CHILD_IOV + 1].iov_base = (void *)((BDEV_IO_NUM_CHILD_IOV + 2) * 0x10000);
    iov[BDEV_IO_NUM_CHILD_IOV + 1].iov_len = 512;

    bdev->optimal_io_boundary = BDEV_IO_NUM_CHILD_IOV;

    g_io_exp_status = SPDK_BDEV_IO_STATUS_FAILED;
    g_io_done = false;
    g_io_status = SPDK_BDEV_IO_STATUS_SUCCESS;

    rc = spdk_bdev_readv_blocks(desc, io_ch, iov, BDEV_IO_NUM_CHILD_IOV * 2, 0,
                                BDEV_IO_NUM_CHILD_IOV + 1, io_done, NULL);
    CU_ASSERT(rc == 0);
    CU_ASSERT(g_io_done == false);

    CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
    stub_complete_io(1);
    CU_ASSERT(g_io_done == true);
    CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED);

    g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS;

    /* For this test we will create the following conditions to hit the code path
     * where we are trying to send an IO following a split that has no iovs because
     * we had to trim them for alignment reasons:
     *
     * - 16K boundary, our IO will start at offset 0 with a length of 0x4200
     * - Our IOVs are 0x212 in size so that we run into the 16K boundary at child
     *   IOV position 30 and overshoot by 0x2e.
     * - That means we'll send the IO and loop back to pick up the remaining bytes
     *   at child IOV index 31.  When we do, we find that we have to shorten index
     *   31 by 0x2e, which eliminates that vector, so we just send the first split
     *   IO with 30 vectors and let the completion pick up the last 2 vectors.
     */
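
    /* In hex, the numbers work out as follows (derived from the sizes above):
     * 31 iovs of 0x212 bytes total 0x402e, which is 0x2e past the 16K (0x4000)
     * boundary, so the first child's last vector is trimmed to
     * 0x212 - 0x2e = 0x1e4 bytes.  The second child then needs only
     * 0x2e + 0x1d2 = 0x200 bytes (one 512-byte block) to reach the full
     * 0x4200-byte transfer: 0x4000 + 0x200 = 0x4200.
     */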
    bdev->optimal_io_boundary = 32;
    bdev->split_on_optimal_io_boundary = true;
    g_io_done = false;

    /* Init all parent IOVs to 0x212 */
    for (i = 0; i < BDEV_IO_NUM_CHILD_IOV + 2; i++) {
        iov[i].iov_base = (void *)((i + 1) * 0x10000);
        iov[i].iov_len = 0x212;
    }

    expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0, BDEV_IO_NUM_CHILD_IOV,
                                       BDEV_IO_NUM_CHILD_IOV - 1);
    /* expect 0-29 to be 1:1 with the parent iov */
    for (i = 0; i < BDEV_IO_NUM_CHILD_IOV - 2; i++) {
        ut_expected_io_set_iov(expected_io, i, iov[i].iov_base, iov[i].iov_len);
    }

    /* expect index 30 to be shortened to 0x1e4 (0x212 - 0x2e) because of the
     * alignment, where 0x2e is the amount we overshot the 16K boundary
     */
    ut_expected_io_set_iov(expected_io, BDEV_IO_NUM_CHILD_IOV - 2,
                           (void *)(iov[BDEV_IO_NUM_CHILD_IOV - 2].iov_base), 0x1e4);
    TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

    /* 2nd child IO will have 2 remaining vectors, one to pick up from the one that
     * was shortened that takes it to the next boundary and then a final one to get
     * us to 0x4200 bytes for the IO.
     */
    expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, BDEV_IO_NUM_CHILD_IOV,
                                       1, 2);
    /* position 30 picked up the remaining bytes to the next boundary */
    ut_expected_io_set_iov(expected_io, 0,
                           (void *)(iov[BDEV_IO_NUM_CHILD_IOV - 2].iov_base + 0x1e4), 0x2e);

    /* position 31 picked up the rest of the transfer to get us to 0x4200 */
    ut_expected_io_set_iov(expected_io, 1,
                           (void *)(iov[BDEV_IO_NUM_CHILD_IOV - 1].iov_base), 0x1d2);
    TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

    rc = spdk_bdev_readv_blocks(desc, io_ch, iov, BDEV_IO_NUM_CHILD_IOV + 1, 0,
                                BDEV_IO_NUM_CHILD_IOV + 1, io_done, NULL);
    CU_ASSERT(rc == 0);
    CU_ASSERT(g_io_done == false);

    CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
    stub_complete_io(1);
    CU_ASSERT(g_io_done == false);

    CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
    stub_complete_io(1);
    CU_ASSERT(g_io_done == true);
    CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);

    spdk_put_io_channel(io_ch);
    spdk_bdev_close(desc);
    free_bdev(bdev);
    spdk_bdev_finish(bdev_fini_cb, NULL);
    poll_threads();
}

static void
bdev_io_max_size_and_segment_split_test(void)
{
    struct spdk_bdev *bdev;
    struct spdk_bdev_desc *desc = NULL;
    struct spdk_io_channel *io_ch;
    struct spdk_bdev_opts bdev_opts = {};
    struct iovec iov[BDEV_IO_NUM_CHILD_IOV * 2];
    struct ut_expected_io *expected_io;
    uint64_t i;
    int rc;

    spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts));
    bdev_opts.bdev_io_pool_size = 512;
    bdev_opts.bdev_io_cache_size = 64;

    bdev_opts.opts_size = sizeof(bdev_opts);
    rc = spdk_bdev_set_opts(&bdev_opts);
    CU_ASSERT(rc == 0);
    spdk_bdev_initialize(bdev_init_cb, NULL);

    bdev = allocate_bdev("bdev0");

    rc = spdk_bdev_open_ext(bdev->name, true, bdev_ut_event_cb, NULL, &desc);
    CU_ASSERT(rc == 0);
    SPDK_CU_ASSERT_FATAL(desc != NULL);
    io_ch = spdk_bdev_get_io_channel(desc);
    CU_ASSERT(io_ch != NULL);

    bdev->split_on_optimal_io_boundary = false;
    bdev->optimal_io_boundary = 0;

    /* Case 0: max_num_segments == 0, but the 2 * 512 payload exceeds
     * max_segment_size (512).
     */
    bdev->max_segment_size = 512;
    bdev->max_num_segments = 0;
    g_io_done = false;

    expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 14, 2, 2);
    ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 512);
    ut_expected_io_set_iov(expected_io, 1, (void *)(0xF000 + 512), 512);
    TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

    rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 2, io_done, NULL);
    CU_ASSERT(rc == 0);
    CU_ASSERT(g_io_done == false);

    CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
    stub_complete_io(1);
    CU_ASSERT(g_io_done == true);
    CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);

    /* Case 1: max_segment_size == 0, but the iov count (2) exceeds
     * max_num_segments (1).
     */
    bdev->max_segment_size = 0;
    bdev->max_num_segments = 1;
    g_io_done = false;

    iov[0].iov_base = (void *)0x10000;
    iov[0].iov_len = 512;
    iov[1].iov_base = (void *)0x20000;
    iov[1].iov_len = 8 * 512;

    expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 14, 1, 1);
    ut_expected_io_set_iov(expected_io, 0, iov[0].iov_base, iov[0].iov_len);
    TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

    expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 15, 8, 1);
    ut_expected_io_set_iov(expected_io, 0, iov[1].iov_base, iov[1].iov_len);
    TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

    rc = spdk_bdev_readv_blocks(desc, io_ch, iov, 2, 14, 9, io_done, NULL);
    CU_ASSERT(rc == 0);
    CU_ASSERT(g_io_done == false);

    CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2);
    stub_complete_io(2);
    CU_ASSERT(g_io_done == true);
    CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
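
    /* The two cases above exercise each limit in isolation: max_segment_size
     * alone splits a flat buffer into multiple iov entries within one child IO,
     * while max_num_segments alone splits a multi-iov request into multiple
     * child IOs without touching the individual entries.
     */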

    /* Test that a non-vector command is split correctly.
     * Set up the expected values before calling spdk_bdev_read_blocks
     */
    bdev->max_segment_size = 512;
    bdev->max_num_segments = 1;
    g_io_done = false;

    /* Child IO 0 */
    expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 14, 1, 1);
    ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 512);
    TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

    /* Child IO 1 */
    expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 15, 1, 1);
    ut_expected_io_set_iov(expected_io, 0, (void *)(0xF000 + 1 * 512), 512);
    TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

    /* spdk_bdev_read_blocks will submit the first child immediately. */
    rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 2, io_done, NULL);
    CU_ASSERT(rc == 0);
    CU_ASSERT(g_io_done == false);

    CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2);
    stub_complete_io(2);
    CU_ASSERT(g_io_done == true);
    CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);

    /* Now set up a more complex, multi-vector command that needs to be split,
     * including splitting iovecs.
     */
    bdev->max_segment_size = 2 * 512;
    bdev->max_num_segments = 1;
    g_io_done = false;

    iov[0].iov_base = (void *)0x10000;
    iov[0].iov_len = 2 * 512;
    iov[1].iov_base = (void *)0x20000;
    iov[1].iov_len = 4 * 512;
    iov[2].iov_base = (void *)0x30000;
    iov[2].iov_len = 6 * 512;

    expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 14, 2, 1);
    ut_expected_io_set_iov(expected_io, 0, iov[0].iov_base, 512 * 2);
    TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

    /* iov[1] is split into 2 segments, each of which becomes its own child IO */
    expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 16, 2, 1);
    ut_expected_io_set_iov(expected_io, 0, iov[1].iov_base, 512 * 2);
    TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

    expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 18, 2, 1);
    ut_expected_io_set_iov(expected_io, 0, iov[1].iov_base + 512 * 2, 512 * 2);
    TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

    /* iov[2] is split into 3 segments, each of which becomes its own child IO */
    expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 20, 2, 1);
    ut_expected_io_set_iov(expected_io, 0, iov[2].iov_base, 512 * 2);
    TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

    expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 22, 2, 1);
    ut_expected_io_set_iov(expected_io, 0, iov[2].iov_base + 512 * 2, 512 * 2);
    TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

    expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 24, 2, 1);
    ut_expected_io_set_iov(expected_io, 0, iov[2].iov_base + 512 * 4, 512 * 2);
    TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

    rc = spdk_bdev_writev_blocks(desc, io_ch, iov, 3, 14, 12, io_done, NULL);
    CU_ASSERT(rc == 0);
    CU_ASSERT(g_io_done == false);

    CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 6);
    stub_complete_io(6);
    CU_ASSERT(g_io_done == true);
    CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
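
    /* The write above covers 12 blocks with iovs of 2, 4 and 6 blocks.  With
     * max_segment_size = 1024 and max_num_segments = 1, each child IO carries
     * exactly one 2-block segment: 1 + 2 + 3 = 6 children at block offsets
     * 14, 16, 18, 20, 22 and 24.
     */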

    /* Test a multi-vector command that needs to be split by strip and then needs
     * to be split further due to the capacity of the parent IO child iovs.
     */
    bdev->max_segment_size = 512;
    bdev->max_num_segments = 1;
    g_io_done = false;

    for (i = 0; i < BDEV_IO_NUM_CHILD_IOV; i++) {
        iov[i].iov_base = (void *)((i + 1) * 0x10000);
        iov[i].iov_len = 512 * 2;
    }

    /* Each input iov is split into 2 iovs, so half of the input iovs consume
     * all the child iov entries of a single split round.
     */
    for (i = 0; i < BDEV_IO_NUM_CHILD_IOV / 2; i++) {
        expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 2 * i, 1, 1);
        ut_expected_io_set_iov(expected_io, 0, iov[i].iov_base, 512);
        TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

        expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 2 * i + 1, 1, 1);
        ut_expected_io_set_iov(expected_io, 0, iov[i].iov_base + 512, 512);
        TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
    }

    /* The remaining iovs are split in the second round */
    for (i = BDEV_IO_NUM_CHILD_IOV / 2; i < BDEV_IO_NUM_CHILD_IOV; i++) {
        expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, i * 2, 1, 1);
        ut_expected_io_set_iov(expected_io, 0, iov[i].iov_base, 512);
        TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

        expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, i * 2 + 1, 1, 1);
        ut_expected_io_set_iov(expected_io, 0, iov[i].iov_base + 512, 512);
        TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
    }

    rc = spdk_bdev_readv_blocks(desc, io_ch, iov, BDEV_IO_NUM_CHILD_IOV, 0,
                                BDEV_IO_NUM_CHILD_IOV * 2, io_done, NULL);
    CU_ASSERT(rc == 0);
    CU_ASSERT(g_io_done == false);

    CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == BDEV_IO_NUM_CHILD_IOV);
    stub_complete_io(BDEV_IO_NUM_CHILD_IOV);
    CU_ASSERT(g_io_done == false);

    CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == BDEV_IO_NUM_CHILD_IOV);
    stub_complete_io(BDEV_IO_NUM_CHILD_IOV);
    CU_ASSERT(g_io_done == true);
    CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);

    /* An error case: a divided child IO is not a multiple of the block size,
     * so splitting exits with an error.
     */
    bdev->max_segment_size = 512;
    bdev->max_num_segments = 1;
    g_io_done = false;

    iov[0].iov_base = (void *)0x10000;
    iov[0].iov_len = 512 + 256;
    iov[1].iov_base = (void *)0x20000;
    iov[1].iov_len = 256;

    /* iov[0] is split into 512 and 256.  256 is less than a block size, and in
     * the next split round it turns out to be the first child IO smaller than
     * the block size, so splitting exits with an error.
     */
    expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0, 1, 1);
    ut_expected_io_set_iov(expected_io, 0, iov[0].iov_base, 512);
    TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

    rc = spdk_bdev_readv_blocks(desc, io_ch, iov, 2, 0, 2, io_done, NULL);
    CU_ASSERT(rc == 0);
    CU_ASSERT(g_io_done == false);

    /* First child IO is OK */
    CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
    stub_complete_io(1);
    CU_ASSERT(g_io_done == true);
    CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);

    /* error exit */
    stub_complete_io(1);
    CU_ASSERT(g_io_done == true);
    CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED);
    CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
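
    /* Worked through: the request is 2 blocks (1024 bytes) backed by 768 + 256
     * byte iovs.  With max_num_segments = 1 the first child takes 512 bytes of
     * iov[0]; the next child would start with the remaining 256 bytes of iov[0],
     * which alone cannot form a full 512-byte block, so the split fails the IO.
     */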

    /* Test a multi-vector command that needs to be split by strip and then needs
     * to be split further due to the capacity of child iovs.
     *
     * In this case, the last two iovs need to be split, but doing so would exceed
     * the capacity of child iovs, so they need to wait until the first batch has
     * completed.
     */
    bdev->max_segment_size = 512;
    bdev->max_num_segments = BDEV_IO_NUM_CHILD_IOV;
    g_io_done = false;

    for (i = 0; i < BDEV_IO_NUM_CHILD_IOV - 2; i++) {
        iov[i].iov_base = (void *)((i + 1) * 0x10000);
        iov[i].iov_len = 512;
    }
    for (i = BDEV_IO_NUM_CHILD_IOV - 2; i < BDEV_IO_NUM_CHILD_IOV; i++) {
        iov[i].iov_base = (void *)((i + 1) * 0x10000);
        iov[i].iov_len = 512 * 2;
    }

    expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0,
                                       BDEV_IO_NUM_CHILD_IOV, BDEV_IO_NUM_CHILD_IOV);
    /* 0 ~ (BDEV_IO_NUM_CHILD_IOV - 2) will not be split */
    for (i = 0; i < BDEV_IO_NUM_CHILD_IOV - 2; i++) {
        ut_expected_io_set_iov(expected_io, i, iov[i].iov_base, iov[i].iov_len);
    }
    /* (BDEV_IO_NUM_CHILD_IOV - 2) is split */
    ut_expected_io_set_iov(expected_io, i, iov[i].iov_base, 512);
    ut_expected_io_set_iov(expected_io, i + 1, iov[i].iov_base + 512, 512);
    TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

    /* The child iov entries exceed the capacity of the parent IO, so the last
     * iov is split in the next round.
     */
    expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, BDEV_IO_NUM_CHILD_IOV, 2, 2);
    ut_expected_io_set_iov(expected_io, 0, iov[i + 1].iov_base, 512);
    ut_expected_io_set_iov(expected_io, 1, iov[i + 1].iov_base + 512, 512);
    TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

    rc = spdk_bdev_readv_blocks(desc, io_ch, iov, BDEV_IO_NUM_CHILD_IOV, 0,
                                BDEV_IO_NUM_CHILD_IOV + 2, io_done, NULL);
    CU_ASSERT(rc == 0);
    CU_ASSERT(g_io_done == false);

    CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
    stub_complete_io(1);
    CU_ASSERT(g_io_done == false);

    /* Next round */
    CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
    stub_complete_io(1);
    CU_ASSERT(g_io_done == true);
    CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);

    /* This case is similar to the previous one, but the IO composed of the last
     * few child iov entries is not enough for a full blocklen, so those entries
     * cannot be put into this IO and have to wait for the next round.
     */
    bdev->max_segment_size = 512;
    bdev->max_num_segments = BDEV_IO_NUM_CHILD_IOV;
    g_io_done = false;

    for (i = 0; i < BDEV_IO_NUM_CHILD_IOV - 2; i++) {
        iov[i].iov_base = (void *)((i + 1) * 0x10000);
        iov[i].iov_len = 512;
    }

    for (i = BDEV_IO_NUM_CHILD_IOV - 2; i < BDEV_IO_NUM_CHILD_IOV + 2; i++) {
        iov[i].iov_base = (void *)((i + 1) * 0x10000);
        iov[i].iov_len = 128;
    }

    /* The first child iovcnt isn't BDEV_IO_NUM_CHILD_IOV but BDEV_IO_NUM_CHILD_IOV - 2,
     * because the remaining 2 iovs are not enough for a blocklen.
     */
    expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0,
                                       BDEV_IO_NUM_CHILD_IOV - 2, BDEV_IO_NUM_CHILD_IOV - 2);
    for (i = 0; i < BDEV_IO_NUM_CHILD_IOV - 2; i++) {
        ut_expected_io_set_iov(expected_io, i, iov[i].iov_base, iov[i].iov_len);
    }
    TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

    /* The second child IO waits until the first child IO has finished, because
     * the combined iovcnt of the two IOs exceeds the child iovcnt of the parent
     * IO: it spans iovs (BDEV_IO_NUM_CHILD_IOV - 2) to (BDEV_IO_NUM_CHILD_IOV + 2).
     */
    expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, BDEV_IO_NUM_CHILD_IOV - 2,
                                       1, 4);
    ut_expected_io_set_iov(expected_io, 0, iov[i].iov_base, iov[i].iov_len);
    ut_expected_io_set_iov(expected_io, 1, iov[i + 1].iov_base, iov[i + 1].iov_len);
    ut_expected_io_set_iov(expected_io, 2, iov[i + 2].iov_base, iov[i + 2].iov_len);
    ut_expected_io_set_iov(expected_io, 3, iov[i + 3].iov_base, iov[i + 3].iov_len);
    TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

    rc = spdk_bdev_readv_blocks(desc, io_ch, iov, BDEV_IO_NUM_CHILD_IOV + 2, 0,
                                BDEV_IO_NUM_CHILD_IOV - 1, io_done, NULL);
    CU_ASSERT(rc == 0);
    CU_ASSERT(g_io_done == false);

    CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
    stub_complete_io(1);
    CU_ASSERT(g_io_done == false);

    CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
    stub_complete_io(1);
    CU_ASSERT(g_io_done == true);
    CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);

    /* A very complicated case.  Each sg entry exceeds max_segment_size and
     * needs to be split, each child IO must be a multiple of blocklen, and
     * the child iovcnt exceeds the parent iovcnt.
     */
    bdev->max_segment_size = 512 + 128;
    bdev->max_num_segments = 3;
    g_io_done = false;

    for (i = 0; i < BDEV_IO_NUM_CHILD_IOV - 2; i++) {
        iov[i].iov_base = (void *)((i + 1) * 0x10000);
        iov[i].iov_len = 512 + 256;
    }

    for (i = BDEV_IO_NUM_CHILD_IOV - 2; i < BDEV_IO_NUM_CHILD_IOV + 2; i++) {
        iov[i].iov_base = (void *)((i + 1) * 0x10000);
        iov[i].iov_len = 512 + 128;
    }

    /* Each for() round uses 9 child iov entries (3 * 9 = 27 in total), consumes
     * 4 parent IO iov entries and 6 blocks, and generates 3 child IOs
     * (9 in total).
     */
    for (i = 0; i < 3; i++) {
        uint32_t j = i * 4;

        expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, i * 6, 2, 3);
        ut_expected_io_set_iov(expected_io, 0, iov[j].iov_base, 640);
        ut_expected_io_set_iov(expected_io, 1, iov[j].iov_base + 640, 128);
        ut_expected_io_set_iov(expected_io, 2, iov[j + 1].iov_base, 256);
        TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

        /* The child IO must be a multiple of blocklen, so iov[j + 2] must be
         * split.  If the third entry were also added, the multiple of blocklen
         * could not be guaranteed.  The entry still occupies one child iov
         * entry of the parent IO.
         */
        expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, i * 6 + 2, 2, 2);
        ut_expected_io_set_iov(expected_io, 0, iov[j + 1].iov_base + 256, 512);
        ut_expected_io_set_iov(expected_io, 1, iov[j + 2].iov_base, 512);
        TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

        expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, i * 6 + 4, 2, 3);
        ut_expected_io_set_iov(expected_io, 0, iov[j + 2].iov_base + 512, 256);
        ut_expected_io_set_iov(expected_io, 1, iov[j + 3].iov_base, 640);
        ut_expected_io_set_iov(expected_io, 2, iov[j + 3].iov_base + 640, 128);
        TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
    }
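
    /* The per-round byte math, for reference: four 768-byte parent iovs hold
     * 3072 bytes = 6 blocks.  With max_segment_size = 640 and max_num_segments = 3
     * they are carved into three 1024-byte (2-block) children:
     * 640 + 128 + 256, then 512 + 512, then 256 + 640 + 128.
     */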

    /* Child iov position at 27, the 10th child IO: its parent iov entry index
     * is 3 * 4 and its block offset is 3 * 6.
     */
    expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 18, 2, 3);
    ut_expected_io_set_iov(expected_io, 0, iov[12].iov_base, 640);
    ut_expected_io_set_iov(expected_io, 1, iov[12].iov_base + 640, 128);
    ut_expected_io_set_iov(expected_io, 2, iov[13].iov_base, 256);
    TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

    /* Child iov position at 30, the 11th child IO */
    expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 20, 2, 2);
    ut_expected_io_set_iov(expected_io, 0, iov[13].iov_base + 256, 512);
    ut_expected_io_set_iov(expected_io, 1, iov[14].iov_base, 512);
    TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

    /* The 2nd split round, iovpos is 0: the 12th child IO */
    expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 22, 2, 3);
    ut_expected_io_set_iov(expected_io, 0, iov[14].iov_base + 512, 256);
    ut_expected_io_set_iov(expected_io, 1, iov[15].iov_base, 640);
    ut_expected_io_set_iov(expected_io, 2, iov[15].iov_base + 640, 128);
    TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

    /* Consume 9 child IOs and 27 child iov entries.
     * Each for() round consumes 4 parent IO iov entries and 6 blocks.
     * The parent IO iov index starts from 16 and the block offset starts from 24.
     */
    for (i = 0; i < 3; i++) {
        uint32_t j = i * 4 + 16;
        uint32_t offset = i * 6 + 24;

        expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, offset, 2, 3);
        ut_expected_io_set_iov(expected_io, 0, iov[j].iov_base, 640);
        ut_expected_io_set_iov(expected_io, 1, iov[j].iov_base + 640, 128);
        ut_expected_io_set_iov(expected_io, 2, iov[j + 1].iov_base, 256);
        TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

        /* The child IO must be a multiple of blocklen, so iov[j + 2] must be
         * split.  If the third entry were also added, the multiple of blocklen
         * could not be guaranteed.  The entry still occupies one child iov
         * entry of the parent IO.
         */
        expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, offset + 2, 2, 2);
        ut_expected_io_set_iov(expected_io, 0, iov[j + 1].iov_base + 256, 512);
        ut_expected_io_set_iov(expected_io, 1, iov[j + 2].iov_base, 512);
        TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

        expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, offset + 4, 2, 3);
        ut_expected_io_set_iov(expected_io, 0, iov[j + 2].iov_base + 512, 256);
        ut_expected_io_set_iov(expected_io, 1, iov[j + 3].iov_base, 640);
        ut_expected_io_set_iov(expected_io, 2, iov[j + 3].iov_base + 640, 128);
        TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
    }

    /* The 22nd child IO, child iov position at 30 */
    expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 42, 1, 1);
    ut_expected_io_set_iov(expected_io, 0, iov[28].iov_base, 512);
    TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

    /* The third round */
    /* Here is the 23rd child IO and the child iovpos is 0 */
    expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 43, 2, 3);
    ut_expected_io_set_iov(expected_io, 0, iov[28].iov_base + 512, 256);
    ut_expected_io_set_iov(expected_io, 1, iov[29].iov_base, 640);
    ut_expected_io_set_iov(expected_io, 2, iov[29].iov_base + 640, 128);
    TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

    /* The 24th child IO */
    expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 45, 3, 3);
    ut_expected_io_set_iov(expected_io, 0, iov[30].iov_base, 640);
    ut_expected_io_set_iov(expected_io, 1, iov[31].iov_base, 640);
    ut_expected_io_set_iov(expected_io, 2, iov[32].iov_base, 256);
    TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

    /* The 25th child IO */
    expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 48, 2, 2);
    ut_expected_io_set_iov(expected_io, 0, iov[32].iov_base + 256, 384);
    ut_expected_io_set_iov(expected_io, 1, iov[33].iov_base, 640);
    TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

    rc = spdk_bdev_readv_blocks(desc, io_ch, iov, BDEV_IO_NUM_CHILD_IOV + 2, 0,
                                50, io_done, NULL);
    CU_ASSERT(rc == 0);
    CU_ASSERT(g_io_done == false);

    /* The parent IO supports up to 32 child iovs, so at most 11 child IOs can be
     * split at a time; splitting continues after the first batch finishes.
     */
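    /* Capacity check for the first batch: each round occupies 9 parent child-iov
     * entries (the blocklen trim leaves one entry unusable), so the 9 children of
     * the three rounds end at position 27, the 10th child ends at 30, and the
     * 11th child's 2 entries land exactly on the 32-entry capacity.
     */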
    CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 11);
    stub_complete_io(11);
    CU_ASSERT(g_io_done == false);

    /* The 2nd round */
    CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 11);
    stub_complete_io(11);
    CU_ASSERT(g_io_done == false);

    /* The last round */
    CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 3);
    stub_complete_io(3);
    CU_ASSERT(g_io_done == true);
    CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);

    /* Test a WRITE_ZEROES.  This should also not be split. */
    bdev->max_segment_size = 512;
    bdev->max_num_segments = 1;
    g_io_done = false;

    expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, 9, 36, 0);
    TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

    rc = spdk_bdev_write_zeroes_blocks(desc, io_ch, 9, 36, io_done, NULL);
    CU_ASSERT(rc == 0);
    CU_ASSERT(g_io_done == false);
    CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
    stub_complete_io(1);
    CU_ASSERT(g_io_done == true);

    /* Test an UNMAP.  This should also not be split. */
    g_io_done = false;

    expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_UNMAP, 15, 4, 0);
    TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

    rc = spdk_bdev_unmap_blocks(desc, io_ch, 15, 4, io_done, NULL);
    CU_ASSERT(rc == 0);
    CU_ASSERT(g_io_done == false);
    CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
    stub_complete_io(1);
    CU_ASSERT(g_io_done == true);

    /* Test a FLUSH.  This should also not be split. */
    g_io_done = false;

    expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_FLUSH, 15, 4, 0);
    TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

    rc = spdk_bdev_flush_blocks(desc, io_ch, 15, 4, io_done, NULL);
    CU_ASSERT(rc == 0);
    CU_ASSERT(g_io_done == false);
    CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
    stub_complete_io(1);
    CU_ASSERT(g_io_done == true);

    spdk_put_io_channel(io_ch);
    spdk_bdev_close(desc);
    free_bdev(bdev);
    spdk_bdev_finish(bdev_fini_cb, NULL);
    poll_threads();
}

static void
bdev_io_mix_split_test(void)
{
    struct spdk_bdev *bdev;
    struct spdk_bdev_desc *desc = NULL;
    struct spdk_io_channel *io_ch;
    struct spdk_bdev_opts bdev_opts = {};
    struct iovec iov[BDEV_IO_NUM_CHILD_IOV * 2];
    struct ut_expected_io *expected_io;
    uint64_t i;
    int rc;

    spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts));
    bdev_opts.bdev_io_pool_size = 512;
    bdev_opts.bdev_io_cache_size = 64;

    rc = spdk_bdev_set_opts(&bdev_opts);
    CU_ASSERT(rc == 0);
    spdk_bdev_initialize(bdev_init_cb, NULL);

    bdev = allocate_bdev("bdev0");

    rc = spdk_bdev_open_ext(bdev->name, true, bdev_ut_event_cb, NULL, &desc);
    CU_ASSERT(rc == 0);
    SPDK_CU_ASSERT_FATAL(desc != NULL);
    io_ch = spdk_bdev_get_io_channel(desc);
    CU_ASSERT(io_ch != NULL);

    /* First case: optimal_io_boundary == max_segment_size * max_num_segments */
    bdev->split_on_optimal_io_boundary = true;
    bdev->optimal_io_boundary = 16;

    bdev->max_segment_size = 512;
    bdev->max_num_segments = 16;
    g_io_done = false;

    /* An IO crossing the IO boundary requires splitting.
     * Total: 2 child IOs.
     */

    /* The 1st child IO splits the segment_size into multiple segment entries */
    expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 14, 2, 2);
    ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 512);
    ut_expected_io_set_iov(expected_io, 1, (void *)(0xF000 + 512), 512);
    TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

    /* The 2nd child IO splits the segment_size into multiple segment entries */
    expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 16, 2, 2);
    ut_expected_io_set_iov(expected_io, 0, (void *)(0xF000 + 2 * 512), 512);
    ut_expected_io_set_iov(expected_io, 1, (void *)(0xF000 + 3 * 512), 512);
    TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

    rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 4, io_done, NULL);
    CU_ASSERT(rc == 0);
    CU_ASSERT(g_io_done == false);

    CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2);
    stub_complete_io(2);
    CU_ASSERT(g_io_done == true);
    CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);

    /* Second case: optimal_io_boundary > max_segment_size * max_num_segments */
    bdev->max_segment_size = 15 * 512;
    bdev->max_num_segments = 1;
    g_io_done = false;

    /* An IO crossing the IO boundary requires splitting.
     * The 1st child IO segment size exceeds max_segment_size, so the 1st child
     * IO is split into multiple segment entries and then into 2 child IOs
     * because of max_num_segments.
     * Total: 3 child IOs.
     */

    /* The first 2 IOs are in an IO boundary.
     * Because optimal_io_boundary > max_segment_size * max_num_segments,
     * the boundary range is split into the first 2 child IOs.
     */
    expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0, 15, 1);
    ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 512 * 15);
    TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

    expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 15, 1, 1);
    ut_expected_io_set_iov(expected_io, 0, (void *)(0xF000 + 512 * 15), 512);
    TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

    /* The 3rd child IO exists because of the IO boundary */
    expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 16, 2, 1);
    ut_expected_io_set_iov(expected_io, 0, (void *)(0xF000 + 512 * 16), 512 * 2);
    TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

    rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 0, 18, io_done, NULL);
    CU_ASSERT(rc == 0);
    CU_ASSERT(g_io_done == false);

    CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 3);
    stub_complete_io(3);
    CU_ASSERT(g_io_done == true);
    CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
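
    /* For the second case, the 18-block read decomposes as 15 + 1 + 2: the first
     * 16-block boundary window is cut at 15 blocks by max_segment_size (15 * 512)
     * with max_num_segments = 1, the 16th block completes that window, and the
     * final 2 blocks fall in the next boundary window.
     */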

    /* Third case: optimal_io_boundary < max_segment_size * max_num_segments */
    bdev->max_segment_size = 17 * 512;
    bdev->max_num_segments = 1;
    g_io_done = false;

    /* An IO crossing the IO boundary requires splitting.
     * The child IOs do not split further.
     * Total: 2 child IOs.
     */

    expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0, 16, 1);
    ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 512 * 16);
    TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

    expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 16, 2, 1);
    ut_expected_io_set_iov(expected_io, 0, (void *)(0xF000 + 512 * 16), 512 * 2);
    TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

    rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 0, 18, io_done, NULL);
    CU_ASSERT(rc == 0);
    CU_ASSERT(g_io_done == false);

    CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2);
    stub_complete_io(2);
    CU_ASSERT(g_io_done == true);
    CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);

    /* Now set up a more complex, multi-vector command that needs to be split,
     * including splitting iovecs.
     * optimal_io_boundary < max_segment_size * max_num_segments
     */
    bdev->max_segment_size = 3 * 512;
    bdev->max_num_segments = 6;
    g_io_done = false;

    iov[0].iov_base = (void *)0x10000;
    iov[0].iov_len = 4 * 512;
    iov[1].iov_base = (void *)0x20000;
    iov[1].iov_len = 4 * 512;
    iov[2].iov_base = (void *)0x30000;
    iov[2].iov_len = 10 * 512;

    /* An IO crossing the IO boundary requires splitting.
     * The 1st child IO segment size exceeds max_segment_size and, after the
     * segments are split, the segment count exceeds max_num_segments, so the
     * boundary range is split into 2 child IOs.
     * Total: 3 child IOs.
     */

    /* The first 2 IOs are in an IO boundary.
     * After the segment size is split, the segment count exceeds the maximum,
     * so the range splits into 2 child IOs.
     */
    expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 0, 14, 6);
    ut_expected_io_set_iov(expected_io, 0, iov[0].iov_base, 512 * 3);
    ut_expected_io_set_iov(expected_io, 1, iov[0].iov_base + 512 * 3, 512);
    ut_expected_io_set_iov(expected_io, 2, iov[1].iov_base, 512 * 3);
    ut_expected_io_set_iov(expected_io, 3, iov[1].iov_base + 512 * 3, 512);
    ut_expected_io_set_iov(expected_io, 4, iov[2].iov_base, 512 * 3);
    ut_expected_io_set_iov(expected_io, 5, iov[2].iov_base + 512 * 3, 512 * 3);
    TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

    /* The 2nd and 3rd child IOs pick up the remaining segment entries */
    expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 14, 2, 1);
    ut_expected_io_set_iov(expected_io, 0, iov[2].iov_base + 512 * 6, 512 * 2);
    TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

    expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 16, 2, 1);
    ut_expected_io_set_iov(expected_io, 0, iov[2].iov_base + 512 * 8, 512 * 2);
    TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

    rc = spdk_bdev_writev_blocks(desc, io_ch, iov, 3, 0, 18, io_done, NULL);
    CU_ASSERT(rc == 0);
    CU_ASSERT(g_io_done == false);

    CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 3);
    stub_complete_io(3);
    CU_ASSERT(g_io_done == true);
    CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);

    /* A very complicated case.  Each sg entry exceeds max_segment_size and is
     * also split on the IO boundary.
     * optimal_io_boundary < max_segment_size * max_num_segments
     */
    bdev->max_segment_size = 3 * 512;
    bdev->max_num_segments = BDEV_IO_NUM_CHILD_IOV;
    g_io_done = false;

    for (i = 0; i < 20; i++) {
        iov[i].iov_base = (void *)((i + 1) * 0x10000);
        iov[i].iov_len = 512 * 4;
    }

    /* An IO crossing the IO boundary requires splitting.
     * The 80-block length splits into 5 child IOs based on the offset and the
     * IO boundary, and each iov entry is split into 2 entries because of
     * max_segment_size.
     * Total: 5 child IOs.
     */

    /* 4 iov entries fall within one IO boundary and each iov entry splits in 2,
     * so each child IO occupies 8 child iov entries.
     */
    expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 0, 16, 8);
    for (i = 0; i < 4; i++) {
        int iovcnt = i * 2;

        ut_expected_io_set_iov(expected_io, iovcnt, iov[i].iov_base, 512 * 3);
        ut_expected_io_set_iov(expected_io, iovcnt + 1, iov[i].iov_base + 512 * 3, 512);
    }
    TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

    /* The 2nd child IO: 16 child iov entries of the parent IO used in total */
    expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 16, 16, 8);
    for (i = 4; i < 8; i++) {
        int iovcnt = (i - 4) * 2;

        ut_expected_io_set_iov(expected_io, iovcnt, iov[i].iov_base, 512 * 3);
        ut_expected_io_set_iov(expected_io, iovcnt + 1, iov[i].iov_base + 512 * 3, 512);
    }
    TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

    /* The 3rd child IO: 24 child iov entries of the parent IO used in total */
    expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 32, 16, 8);
    for (i = 8; i < 12; i++) {
        int iovcnt = (i - 8) * 2;

        ut_expected_io_set_iov(expected_io, iovcnt, iov[i].iov_base, 512 * 3);
        ut_expected_io_set_iov(expected_io, iovcnt + 1, iov[i].iov_base + 512 * 3, 512);
    }
    TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

    /* The 4th child IO: all 32 child iov entries of the parent IO used */
    expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 48, 16, 8);
    for (i = 12; i < 16; i++) {
        int iovcnt = (i - 12) * 2;

        ut_expected_io_set_iov(expected_io, iovcnt, iov[i].iov_base, 512 * 3);
        ut_expected_io_set_iov(expected_io, iovcnt + 1, iov[i].iov_base + 512 * 3, 512);
    }
    TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

    /* The 5th child IO: because of the child iov entry capacity it is split
     * into the next round.
     */
    expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 64, 16, 8);
    for (i = 16; i < 20; i++) {
        int iovcnt = (i - 16) * 2;

        ut_expected_io_set_iov(expected_io, iovcnt, iov[i].iov_base, 512 * 3);
        ut_expected_io_set_iov(expected_io, iovcnt + 1, iov[i].iov_base + 512 * 3, 512);
    }
    TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

    rc = spdk_bdev_writev_blocks(desc, io_ch, iov, 20, 0, 80, io_done, NULL);
    CU_ASSERT(rc == 0);
    CU_ASSERT(g_io_done == false);

    /* First split round */
    CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 4);
    stub_complete_io(4);
    CU_ASSERT(g_io_done == false);

    /* Second split round */
    CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
    stub_complete_io(1);
    CU_ASSERT(g_io_done == true);
    CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
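
    /* The split math for the 80-block write above: each 16-block boundary window
     * holds 4 iovs of 2048 bytes, each split 1536 + 512, so every child IO needs
     * 8 entries.  Four children fill the 32-entry child iov capacity in the first
     * round, leaving the 5th child for the second round.
     */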

    spdk_put_io_channel(io_ch);
    spdk_bdev_close(desc);
    free_bdev(bdev);
    spdk_bdev_finish(bdev_fini_cb, NULL);
    poll_threads();
}

static void
bdev_io_split_with_io_wait(void)
{
    struct spdk_bdev *bdev;
    struct spdk_bdev_desc *desc = NULL;
    struct spdk_io_channel *io_ch;
    struct spdk_bdev_channel *channel;
    struct spdk_bdev_mgmt_channel *mgmt_ch;
    struct spdk_bdev_opts bdev_opts = {};
    struct iovec iov[3];
    struct ut_expected_io *expected_io;
    int rc;

    spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts));
    bdev_opts.bdev_io_pool_size = 2;
    bdev_opts.bdev_io_cache_size = 1;

    rc = spdk_bdev_set_opts(&bdev_opts);
    CU_ASSERT(rc == 0);
    spdk_bdev_initialize(bdev_init_cb, NULL);

    bdev = allocate_bdev("bdev0");

    rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc);
    CU_ASSERT(rc == 0);
    CU_ASSERT(desc != NULL);
    CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc));
    io_ch = spdk_bdev_get_io_channel(desc);
    CU_ASSERT(io_ch != NULL);
    channel = spdk_io_channel_get_ctx(io_ch);
    mgmt_ch = channel->shared_resource->mgmt_ch;

    bdev->optimal_io_boundary = 16;
    bdev->split_on_optimal_io_boundary = true;

    rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, NULL);
    CU_ASSERT(rc == 0);

    /* Now test that a single-vector command is split correctly.
     * Offset 14, length 8, payload 0xF000
     *  Child - Offset 14, length 2, payload 0xF000
     *  Child - Offset 16, length 6, payload 0xF000 + 2 * 512
     *
     * Set up the expected values before calling spdk_bdev_read_blocks
     */
    expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 14, 2, 1);
    ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 2 * 512);
    TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

    expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 16, 6, 1);
    ut_expected_io_set_iov(expected_io, 0, (void *)(0xF000 + 2 * 512), 6 * 512);
    TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

    /* The following children will be submitted sequentially due to the capacity
     * of spdk_bdev_io.
     */

    /* The first child I/O will be queued to wait until an spdk_bdev_io becomes available */
    rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 8, io_done, NULL);
    CU_ASSERT(rc == 0);
    CU_ASSERT(!TAILQ_EMPTY(&mgmt_ch->io_wait_queue));
    CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);

    /* Completing the first read I/O will submit the first child */
    stub_complete_io(1);
    CU_ASSERT(TAILQ_EMPTY(&mgmt_ch->io_wait_queue));
    CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);

    /* Completing the first child will submit the second child */
    stub_complete_io(1);
    CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);

    /* Complete the second child I/O.  This should result in our callback getting
     * invoked since the parent I/O is now complete.
     */
    stub_complete_io(1);
    CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
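
    /* What forced the wait above: bdev_io_pool_size is 2, and the outstanding
     * single-block read plus the split parent already hold both spdk_bdev_io
     * objects, so the first child cannot be allocated and is parked on the
     * mgmt channel's io_wait_queue until a completion returns one to the pool.
     */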

    /* Now set up a more complex, multi-vector command that needs to be split,
     * including splitting iovecs.
     */
    iov[0].iov_base = (void *)0x10000;
    iov[0].iov_len = 512;
    iov[1].iov_base = (void *)0x20000;
    iov[1].iov_len = 20 * 512;
    iov[2].iov_base = (void *)0x30000;
    iov[2].iov_len = 11 * 512;

    g_io_done = false;
    expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 14, 2, 2);
    ut_expected_io_set_iov(expected_io, 0, (void *)0x10000, 512);
    ut_expected_io_set_iov(expected_io, 1, (void *)0x20000, 512);
    TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

    expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 16, 16, 1);
    ut_expected_io_set_iov(expected_io, 0, (void *)(0x20000 + 512), 16 * 512);
    TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

    expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 32, 14, 2);
    ut_expected_io_set_iov(expected_io, 0, (void *)(0x20000 + 17 * 512), 3 * 512);
    ut_expected_io_set_iov(expected_io, 1, (void *)0x30000, 11 * 512);
    TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

    rc = spdk_bdev_writev_blocks(desc, io_ch, iov, 3, 14, 32, io_done, NULL);
    CU_ASSERT(rc == 0);
    CU_ASSERT(g_io_done == false);

    /* The following children will be submitted sequentially due to the capacity
     * of spdk_bdev_io.
     */

    /* Completing the first child will submit the second child */
    CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
    stub_complete_io(1);
    CU_ASSERT(g_io_done == false);

    /* Completing the second child will submit the third child */
    CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
    stub_complete_io(1);
    CU_ASSERT(g_io_done == false);

    /* Completing the third child will result in our callback getting invoked
     * since the parent I/O is now complete.
     */
    CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
    stub_complete_io(1);
    CU_ASSERT(g_io_done == true);

    CU_ASSERT(TAILQ_EMPTY(&g_bdev_ut_channel->expected_io));

    spdk_put_io_channel(io_ch);
    spdk_bdev_close(desc);
    free_bdev(bdev);
    spdk_bdev_finish(bdev_fini_cb, NULL);
    poll_threads();
}

static void
bdev_io_alignment(void)
{
    struct spdk_bdev *bdev;
    struct spdk_bdev_desc *desc = NULL;
    struct spdk_io_channel *io_ch;
    struct spdk_bdev_opts bdev_opts = {};
    int rc;
    void *buf = NULL;
    struct iovec iovs[2];
    int iovcnt;
    uint64_t alignment;

    spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts));
    bdev_opts.bdev_io_pool_size = 20;
    bdev_opts.bdev_io_cache_size = 2;

    rc = spdk_bdev_set_opts(&bdev_opts);
    CU_ASSERT(rc == 0);
    spdk_bdev_initialize(bdev_init_cb, NULL);

    fn_table.submit_request = stub_submit_request_get_buf;
    bdev = allocate_bdev("bdev0");

    rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc);
    CU_ASSERT(rc == 0);
    CU_ASSERT(desc != NULL);
    CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc));
    io_ch = spdk_bdev_get_io_channel(desc);
    CU_ASSERT(io_ch != NULL);

    /* Create aligned buffer */
    rc = posix_memalign(&buf, 4096, 8192);
    SPDK_CU_ASSERT_FATAL(rc == 0);

    /* Pass aligned single buffer with no alignment required */
    alignment = 1;
    bdev->required_alignment = spdk_u32log2(alignment);

    rc = spdk_bdev_write_blocks(desc, io_ch, buf, 0, 1, io_done, NULL);
    CU_ASSERT(rc == 0);
    stub_complete_io(1);
    CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt,
                                alignment));

    rc = spdk_bdev_read_blocks(desc, io_ch, buf, 0, 1, io_done, NULL);
    CU_ASSERT(rc == 0);
    stub_complete_io(1);
    CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt,
                                alignment));

    /* Pass unaligned single buffer with no alignment required */
    alignment = 1;
    bdev->required_alignment = spdk_u32log2(alignment);

    rc = spdk_bdev_write_blocks(desc, io_ch, buf + 4, 0, 1, io_done, NULL);
    CU_ASSERT(rc == 0);
    CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);
    CU_ASSERT(g_bdev_io->u.bdev.iovs[0].iov_base == buf + 4);
    stub_complete_io(1);

    rc = spdk_bdev_read_blocks(desc, io_ch, buf + 4, 0, 1, io_done, NULL);
    CU_ASSERT(rc == 0);
    CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);
    CU_ASSERT(g_bdev_io->u.bdev.iovs[0].iov_base == buf + 4);
    stub_complete_io(1);

    /* Pass unaligned single buffer with 512 alignment required */
    alignment = 512;
    bdev->required_alignment = spdk_u32log2(alignment);

    rc = spdk_bdev_write_blocks(desc, io_ch, buf + 4, 0, 1, io_done, NULL);
    CU_ASSERT(rc == 0);
    CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 1);
    CU_ASSERT(g_bdev_io->u.bdev.iovs == &g_bdev_io->internal.bounce_iov);
    CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt,
                                alignment));
    stub_complete_io(1);
    CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);

    rc = spdk_bdev_read_blocks(desc, io_ch, buf + 4, 0, 1, io_done, NULL);
    CU_ASSERT(rc == 0);
    CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 1);
    CU_ASSERT(g_bdev_io->u.bdev.iovs == &g_bdev_io->internal.bounce_iov);
    CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt,
                                alignment));
    stub_complete_io(1);
    CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);

    /* Pass unaligned single buffer with 4096 alignment required */
    alignment = 4096;
    bdev->required_alignment = spdk_u32log2(alignment);

    rc = spdk_bdev_write_blocks(desc, io_ch, buf + 8, 0, 1, io_done, NULL);
    CU_ASSERT(rc == 0);
    CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 1);
    CU_ASSERT(g_bdev_io->u.bdev.iovs == &g_bdev_io->internal.bounce_iov);
    CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt,
                                alignment));
    stub_complete_io(1);
    CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);

    rc = spdk_bdev_read_blocks(desc, io_ch, buf + 8, 0, 1, io_done, NULL);
    CU_ASSERT(rc == 0);
    CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 1);
    CU_ASSERT(g_bdev_io->u.bdev.iovs == &g_bdev_io->internal.bounce_iov);
    CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt,
                                alignment));
    stub_complete_io(1);
    CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);
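
    /* Note that required_alignment is stored as a power-of-two exponent:
     * spdk_u32log2(1) == 0 (no constraint), spdk_u32log2(512) == 9 and
     * spdk_u32log2(4096) == 12.  When a submitted buffer is not aligned to
     * (1 << required_alignment), the bdev layer copies it into an aligned
     * bounce buffer, which is why iovs then point at internal.bounce_iov.
     */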

    /* Pass aligned iovs with no alignment required */
    alignment = 1;
    bdev->required_alignment = spdk_u32log2(alignment);

    iovcnt = 1;
    iovs[0].iov_base = buf;
    iovs[0].iov_len = 512;

    rc = spdk_bdev_writev(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL);
    CU_ASSERT(rc == 0);
    CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);
    stub_complete_io(1);
    CU_ASSERT(g_bdev_io->u.bdev.iovs[0].iov_base == iovs[0].iov_base);

    rc = spdk_bdev_readv(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL);
    CU_ASSERT(rc == 0);
    CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);
    stub_complete_io(1);
    CU_ASSERT(g_bdev_io->u.bdev.iovs[0].iov_base == iovs[0].iov_base);

    /* Pass unaligned iovs with no alignment required */
    alignment = 1;
    bdev->required_alignment = spdk_u32log2(alignment);

    iovcnt = 2;
    iovs[0].iov_base = buf + 16;
    iovs[0].iov_len = 256;
    iovs[1].iov_base = buf + 16 + 256 + 32;
    iovs[1].iov_len = 256;

    rc = spdk_bdev_writev(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL);
    CU_ASSERT(rc == 0);
    CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);
    stub_complete_io(1);
    CU_ASSERT(g_bdev_io->u.bdev.iovs[0].iov_base == iovs[0].iov_base);

    rc = spdk_bdev_readv(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL);
    CU_ASSERT(rc == 0);
    CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);
    stub_complete_io(1);
    CU_ASSERT(g_bdev_io->u.bdev.iovs[0].iov_base == iovs[0].iov_base);

    /* Pass unaligned iovs with 2048 alignment required */
    alignment = 2048;
    bdev->required_alignment = spdk_u32log2(alignment);

    iovcnt = 2;
    iovs[0].iov_base = buf + 16;
    iovs[0].iov_len = 256;
    iovs[1].iov_base = buf + 16 + 256 + 32;
    iovs[1].iov_len = 256;

    rc = spdk_bdev_writev(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL);
    CU_ASSERT(rc == 0);
    CU_ASSERT(g_bdev_io->internal.orig_iovcnt == iovcnt);
    CU_ASSERT(g_bdev_io->u.bdev.iovs == &g_bdev_io->internal.bounce_iov);
    CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt,
                                alignment));
    stub_complete_io(1);
    CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);

    rc = spdk_bdev_readv(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL);
    CU_ASSERT(rc == 0);
    CU_ASSERT(g_bdev_io->internal.orig_iovcnt == iovcnt);
	CU_ASSERT(g_bdev_io->u.bdev.iovs == &g_bdev_io->internal.bounce_iov);
	CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt,
				    alignment));
	stub_complete_io(1);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);

	/* Pass iov without an allocated buffer, with no alignment required */
	alignment = 1;
	bdev->required_alignment = spdk_u32log2(alignment);

	iovcnt = 1;
	iovs[0].iov_base = NULL;
	iovs[0].iov_len = 0;

	rc = spdk_bdev_readv(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);
	CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt,
				    alignment));
	stub_complete_io(1);

	/* Pass iov without an allocated buffer, with 1024 alignment required */
	alignment = 1024;
	bdev->required_alignment = spdk_u32log2(alignment);

	iovcnt = 1;
	iovs[0].iov_base = NULL;
	iovs[0].iov_len = 0;

	rc = spdk_bdev_readv(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);
	CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt,
				    alignment));
	stub_complete_io(1);

	spdk_put_io_channel(io_ch);
	spdk_bdev_close(desc);
	free_bdev(bdev);
	fn_table.submit_request = stub_submit_request;
	spdk_bdev_finish(bdev_fini_cb, NULL);
	poll_threads();

	free(buf);
}

static void
bdev_io_alignment_with_boundary(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc = NULL;
	struct spdk_io_channel *io_ch;
	struct spdk_bdev_opts bdev_opts = {};
	int rc;
	void *buf = NULL;
	struct iovec iovs[2];
	int iovcnt;
	uint64_t alignment;

	spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts));
	bdev_opts.bdev_io_pool_size = 20;
	bdev_opts.bdev_io_cache_size = 2;

	bdev_opts.opts_size = sizeof(bdev_opts);
	rc = spdk_bdev_set_opts(&bdev_opts);
	CU_ASSERT(rc == 0);
	spdk_bdev_initialize(bdev_init_cb, NULL);

	fn_table.submit_request = stub_submit_request_get_buf;
	bdev = allocate_bdev("bdev0");

	rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc);
	CU_ASSERT(rc == 0);
	CU_ASSERT(desc != NULL);
	CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc));
	io_ch = spdk_bdev_get_io_channel(desc);
	CU_ASSERT(io_ch != NULL);

	/* Create aligned buffer */
	rc = posix_memalign(&buf, 4096, 131072);
	SPDK_CU_ASSERT_FATAL(rc == 0);
	g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS;

	/* 512 * 3 with 2 IO boundary, allocate small data buffer from bdev layer */
	alignment = 512;
	bdev->required_alignment = spdk_u32log2(alignment);
	bdev->optimal_io_boundary = 2;
	bdev->split_on_optimal_io_boundary = true;

	iovcnt = 1;
	iovs[0].iov_base = NULL;
	iovs[0].iov_len = 512 * 3;

	rc = spdk_bdev_readv_blocks(desc, io_ch, iovs, iovcnt, 1, 3, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2);
	stub_complete_io(2);

	/* 8KiB with 16 IO boundary, allocate large data buffer from bdev layer */
	alignment = 512;
	bdev->required_alignment = spdk_u32log2(alignment);
	bdev->optimal_io_boundary = 16;
	bdev->split_on_optimal_io_boundary = true;
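	/* With iov_base == NULL the bdev layer allocates the data buffer itself.
	 * Starting at offset 1, 16 blocks cross the optimal boundary at block 16,
	 * so this I/O is expected to split into two children: [1, 15] and [16].
	 */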
	iovcnt = 1;
	iovs[0].iov_base = NULL;
	iovs[0].iov_len = 512 * 16;

	rc = spdk_bdev_readv_blocks(desc, io_ch, iovs, iovcnt, 1, 16, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2);
	stub_complete_io(2);

	/* 512 * 160 with 128 IO boundary, 63.5KiB + 16.5KiB for the two child requests */
	alignment = 512;
	bdev->required_alignment = spdk_u32log2(alignment);
	bdev->optimal_io_boundary = 128;
	bdev->split_on_optimal_io_boundary = true;

	iovcnt = 1;
	iovs[0].iov_base = buf + 16;
	iovs[0].iov_len = 512 * 160;
	rc = spdk_bdev_readv_blocks(desc, io_ch, iovs, iovcnt, 1, 160, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2);
	stub_complete_io(2);

	/* 512 * 3 with 2 IO boundary */
	alignment = 512;
	bdev->required_alignment = spdk_u32log2(alignment);
	bdev->optimal_io_boundary = 2;
	bdev->split_on_optimal_io_boundary = true;

	iovcnt = 2;
	iovs[0].iov_base = buf + 16;
	iovs[0].iov_len = 512;
	iovs[1].iov_base = buf + 16 + 512 + 32;
	iovs[1].iov_len = 1024;

	rc = spdk_bdev_writev_blocks(desc, io_ch, iovs, iovcnt, 1, 3, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2);
	stub_complete_io(2);

	rc = spdk_bdev_readv_blocks(desc, io_ch, iovs, iovcnt, 1, 3, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2);
	stub_complete_io(2);

	/* 512 * 64 with 32 IO boundary */
	bdev->optimal_io_boundary = 32;
	iovcnt = 2;
	iovs[0].iov_base = buf + 16;
	iovs[0].iov_len = 16384;
	iovs[1].iov_base = buf + 16 + 16384 + 32;
	iovs[1].iov_len = 16384;

	rc = spdk_bdev_writev_blocks(desc, io_ch, iovs, iovcnt, 1, 64, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 3);
	stub_complete_io(3);

	rc = spdk_bdev_readv_blocks(desc, io_ch, iovs, iovcnt, 1, 64, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 3);
	stub_complete_io(3);

	/* 512 * 160 with 32 IO boundary */
	iovcnt = 1;
	iovs[0].iov_base = buf + 16;
	iovs[0].iov_len = 16384 + 65536;

	rc = spdk_bdev_writev_blocks(desc, io_ch, iovs, iovcnt, 1, 160, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 6);
	stub_complete_io(6);

	spdk_put_io_channel(io_ch);
	spdk_bdev_close(desc);
	free_bdev(bdev);
	fn_table.submit_request = stub_submit_request;
	spdk_bdev_finish(bdev_fini_cb, NULL);
	poll_threads();

	free(buf);
}

static void
histogram_status_cb(void *cb_arg, int status)
{
	g_status = status;
}

static void
histogram_data_cb(void *cb_arg, int status, struct spdk_histogram_data *histogram)
{
	g_status = status;
	g_histogram = histogram;
}

static void
histogram_io_count(void *ctx, uint64_t start, uint64_t end, uint64_t count,
		   uint64_t total, uint64_t so_far)
{
	g_count += count;
}

static void
bdev_histograms(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc = NULL;
	struct spdk_io_channel *ch;
	struct spdk_histogram_data *histogram;
	uint8_t buf[4096];
	int rc;

	spdk_bdev_initialize(bdev_init_cb, NULL);

	bdev = allocate_bdev("bdev");
spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc); 3228 CU_ASSERT(rc == 0); 3229 CU_ASSERT(desc != NULL); 3230 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 3231 3232 ch = spdk_bdev_get_io_channel(desc); 3233 CU_ASSERT(ch != NULL); 3234 3235 /* Enable histogram */ 3236 g_status = -1; 3237 spdk_bdev_histogram_enable(bdev, histogram_status_cb, NULL, true); 3238 poll_threads(); 3239 CU_ASSERT(g_status == 0); 3240 CU_ASSERT(bdev->internal.histogram_enabled == true); 3241 3242 /* Allocate histogram */ 3243 histogram = spdk_histogram_data_alloc(); 3244 SPDK_CU_ASSERT_FATAL(histogram != NULL); 3245 3246 /* Check if histogram is zeroed */ 3247 spdk_bdev_histogram_get(bdev, histogram, histogram_data_cb, NULL); 3248 poll_threads(); 3249 CU_ASSERT(g_status == 0); 3250 SPDK_CU_ASSERT_FATAL(g_histogram != NULL); 3251 3252 g_count = 0; 3253 spdk_histogram_data_iterate(g_histogram, histogram_io_count, NULL); 3254 3255 CU_ASSERT(g_count == 0); 3256 3257 rc = spdk_bdev_write_blocks(desc, ch, buf, 0, 1, io_done, NULL); 3258 CU_ASSERT(rc == 0); 3259 3260 spdk_delay_us(10); 3261 stub_complete_io(1); 3262 poll_threads(); 3263 3264 rc = spdk_bdev_read_blocks(desc, ch, buf, 0, 1, io_done, NULL); 3265 CU_ASSERT(rc == 0); 3266 3267 spdk_delay_us(10); 3268 stub_complete_io(1); 3269 poll_threads(); 3270 3271 /* Check if histogram gathered data from all I/O channels */ 3272 g_histogram = NULL; 3273 spdk_bdev_histogram_get(bdev, histogram, histogram_data_cb, NULL); 3274 poll_threads(); 3275 CU_ASSERT(g_status == 0); 3276 CU_ASSERT(bdev->internal.histogram_enabled == true); 3277 SPDK_CU_ASSERT_FATAL(g_histogram != NULL); 3278 3279 g_count = 0; 3280 spdk_histogram_data_iterate(g_histogram, histogram_io_count, NULL); 3281 CU_ASSERT(g_count == 2); 3282 3283 /* Disable histogram */ 3284 spdk_bdev_histogram_enable(bdev, histogram_status_cb, NULL, false); 3285 poll_threads(); 3286 CU_ASSERT(g_status == 0); 3287 CU_ASSERT(bdev->internal.histogram_enabled == false); 3288 3289 /* Try to run histogram commands on disabled bdev */ 3290 spdk_bdev_histogram_get(bdev, histogram, histogram_data_cb, NULL); 3291 poll_threads(); 3292 CU_ASSERT(g_status == -EFAULT); 3293 3294 spdk_histogram_data_free(histogram); 3295 spdk_put_io_channel(ch); 3296 spdk_bdev_close(desc); 3297 free_bdev(bdev); 3298 spdk_bdev_finish(bdev_fini_cb, NULL); 3299 poll_threads(); 3300 } 3301 3302 static void 3303 _bdev_compare(bool emulated) 3304 { 3305 struct spdk_bdev *bdev; 3306 struct spdk_bdev_desc *desc = NULL; 3307 struct spdk_io_channel *ioch; 3308 struct ut_expected_io *expected_io; 3309 uint64_t offset, num_blocks; 3310 uint32_t num_completed; 3311 char aa_buf[512]; 3312 char bb_buf[512]; 3313 struct iovec compare_iov; 3314 uint8_t io_type; 3315 int rc; 3316 3317 if (emulated) { 3318 io_type = SPDK_BDEV_IO_TYPE_READ; 3319 } else { 3320 io_type = SPDK_BDEV_IO_TYPE_COMPARE; 3321 } 3322 3323 memset(aa_buf, 0xaa, sizeof(aa_buf)); 3324 memset(bb_buf, 0xbb, sizeof(bb_buf)); 3325 3326 g_io_types_supported[SPDK_BDEV_IO_TYPE_COMPARE] = !emulated; 3327 3328 spdk_bdev_initialize(bdev_init_cb, NULL); 3329 fn_table.submit_request = stub_submit_request_get_buf; 3330 bdev = allocate_bdev("bdev"); 3331 3332 rc = spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc); 3333 CU_ASSERT_EQUAL(rc, 0); 3334 SPDK_CU_ASSERT_FATAL(desc != NULL); 3335 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 3336 ioch = spdk_bdev_get_io_channel(desc); 3337 SPDK_CU_ASSERT_FATAL(ioch != NULL); 3338 3339 fn_table.submit_request = stub_submit_request_get_buf; 3340 
	g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS;

	offset = 50;
	num_blocks = 1;
	compare_iov.iov_base = aa_buf;
	compare_iov.iov_len = sizeof(aa_buf);

	expected_io = ut_alloc_expected_io(io_type, offset, num_blocks, 0);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	g_io_done = false;
	g_compare_read_buf = aa_buf;
	g_compare_read_buf_len = sizeof(aa_buf);
	rc = spdk_bdev_comparev_blocks(desc, ioch, &compare_iov, 1, offset, num_blocks, io_done, NULL);
	CU_ASSERT_EQUAL(rc, 0);
	num_completed = stub_complete_io(1);
	CU_ASSERT_EQUAL(num_completed, 1);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS);

	expected_io = ut_alloc_expected_io(io_type, offset, num_blocks, 0);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	g_io_done = false;
	g_compare_read_buf = bb_buf;
	g_compare_read_buf_len = sizeof(bb_buf);
	rc = spdk_bdev_comparev_blocks(desc, ioch, &compare_iov, 1, offset, num_blocks, io_done, NULL);
	CU_ASSERT_EQUAL(rc, 0);
	num_completed = stub_complete_io(1);
	CU_ASSERT_EQUAL(num_completed, 1);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_MISCOMPARE);

	spdk_put_io_channel(ioch);
	spdk_bdev_close(desc);
	free_bdev(bdev);
	fn_table.submit_request = stub_submit_request;
	spdk_bdev_finish(bdev_fini_cb, NULL);
	poll_threads();

	g_io_types_supported[SPDK_BDEV_IO_TYPE_COMPARE] = true;

	g_compare_read_buf = NULL;
}

static void
bdev_compare(void)
{
	_bdev_compare(true);
	_bdev_compare(false);
}

static void
bdev_compare_and_write(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc = NULL;
	struct spdk_io_channel *ioch;
	struct ut_expected_io *expected_io;
	uint64_t offset, num_blocks;
	uint32_t num_completed;
	char aa_buf[512];
	char bb_buf[512];
	char cc_buf[512];
	char write_buf[512];
	struct iovec compare_iov;
	struct iovec write_iov;
	int rc;

	memset(aa_buf, 0xaa, sizeof(aa_buf));
	memset(bb_buf, 0xbb, sizeof(bb_buf));
	memset(cc_buf, 0xcc, sizeof(cc_buf));

	g_io_types_supported[SPDK_BDEV_IO_TYPE_COMPARE] = false;

	spdk_bdev_initialize(bdev_init_cb, NULL);
	fn_table.submit_request = stub_submit_request_get_buf;
	bdev = allocate_bdev("bdev");

	rc = spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc);
	CU_ASSERT_EQUAL(rc, 0);
	SPDK_CU_ASSERT_FATAL(desc != NULL);
	CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc));
	ioch = spdk_bdev_get_io_channel(desc);
	SPDK_CU_ASSERT_FATAL(ioch != NULL);

	fn_table.submit_request = stub_submit_request_get_buf;
	g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS;

	offset = 50;
	num_blocks = 1;
	compare_iov.iov_base = aa_buf;
	compare_iov.iov_len = sizeof(aa_buf);
	write_iov.iov_base = bb_buf;
	write_iov.iov_len = sizeof(bb_buf);

	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, offset, num_blocks, 0);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, offset, num_blocks, 0);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
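	/* Emulated compare-and-write locks the LBA range, reads the blocks, compares
	 * them against compare_iov, writes write_iov on a match, and finally unlocks
	 * the range. That is why the stub must complete two I/Os (the READ and the
	 * WRITE) and poll_threads() must run for the lock/unlock steps below.
	 */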
	g_io_done = false;
	g_compare_read_buf = aa_buf;
	g_compare_read_buf_len = sizeof(aa_buf);
	memset(write_buf, 0, sizeof(write_buf));
	g_compare_write_buf = write_buf;
	g_compare_write_buf_len = sizeof(write_buf);
	rc = spdk_bdev_comparev_and_writev_blocks(desc, ioch, &compare_iov, 1, &write_iov, 1,
			offset, num_blocks, io_done, NULL);
	/* Trigger range locking */
	poll_threads();
	CU_ASSERT_EQUAL(rc, 0);
	num_completed = stub_complete_io(1);
	CU_ASSERT_EQUAL(num_completed, 1);
	CU_ASSERT(g_io_done == false);
	num_completed = stub_complete_io(1);
	/* Trigger range unlocking */
	poll_threads();
	CU_ASSERT_EQUAL(num_completed, 1);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS);
	CU_ASSERT(memcmp(write_buf, bb_buf, sizeof(write_buf)) == 0);

	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, offset, num_blocks, 0);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	g_io_done = false;
	g_compare_read_buf = cc_buf;
	g_compare_read_buf_len = sizeof(cc_buf);
	memset(write_buf, 0, sizeof(write_buf));
	g_compare_write_buf = write_buf;
	g_compare_write_buf_len = sizeof(write_buf);
	rc = spdk_bdev_comparev_and_writev_blocks(desc, ioch, &compare_iov, 1, &write_iov, 1,
			offset, num_blocks, io_done, NULL);
	/* Trigger range locking */
	poll_threads();
	CU_ASSERT_EQUAL(rc, 0);
	num_completed = stub_complete_io(1);
	/* Trigger range unlocking earlier because we expect an error here */
	poll_threads();
	CU_ASSERT_EQUAL(num_completed, 1);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_MISCOMPARE);
	num_completed = stub_complete_io(1);
	CU_ASSERT_EQUAL(num_completed, 0);

	spdk_put_io_channel(ioch);
	spdk_bdev_close(desc);
	free_bdev(bdev);
	fn_table.submit_request = stub_submit_request;
	spdk_bdev_finish(bdev_fini_cb, NULL);
	poll_threads();

	g_io_types_supported[SPDK_BDEV_IO_TYPE_COMPARE] = true;

	g_compare_read_buf = NULL;
	g_compare_write_buf = NULL;
}

static void
bdev_write_zeroes(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc = NULL;
	struct spdk_io_channel *ioch;
	struct ut_expected_io *expected_io;
	uint64_t offset, num_io_blocks, num_blocks;
	uint32_t num_completed, num_requests;
	int rc;

	spdk_bdev_initialize(bdev_init_cb, NULL);
	bdev = allocate_bdev("bdev");

	rc = spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc);
	CU_ASSERT_EQUAL(rc, 0);
	SPDK_CU_ASSERT_FATAL(desc != NULL);
	CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc));
	ioch = spdk_bdev_get_io_channel(desc);
	SPDK_CU_ASSERT_FATAL(ioch != NULL);

	fn_table.submit_request = stub_submit_request;
	g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS;

	/* First test that if the bdev supports write_zeroes, the request won't be split */
	bdev->md_len = 0;
	bdev->blocklen = 4096;
	num_blocks = (ZERO_BUFFER_SIZE / bdev->blocklen) * 2;

	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, 0, num_blocks, 0);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
	rc = spdk_bdev_write_zeroes_blocks(desc, ioch, 0, num_blocks, io_done, NULL);
	CU_ASSERT_EQUAL(rc, 0);
	num_completed = stub_complete_io(1);
	CU_ASSERT_EQUAL(num_completed, 1);
	/* Check that if write_zeroes is not supported, it is replaced by regular writes */
	ut_enable_io_type(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, false);
	num_io_blocks = ZERO_BUFFER_SIZE / bdev->blocklen;
	num_requests = 2;
	num_blocks = (ZERO_BUFFER_SIZE / bdev->blocklen) * num_requests;

	for (offset = 0; offset < num_requests; ++offset) {
		expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE,
						   offset * num_io_blocks, num_io_blocks, 0);
		TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
	}

	rc = spdk_bdev_write_zeroes_blocks(desc, ioch, 0, num_blocks, io_done, NULL);
	CU_ASSERT_EQUAL(rc, 0);
	num_completed = stub_complete_io(num_requests);
	CU_ASSERT_EQUAL(num_completed, num_requests);

	/* Check that the splitting is correct if bdev has interleaved metadata */
	bdev->md_interleave = true;
	bdev->md_len = 64;
	bdev->blocklen = 4096 + 64;
	num_blocks = (ZERO_BUFFER_SIZE / bdev->blocklen) * 2;

	num_requests = offset = 0;
	while (offset < num_blocks) {
		num_io_blocks = spdk_min(ZERO_BUFFER_SIZE / bdev->blocklen, num_blocks - offset);
		expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE,
						   offset, num_io_blocks, 0);
		TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
		offset += num_io_blocks;
		num_requests++;
	}

	rc = spdk_bdev_write_zeroes_blocks(desc, ioch, 0, num_blocks, io_done, NULL);
	CU_ASSERT_EQUAL(rc, 0);
	num_completed = stub_complete_io(num_requests);
	CU_ASSERT_EQUAL(num_completed, num_requests);
	num_completed = stub_complete_io(num_requests);
	CU_ASSERT(num_completed == 0);

	/* Check the same for a separate metadata buffer */
	bdev->md_interleave = false;
	bdev->md_len = 64;
	bdev->blocklen = 4096;

	num_requests = offset = 0;
	while (offset < num_blocks) {
		num_io_blocks = spdk_min(ZERO_BUFFER_SIZE / (bdev->blocklen + bdev->md_len), num_blocks);
		expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE,
						   offset, num_io_blocks, 0);
		expected_io->md_buf = (char *)g_bdev_mgr.zero_buffer + num_io_blocks * bdev->blocklen;
		TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
		offset += num_io_blocks;
		num_requests++;
	}

	rc = spdk_bdev_write_zeroes_blocks(desc, ioch, 0, num_blocks, io_done, NULL);
	CU_ASSERT_EQUAL(rc, 0);
	num_completed = stub_complete_io(num_requests);
	CU_ASSERT_EQUAL(num_completed, num_requests);

	ut_enable_io_type(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, true);
	spdk_put_io_channel(ioch);
	spdk_bdev_close(desc);
	free_bdev(bdev);
	spdk_bdev_finish(bdev_fini_cb, NULL);
	poll_threads();
}

static void
bdev_zcopy_write(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc = NULL;
	struct spdk_io_channel *ioch;
	struct ut_expected_io *expected_io;
	uint64_t offset, num_blocks;
	uint32_t num_completed;
	char aa_buf[512];
	struct iovec iov;
	int rc;
	const bool populate = false;
	const bool commit = true;

	memset(aa_buf, 0xaa, sizeof(aa_buf));

	spdk_bdev_initialize(bdev_init_cb, NULL);
	bdev = allocate_bdev("bdev");

	rc = spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc);
	CU_ASSERT_EQUAL(rc, 0);
	SPDK_CU_ASSERT_FATAL(desc != NULL);
	CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc));
	ioch = spdk_bdev_get_io_channel(desc);
	SPDK_CU_ASSERT_FATAL(ioch != NULL);

	g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS;

	offset = 50;
	num_blocks = 1;
	iov.iov_base = NULL;
	iov.iov_len = 0;

	g_zcopy_read_buf = (void *) 0x1122334455667788UL;
	g_zcopy_read_buf_len = (uint32_t) -1;
	/* Do a zcopy start for a write (populate=false) */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_ZCOPY, offset, num_blocks, 0);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
	g_io_done = false;
	g_zcopy_write_buf = aa_buf;
	g_zcopy_write_buf_len = sizeof(aa_buf);
	g_zcopy_bdev_io = NULL;
	rc = spdk_bdev_zcopy_start(desc, ioch, &iov, 1, offset, num_blocks, populate, io_done, NULL);
	CU_ASSERT_EQUAL(rc, 0);
	num_completed = stub_complete_io(1);
	CU_ASSERT_EQUAL(num_completed, 1);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS);
	/* Check that the iov has been set up */
	CU_ASSERT(iov.iov_base == g_zcopy_write_buf);
	CU_ASSERT(iov.iov_len == g_zcopy_write_buf_len);
	/* Check that the bdev_io has been saved */
	CU_ASSERT(g_zcopy_bdev_io != NULL);
	/* Now do the zcopy end for a write (commit=true) */
	g_io_done = false;
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_ZCOPY, offset, num_blocks, 0);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
	rc = spdk_bdev_zcopy_end(g_zcopy_bdev_io, commit, io_done, NULL);
	CU_ASSERT_EQUAL(rc, 0);
	num_completed = stub_complete_io(1);
	CU_ASSERT_EQUAL(num_completed, 1);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS);
	/* Check that the g_zcopy globals are reset by io_done */
	CU_ASSERT(g_zcopy_write_buf == NULL);
	CU_ASSERT(g_zcopy_write_buf_len == 0);
	/* Check that io_done has freed the g_zcopy_bdev_io */
	CU_ASSERT(g_zcopy_bdev_io == NULL);
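	/* Note: in this harness the stub driver hands back g_zcopy_write_buf as the
	 * zcopy buffer. With a real driver, zcopy_start(populate=false) would return
	 * the device's own buffer for the caller to fill, and zcopy_end(commit=true)
	 * would then make the written data durable.
	 */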
	/* Check that the zcopy read buffer has not been touched, which
	 * ensures that the correct buffers were used.
	 */
	CU_ASSERT(g_zcopy_read_buf == (void *) 0x1122334455667788UL);
	CU_ASSERT(g_zcopy_read_buf_len == (uint32_t) -1);

	spdk_put_io_channel(ioch);
	spdk_bdev_close(desc);
	free_bdev(bdev);
	spdk_bdev_finish(bdev_fini_cb, NULL);
	poll_threads();
}

static void
bdev_zcopy_read(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc = NULL;
	struct spdk_io_channel *ioch;
	struct ut_expected_io *expected_io;
	uint64_t offset, num_blocks;
	uint32_t num_completed;
	char aa_buf[512];
	struct iovec iov;
	int rc;
	const bool populate = true;
	const bool commit = false;

	memset(aa_buf, 0xaa, sizeof(aa_buf));

	spdk_bdev_initialize(bdev_init_cb, NULL);
	bdev = allocate_bdev("bdev");

	rc = spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc);
	CU_ASSERT_EQUAL(rc, 0);
	SPDK_CU_ASSERT_FATAL(desc != NULL);
	CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc));
	ioch = spdk_bdev_get_io_channel(desc);
	SPDK_CU_ASSERT_FATAL(ioch != NULL);

	g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS;

	offset = 50;
	num_blocks = 1;
	iov.iov_base = NULL;
	iov.iov_len = 0;

	g_zcopy_write_buf = (void *) 0x1122334455667788UL;
	g_zcopy_write_buf_len = (uint32_t) -1;

	/* Do a zcopy start for a read (populate=true) */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_ZCOPY, offset, num_blocks, 0);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
	g_io_done = false;
	g_zcopy_read_buf = aa_buf;
	g_zcopy_read_buf_len = sizeof(aa_buf);
	g_zcopy_bdev_io = NULL;
	rc = spdk_bdev_zcopy_start(desc, ioch, &iov, 1, offset, num_blocks, populate, io_done, NULL);
	CU_ASSERT_EQUAL(rc, 0);
	num_completed = stub_complete_io(1);
	CU_ASSERT_EQUAL(num_completed, 1);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS);
	/* Check that the iov has been set up */
	CU_ASSERT(iov.iov_base == g_zcopy_read_buf);
	CU_ASSERT(iov.iov_len == g_zcopy_read_buf_len);
	/* Check that the bdev_io has been saved */
	CU_ASSERT(g_zcopy_bdev_io != NULL);

	/* Now do the zcopy end for a read (commit=false) */
	g_io_done = false;
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_ZCOPY, offset, num_blocks, 0);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
	rc = spdk_bdev_zcopy_end(g_zcopy_bdev_io, commit, io_done, NULL);
	CU_ASSERT_EQUAL(rc, 0);
	num_completed = stub_complete_io(1);
	CU_ASSERT_EQUAL(num_completed, 1);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS);
	/* Check that the g_zcopy globals are reset by io_done */
	CU_ASSERT(g_zcopy_read_buf == NULL);
	CU_ASSERT(g_zcopy_read_buf_len == 0);
	/* Check that io_done has freed the g_zcopy_bdev_io */
	CU_ASSERT(g_zcopy_bdev_io == NULL);
	/* Check that the zcopy write buffer has not been touched, which
	 * ensures that the correct buffers were used.
	 */
	CU_ASSERT(g_zcopy_write_buf == (void *) 0x1122334455667788UL);
	CU_ASSERT(g_zcopy_write_buf_len == (uint32_t) -1);

	spdk_put_io_channel(ioch);
	spdk_bdev_close(desc);
	free_bdev(bdev);
	spdk_bdev_finish(bdev_fini_cb, NULL);
	poll_threads();
}

static void
bdev_open_while_hotremove(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc[2] = {};
	int rc;

	bdev = allocate_bdev("bdev");

	rc = spdk_bdev_open_ext("bdev", false, bdev_ut_event_cb, NULL, &desc[0]);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(desc[0] != NULL);
	CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc[0]));

	spdk_bdev_unregister(bdev, NULL, NULL);
	/* Bdev unregister is handled asynchronously. Poll the thread to complete. */
	poll_threads();

	rc = spdk_bdev_open_ext("bdev", false, bdev_ut_event_cb, NULL, &desc[1]);
	CU_ASSERT(rc == -ENODEV);
	SPDK_CU_ASSERT_FATAL(desc[1] == NULL);

	spdk_bdev_close(desc[0]);
	free_bdev(bdev);
}

static void
bdev_close_while_hotremove(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc = NULL;
	int rc = 0;

	bdev = allocate_bdev("bdev");

	rc = spdk_bdev_open_ext("bdev", true, bdev_open_cb1, &desc, &desc);
	CU_ASSERT_EQUAL(rc, 0);
	SPDK_CU_ASSERT_FATAL(desc != NULL);
	CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc));

	/* Simulate hot-unplug by unregistering bdev */
	g_event_type1 = 0xFF;
	g_unregister_arg = NULL;
	g_unregister_rc = -1;
	spdk_bdev_unregister(bdev, bdev_unregister_cb, (void *)0x12345678);
	/* Close the device while the remove event is in flight */
	spdk_bdev_close(desc);

	/* Ensure that the unregister callback is delayed */
	CU_ASSERT_EQUAL(g_unregister_arg, NULL);
	CU_ASSERT_EQUAL(g_unregister_rc, -1);

	poll_threads();

	/* The event callback shall not be issued because the device was closed */
	CU_ASSERT_EQUAL(g_event_type1, 0xFF);
	/* The unregister callback is issued */
	CU_ASSERT_EQUAL(g_unregister_arg, (void *)0x12345678);
	CU_ASSERT_EQUAL(g_unregister_rc, 0);

	free_bdev(bdev);
}

static void
bdev_open_ext(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc1 = NULL;
	struct spdk_bdev_desc *desc2 = NULL;
	int rc = 0;

	bdev = allocate_bdev("bdev");

	rc = spdk_bdev_open_ext("bdev", true, NULL, NULL, &desc1);
	CU_ASSERT_EQUAL(rc, -EINVAL);

	rc = spdk_bdev_open_ext("bdev", true, bdev_open_cb1, &desc1, &desc1);
	CU_ASSERT_EQUAL(rc, 0);

	rc = spdk_bdev_open_ext("bdev", true, bdev_open_cb2, &desc2, &desc2);
	CU_ASSERT_EQUAL(rc, 0);

	g_event_type1 = 0xFF;
	g_event_type2 = 0xFF;

	/* Simulate hot-unplug by unregistering bdev */
	spdk_bdev_unregister(bdev, NULL, NULL);
	poll_threads();

	/* Check that the correct events have been triggered in the event callbacks */
	CU_ASSERT_EQUAL(g_event_type1, SPDK_BDEV_EVENT_REMOVE);
	CU_ASSERT_EQUAL(g_event_type2, SPDK_BDEV_EVENT_REMOVE);

	free_bdev(bdev);
	poll_threads();
}

static void
bdev_open_ext_unregister(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc1 = NULL;
	struct spdk_bdev_desc *desc2 = NULL;
	struct spdk_bdev_desc *desc3 = NULL;
	struct spdk_bdev_desc *desc4 = NULL;
	int rc = 0;

	bdev = allocate_bdev("bdev");
spdk_bdev_open_ext("bdev", true, NULL, NULL, &desc1); 3882 CU_ASSERT_EQUAL(rc, -EINVAL); 3883 3884 rc = spdk_bdev_open_ext("bdev", true, bdev_open_cb1, &desc1, &desc1); 3885 CU_ASSERT_EQUAL(rc, 0); 3886 3887 rc = spdk_bdev_open_ext("bdev", true, bdev_open_cb2, &desc2, &desc2); 3888 CU_ASSERT_EQUAL(rc, 0); 3889 3890 rc = spdk_bdev_open_ext("bdev", true, bdev_open_cb3, &desc3, &desc3); 3891 CU_ASSERT_EQUAL(rc, 0); 3892 3893 rc = spdk_bdev_open_ext("bdev", true, bdev_open_cb4, &desc4, &desc4); 3894 CU_ASSERT_EQUAL(rc, 0); 3895 3896 g_event_type1 = 0xFF; 3897 g_event_type2 = 0xFF; 3898 g_event_type3 = 0xFF; 3899 g_event_type4 = 0xFF; 3900 3901 g_unregister_arg = NULL; 3902 g_unregister_rc = -1; 3903 3904 /* Simulate hot-unplug by unregistering bdev */ 3905 spdk_bdev_unregister(bdev, bdev_unregister_cb, (void *)0x12345678); 3906 3907 /* 3908 * Unregister is handled asynchronously and event callback 3909 * (i.e., above bdev_open_cbN) will be called. 3910 * For bdev_open_cb3 and bdev_open_cb4, it is intended to not 3911 * close the desc3 and desc4 so that the bdev is not closed. 3912 */ 3913 poll_threads(); 3914 3915 /* Check if correct events have been triggered in event callback fn */ 3916 CU_ASSERT_EQUAL(g_event_type1, SPDK_BDEV_EVENT_REMOVE); 3917 CU_ASSERT_EQUAL(g_event_type2, SPDK_BDEV_EVENT_REMOVE); 3918 CU_ASSERT_EQUAL(g_event_type3, SPDK_BDEV_EVENT_REMOVE); 3919 CU_ASSERT_EQUAL(g_event_type4, SPDK_BDEV_EVENT_REMOVE); 3920 3921 /* Check that unregister callback is delayed */ 3922 CU_ASSERT(g_unregister_arg == NULL); 3923 CU_ASSERT(g_unregister_rc == -1); 3924 3925 /* 3926 * Explicitly close desc3. As desc4 is still opened there, the 3927 * unergister callback is still delayed to execute. 3928 */ 3929 spdk_bdev_close(desc3); 3930 CU_ASSERT(g_unregister_arg == NULL); 3931 CU_ASSERT(g_unregister_rc == -1); 3932 3933 /* 3934 * Explicitly close desc4 to trigger the ongoing bdev unregister 3935 * operation after last desc is closed. 
	/*
	 * Explicitly close desc4 to trigger the pending bdev unregister
	 * operation once the last descriptor is closed.
	 */
	spdk_bdev_close(desc4);

	/* Poll the thread for the async unregister operation */
	poll_threads();

	/* Check that the unregister callback is executed */
	CU_ASSERT(g_unregister_arg == (void *)0x12345678);
	CU_ASSERT(g_unregister_rc == 0);

	free_bdev(bdev);
	poll_threads();
}

struct timeout_io_cb_arg {
	struct iovec iov;
	uint8_t type;
};

static int
bdev_channel_count_submitted_io(struct spdk_bdev_channel *ch)
{
	struct spdk_bdev_io *bdev_io;
	int n = 0;

	if (!ch) {
		return -1;
	}

	TAILQ_FOREACH(bdev_io, &ch->io_submitted, internal.ch_link) {
		n++;
	}

	return n;
}

static void
bdev_channel_io_timeout_cb(void *cb_arg, struct spdk_bdev_io *bdev_io)
{
	struct timeout_io_cb_arg *ctx = cb_arg;

	ctx->type = bdev_io->type;
	ctx->iov.iov_base = bdev_io->iov.iov_base;
	ctx->iov.iov_len = bdev_io->iov.iov_len;
}

static void
bdev_set_io_timeout(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc = NULL;
	struct spdk_io_channel *io_ch = NULL;
	struct spdk_bdev_channel *bdev_ch = NULL;
	struct timeout_io_cb_arg cb_arg;

	spdk_bdev_initialize(bdev_init_cb, NULL);

	bdev = allocate_bdev("bdev");

	CU_ASSERT(spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc) == 0);
	SPDK_CU_ASSERT_FATAL(desc != NULL);
	CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc));

	io_ch = spdk_bdev_get_io_channel(desc);
	CU_ASSERT(io_ch != NULL);

	bdev_ch = spdk_io_channel_get_ctx(io_ch);
	CU_ASSERT(TAILQ_EMPTY(&bdev_ch->io_submitted));

	/* This is part 1.
	 * We check the bdev_ch->io_submitted list to make sure that it
	 * links the I/Os, and only the user-submitted I/Os.
	 */
	CU_ASSERT(spdk_bdev_read(desc, io_ch, (void *)0x1000, 0, 4096, io_done, NULL) == 0);
	CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 1);
	CU_ASSERT(spdk_bdev_write(desc, io_ch, (void *)0x2000, 0, 4096, io_done, NULL) == 0);
	CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 2);
	stub_complete_io(1);
	CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 1);
	stub_complete_io(1);
	CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 0);

	/* Split IO */
	bdev->optimal_io_boundary = 16;
	bdev->split_on_optimal_io_boundary = true;

	/* Now test that a single-vector command is split correctly.
	 * Offset 14, length 8, payload 0xF000
	 *  Child - Offset 14, length 2, payload 0xF000
	 *  Child - Offset 16, length 6, payload 0xF000 + 2 * 512
	 *
	 * Set up the expected values before calling spdk_bdev_read_blocks
	 */
	CU_ASSERT(spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 8, io_done, NULL) == 0);
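	/* Offset 14, length 8 with a boundary of 16 splits into children [14, 15]
	 * and [16, 21]; the parent plus its two children give three entries on
	 * the io_submitted list.
	 */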
	/* We count all submitted I/Os, including the I/Os generated by splitting. */
	CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 3);
	stub_complete_io(1);
	CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 2);
	stub_complete_io(1);
	CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 0);

	/* Also include the reset IO */
	CU_ASSERT(spdk_bdev_reset(desc, io_ch, io_done, NULL) == 0);
	CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 1);
	poll_threads();
	stub_complete_io(1);
	poll_threads();
	CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 0);

	/* This is part 2.
	 * Test registering the descriptor's timeout poller.
	 */

	/* Successfully set the timeout */
	CU_ASSERT(spdk_bdev_set_timeout(desc, 30, bdev_channel_io_timeout_cb, &cb_arg) == 0);
	CU_ASSERT(desc->io_timeout_poller != NULL);
	CU_ASSERT(desc->timeout_in_sec == 30);
	CU_ASSERT(desc->cb_fn == bdev_channel_io_timeout_cb);
	CU_ASSERT(desc->cb_arg == &cb_arg);

	/* Change the timeout limit */
	CU_ASSERT(spdk_bdev_set_timeout(desc, 20, bdev_channel_io_timeout_cb, &cb_arg) == 0);
	CU_ASSERT(desc->io_timeout_poller != NULL);
	CU_ASSERT(desc->timeout_in_sec == 20);
	CU_ASSERT(desc->cb_fn == bdev_channel_io_timeout_cb);
	CU_ASSERT(desc->cb_arg == &cb_arg);

	/* Disable the timeout */
	CU_ASSERT(spdk_bdev_set_timeout(desc, 0, NULL, NULL) == 0);
	CU_ASSERT(desc->io_timeout_poller == NULL);

	/* This is part 3.
	 * We test catching a timed-out I/O and check whether it is
	 * the submitted one.
	 */
	memset(&cb_arg, 0, sizeof(cb_arg));
	CU_ASSERT(spdk_bdev_set_timeout(desc, 30, bdev_channel_io_timeout_cb, &cb_arg) == 0);
	CU_ASSERT(spdk_bdev_write_blocks(desc, io_ch, (void *)0x1000, 0, 1, io_done, NULL) == 0);

	/* Don't reach the limit */
	spdk_delay_us(15 * spdk_get_ticks_hz());
	poll_threads();
	CU_ASSERT(cb_arg.type == 0);
	CU_ASSERT(cb_arg.iov.iov_base == (void *)0x0);
	CU_ASSERT(cb_arg.iov.iov_len == 0);

	/* 15 + 15 = 30: reach the limit */
	spdk_delay_us(15 * spdk_get_ticks_hz());
	poll_threads();
	CU_ASSERT(cb_arg.type == SPDK_BDEV_IO_TYPE_WRITE);
	CU_ASSERT(cb_arg.iov.iov_base == (void *)0x1000);
	CU_ASSERT(cb_arg.iov.iov_len == 1 * bdev->blocklen);
	stub_complete_io(1);

	/* Use the same split IO as above and check the IO */
	memset(&cb_arg, 0, sizeof(cb_arg));
	CU_ASSERT(spdk_bdev_write_blocks(desc, io_ch, (void *)0xF000, 14, 8, io_done, NULL) == 0);

	/* The first child completes in time */
	spdk_delay_us(15 * spdk_get_ticks_hz());
	poll_threads();
	stub_complete_io(1);
	CU_ASSERT(cb_arg.type == 0);
	CU_ASSERT(cb_arg.iov.iov_base == (void *)0x0);
	CU_ASSERT(cb_arg.iov.iov_len == 0);

	/* The second child reaches the limit */
	spdk_delay_us(15 * spdk_get_ticks_hz());
	poll_threads();
	CU_ASSERT(cb_arg.type == SPDK_BDEV_IO_TYPE_WRITE);
	CU_ASSERT(cb_arg.iov.iov_base == (void *)0xF000);
	CU_ASSERT(cb_arg.iov.iov_len == 8 * bdev->blocklen);
	stub_complete_io(1);

	/* Also include the reset IO */
	memset(&cb_arg, 0, sizeof(cb_arg));
	CU_ASSERT(spdk_bdev_reset(desc, io_ch, io_done, NULL) == 0);
	spdk_delay_us(30 * spdk_get_ticks_hz());
	poll_threads();
	CU_ASSERT(cb_arg.type == SPDK_BDEV_IO_TYPE_RESET);
	stub_complete_io(1);
	poll_threads();

	spdk_put_io_channel(io_ch);
	spdk_bdev_close(desc);
	free_bdev(bdev);
	spdk_bdev_finish(bdev_fini_cb, NULL);
	poll_threads();
}
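/* bdev_lba_range_overlapped() treats a range as the half-open interval
 * [offset, offset + length). The cases below probe both edges of
 * r1 = [100, 150), including adjacent ranges and a zero-length range.
 */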
static void
lba_range_overlap(void)
{
	struct lba_range r1, r2;

	r1.offset = 100;
	r1.length = 50;

	r2.offset = 0;
	r2.length = 1;
	CU_ASSERT(!bdev_lba_range_overlapped(&r1, &r2));

	r2.offset = 0;
	r2.length = 100;
	CU_ASSERT(!bdev_lba_range_overlapped(&r1, &r2));

	r2.offset = 0;
	r2.length = 110;
	CU_ASSERT(bdev_lba_range_overlapped(&r1, &r2));

	r2.offset = 100;
	r2.length = 10;
	CU_ASSERT(bdev_lba_range_overlapped(&r1, &r2));

	r2.offset = 110;
	r2.length = 20;
	CU_ASSERT(bdev_lba_range_overlapped(&r1, &r2));

	r2.offset = 140;
	r2.length = 150;
	CU_ASSERT(bdev_lba_range_overlapped(&r1, &r2));

	r2.offset = 130;
	r2.length = 200;
	CU_ASSERT(bdev_lba_range_overlapped(&r1, &r2));

	r2.offset = 150;
	r2.length = 100;
	CU_ASSERT(!bdev_lba_range_overlapped(&r1, &r2));

	r2.offset = 110;
	r2.length = 0;
	CU_ASSERT(!bdev_lba_range_overlapped(&r1, &r2));
}

static bool g_lock_lba_range_done;
static bool g_unlock_lba_range_done;

static void
lock_lba_range_done(void *ctx, int status)
{
	g_lock_lba_range_done = true;
}

static void
unlock_lba_range_done(void *ctx, int status)
{
	g_unlock_lba_range_done = true;
}

static void
lock_lba_range_check_ranges(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc = NULL;
	struct spdk_io_channel *io_ch;
	struct spdk_bdev_channel *channel;
	struct lba_range *range;
	int ctx1;
	int rc;

	spdk_bdev_initialize(bdev_init_cb, NULL);

	bdev = allocate_bdev("bdev0");

	rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc);
	CU_ASSERT(rc == 0);
	CU_ASSERT(desc != NULL);
	CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc));
	io_ch = spdk_bdev_get_io_channel(desc);
	CU_ASSERT(io_ch != NULL);
	channel = spdk_io_channel_get_ctx(io_ch);

	g_lock_lba_range_done = false;
	rc = bdev_lock_lba_range(desc, io_ch, 20, 10, lock_lba_range_done, &ctx1);
	CU_ASSERT(rc == 0);
	poll_threads();

	CU_ASSERT(g_lock_lba_range_done == true);
	range = TAILQ_FIRST(&channel->locked_ranges);
	SPDK_CU_ASSERT_FATAL(range != NULL);
	CU_ASSERT(range->offset == 20);
	CU_ASSERT(range->length == 10);
	CU_ASSERT(range->owner_ch == channel);
	/* Unlocks must exactly match a lock. */
	g_unlock_lba_range_done = false;
	rc = bdev_unlock_lba_range(desc, io_ch, 20, 1, unlock_lba_range_done, &ctx1);
	CU_ASSERT(rc == -EINVAL);
	CU_ASSERT(g_unlock_lba_range_done == false);

	rc = bdev_unlock_lba_range(desc, io_ch, 20, 10, unlock_lba_range_done, &ctx1);
	CU_ASSERT(rc == 0);
	spdk_delay_us(100);
	poll_threads();

	CU_ASSERT(g_unlock_lba_range_done == true);
	CU_ASSERT(TAILQ_EMPTY(&channel->locked_ranges));

	spdk_put_io_channel(io_ch);
	spdk_bdev_close(desc);
	free_bdev(bdev);
	spdk_bdev_finish(bdev_fini_cb, NULL);
	poll_threads();
}

static void
lock_lba_range_with_io_outstanding(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc = NULL;
	struct spdk_io_channel *io_ch;
	struct spdk_bdev_channel *channel;
	struct lba_range *range;
	char buf[4096];
	int ctx1;
	int rc;

	spdk_bdev_initialize(bdev_init_cb, NULL);

	bdev = allocate_bdev("bdev0");

	rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc);
	CU_ASSERT(rc == 0);
	CU_ASSERT(desc != NULL);
	CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc));
	io_ch = spdk_bdev_get_io_channel(desc);
	CU_ASSERT(io_ch != NULL);
	channel = spdk_io_channel_get_ctx(io_ch);

	g_io_done = false;
	rc = spdk_bdev_read_blocks(desc, io_ch, buf, 20, 1, io_done, &ctx1);
	CU_ASSERT(rc == 0);

	g_lock_lba_range_done = false;
	rc = bdev_lock_lba_range(desc, io_ch, 20, 10, lock_lba_range_done, &ctx1);
	CU_ASSERT(rc == 0);
	poll_threads();

	/* The lock should immediately become valid, since there are no outstanding
	 * write I/Os.
	 */
	CU_ASSERT(g_io_done == false);
	CU_ASSERT(g_lock_lba_range_done == true);
	range = TAILQ_FIRST(&channel->locked_ranges);
	SPDK_CU_ASSERT_FATAL(range != NULL);
	CU_ASSERT(range->offset == 20);
	CU_ASSERT(range->length == 10);
	CU_ASSERT(range->owner_ch == channel);
	CU_ASSERT(range->locked_ctx == &ctx1);

	rc = bdev_unlock_lba_range(desc, io_ch, 20, 10, lock_lba_range_done, &ctx1);
	CU_ASSERT(rc == 0);
	stub_complete_io(1);
	spdk_delay_us(100);
	poll_threads();

	CU_ASSERT(TAILQ_EMPTY(&channel->locked_ranges));

	/* Now try again, but with a write I/O. */
	g_io_done = false;
	rc = spdk_bdev_write_blocks(desc, io_ch, buf, 20, 1, io_done, &ctx1);
	CU_ASSERT(rc == 0);

	g_lock_lba_range_done = false;
	rc = bdev_lock_lba_range(desc, io_ch, 20, 10, lock_lba_range_done, &ctx1);
	CU_ASSERT(rc == 0);
	poll_threads();

	/* The lock should not be fully valid yet, since a write I/O is outstanding.
	 * But note that the range should be on the channel's locked_ranges list, to
	 * make sure no new write I/O is started.
	 */
	CU_ASSERT(g_io_done == false);
	CU_ASSERT(g_lock_lba_range_done == false);
	range = TAILQ_FIRST(&channel->locked_ranges);
	SPDK_CU_ASSERT_FATAL(range != NULL);
	CU_ASSERT(range->offset == 20);
	CU_ASSERT(range->length == 10);
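	/* Reads may still proceed while a lock is pending; only outstanding write
	 * I/O gates lock establishment, which is why the earlier read case granted
	 * the lock immediately while this write case leaves it pending.
	 */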
	/* Complete the write I/O. This should make the lock valid (checked by
	 * confirming our callback was invoked).
	 */
	stub_complete_io(1);
	spdk_delay_us(100);
	poll_threads();
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_lock_lba_range_done == true);

	rc = bdev_unlock_lba_range(desc, io_ch, 20, 10, unlock_lba_range_done, &ctx1);
	CU_ASSERT(rc == 0);
	poll_threads();

	CU_ASSERT(TAILQ_EMPTY(&channel->locked_ranges));

	spdk_put_io_channel(io_ch);
	spdk_bdev_close(desc);
	free_bdev(bdev);
	spdk_bdev_finish(bdev_fini_cb, NULL);
	poll_threads();
}

static void
lock_lba_range_overlapped(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc = NULL;
	struct spdk_io_channel *io_ch;
	struct spdk_bdev_channel *channel;
	struct lba_range *range;
	int ctx1;
	int rc;

	spdk_bdev_initialize(bdev_init_cb, NULL);

	bdev = allocate_bdev("bdev0");

	rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc);
	CU_ASSERT(rc == 0);
	CU_ASSERT(desc != NULL);
	CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc));
	io_ch = spdk_bdev_get_io_channel(desc);
	CU_ASSERT(io_ch != NULL);
	channel = spdk_io_channel_get_ctx(io_ch);

	/* Lock range 20-29. */
	g_lock_lba_range_done = false;
	rc = bdev_lock_lba_range(desc, io_ch, 20, 10, lock_lba_range_done, &ctx1);
	CU_ASSERT(rc == 0);
	poll_threads();

	CU_ASSERT(g_lock_lba_range_done == true);
	range = TAILQ_FIRST(&channel->locked_ranges);
	SPDK_CU_ASSERT_FATAL(range != NULL);
	CU_ASSERT(range->offset == 20);
	CU_ASSERT(range->length == 10);

	/* Try to lock range 25-39. It should not lock immediately, since it overlaps
	 * with 20-29.
	 */
	g_lock_lba_range_done = false;
	rc = bdev_lock_lba_range(desc, io_ch, 25, 15, lock_lba_range_done, &ctx1);
	CU_ASSERT(rc == 0);
	poll_threads();

	CU_ASSERT(g_lock_lba_range_done == false);
	range = TAILQ_FIRST(&bdev->internal.pending_locked_ranges);
	SPDK_CU_ASSERT_FATAL(range != NULL);
	CU_ASSERT(range->offset == 25);
	CU_ASSERT(range->length == 15);

	/* Unlock 20-29. This should result in range 25-39 now getting locked, since
	 * it no longer overlaps with an active lock.
	 */
	g_unlock_lba_range_done = false;
	rc = bdev_unlock_lba_range(desc, io_ch, 20, 10, unlock_lba_range_done, &ctx1);
	CU_ASSERT(rc == 0);
	poll_threads();

	CU_ASSERT(g_unlock_lba_range_done == true);
	CU_ASSERT(TAILQ_EMPTY(&bdev->internal.pending_locked_ranges));
	range = TAILQ_FIRST(&channel->locked_ranges);
	SPDK_CU_ASSERT_FATAL(range != NULL);
	CU_ASSERT(range->offset == 25);
	CU_ASSERT(range->length == 15);

	/* Lock 40-59. This should immediately lock since it does not overlap with the
	 * currently active 25-39 lock.
	 */
	g_lock_lba_range_done = false;
	rc = bdev_lock_lba_range(desc, io_ch, 40, 20, lock_lba_range_done, &ctx1);
	CU_ASSERT(rc == 0);
	poll_threads();

	CU_ASSERT(g_lock_lba_range_done == true);
	range = TAILQ_FIRST(&bdev->internal.locked_ranges);
	SPDK_CU_ASSERT_FATAL(range != NULL);
	range = TAILQ_NEXT(range, tailq);
	SPDK_CU_ASSERT_FATAL(range != NULL);
	CU_ASSERT(range->offset == 40);
	CU_ASSERT(range->length == 20);
	/* Try to lock 35-44. Note that this overlaps with both 25-39 and 40-59. */
	g_lock_lba_range_done = false;
	rc = bdev_lock_lba_range(desc, io_ch, 35, 10, lock_lba_range_done, &ctx1);
	CU_ASSERT(rc == 0);
	poll_threads();

	CU_ASSERT(g_lock_lba_range_done == false);
	range = TAILQ_FIRST(&bdev->internal.pending_locked_ranges);
	SPDK_CU_ASSERT_FATAL(range != NULL);
	CU_ASSERT(range->offset == 35);
	CU_ASSERT(range->length == 10);

	/* Unlock 25-39. Make sure that 35-44 is still in the pending list, since
	 * the 40-59 lock is still active.
	 */
	g_unlock_lba_range_done = false;
	rc = bdev_unlock_lba_range(desc, io_ch, 25, 15, unlock_lba_range_done, &ctx1);
	CU_ASSERT(rc == 0);
	poll_threads();

	CU_ASSERT(g_unlock_lba_range_done == true);
	CU_ASSERT(g_lock_lba_range_done == false);
	range = TAILQ_FIRST(&bdev->internal.pending_locked_ranges);
	SPDK_CU_ASSERT_FATAL(range != NULL);
	CU_ASSERT(range->offset == 35);
	CU_ASSERT(range->length == 10);

	/* Unlock 40-59. This should result in 35-44 now getting locked, since there
	 * are no longer any active overlapping locks.
	 */
	g_unlock_lba_range_done = false;
	rc = bdev_unlock_lba_range(desc, io_ch, 40, 20, unlock_lba_range_done, &ctx1);
	CU_ASSERT(rc == 0);
	poll_threads();

	CU_ASSERT(g_unlock_lba_range_done == true);
	CU_ASSERT(g_lock_lba_range_done == true);
	CU_ASSERT(TAILQ_EMPTY(&bdev->internal.pending_locked_ranges));
	range = TAILQ_FIRST(&bdev->internal.locked_ranges);
	SPDK_CU_ASSERT_FATAL(range != NULL);
	CU_ASSERT(range->offset == 35);
	CU_ASSERT(range->length == 10);

	/* Finally, unlock 35-44. */
	g_unlock_lba_range_done = false;
	rc = bdev_unlock_lba_range(desc, io_ch, 35, 10, unlock_lba_range_done, &ctx1);
	CU_ASSERT(rc == 0);
	poll_threads();

	CU_ASSERT(g_unlock_lba_range_done == true);
	CU_ASSERT(TAILQ_EMPTY(&bdev->internal.locked_ranges));

	spdk_put_io_channel(io_ch);
	spdk_bdev_close(desc);
	free_bdev(bdev);
	spdk_bdev_finish(bdev_fini_cb, NULL);
	poll_threads();
}

static void
abort_done(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
{
	g_abort_done = true;
	g_abort_status = bdev_io->internal.status;
	spdk_bdev_free_io(bdev_io);
}

static void
bdev_io_abort(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc = NULL;
	struct spdk_io_channel *io_ch;
	struct spdk_bdev_channel *channel;
	struct spdk_bdev_mgmt_channel *mgmt_ch;
	struct spdk_bdev_opts bdev_opts = {};
	struct iovec iov[BDEV_IO_NUM_CHILD_IOV * 2];
	uint64_t io_ctx1 = 0, io_ctx2 = 0, i;
	int rc;

	spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts));
	bdev_opts.bdev_io_pool_size = 7;
	bdev_opts.bdev_io_cache_size = 2;

	rc = spdk_bdev_set_opts(&bdev_opts);
	CU_ASSERT(rc == 0);
	spdk_bdev_initialize(bdev_init_cb, NULL);

	bdev = allocate_bdev("bdev0");

	rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc);
	CU_ASSERT(rc == 0);
	CU_ASSERT(desc != NULL);
	CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc));
	io_ch = spdk_bdev_get_io_channel(desc);
	CU_ASSERT(io_ch != NULL);
	channel = spdk_io_channel_get_ctx(io_ch);
	mgmt_ch = channel->shared_resource->mgmt_ch;

	g_abort_done = false;

	ut_enable_io_type(SPDK_BDEV_IO_TYPE_ABORT, false);
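	/* With ABORT support disabled in the stub, spdk_bdev_abort() is expected to
	 * fail up front with -ENOTSUP rather than submitting anything.
	 */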
	rc = spdk_bdev_abort(desc, io_ch, &io_ctx1, abort_done, NULL);
	CU_ASSERT(rc == -ENOTSUP);

	ut_enable_io_type(SPDK_BDEV_IO_TYPE_ABORT, true);

	rc = spdk_bdev_abort(desc, io_ch, &io_ctx2, abort_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_abort_done == true);
	CU_ASSERT(g_abort_status == SPDK_BDEV_IO_STATUS_FAILED);

	/* Test the case that the target I/O was successfully aborted. */
	g_io_done = false;

	rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, &io_ctx1);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	g_abort_done = false;
	g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS;

	rc = spdk_bdev_abort(desc, io_ch, &io_ctx1, abort_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED);
	stub_complete_io(1);
	CU_ASSERT(g_abort_done == true);
	CU_ASSERT(g_abort_status == SPDK_BDEV_IO_STATUS_SUCCESS);

	/* Test the case that the target I/O was not aborted because it completed
	 * in the middle of execution of the abort.
	 */
	g_io_done = false;

	rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, &io_ctx1);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	g_abort_done = false;
	g_io_exp_status = SPDK_BDEV_IO_STATUS_FAILED;

	rc = spdk_bdev_abort(desc, io_ch, &io_ctx1, abort_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS;
	stub_complete_io(1);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS);

	g_io_exp_status = SPDK_BDEV_IO_STATUS_FAILED;
	stub_complete_io(1);
	CU_ASSERT(g_abort_done == true);
	CU_ASSERT(g_abort_status == SPDK_BDEV_IO_STATUS_SUCCESS);

	g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS;

	bdev->optimal_io_boundary = 16;
	bdev->split_on_optimal_io_boundary = true;

	/* Test that a single-vector command which is split is aborted correctly.
	 * Offset 14, length 8, payload 0xF000
	 *  Child - Offset 14, length 2, payload 0xF000
	 *  Child - Offset 16, length 6, payload 0xF000 + 2 * 512
	 */
	g_io_done = false;

	rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 8, io_done, &io_ctx1);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2);

	g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS;

	rc = spdk_bdev_abort(desc, io_ch, &io_ctx1, abort_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED);
	stub_complete_io(2);
	CU_ASSERT(g_abort_done == true);
	CU_ASSERT(g_abort_status == SPDK_BDEV_IO_STATUS_SUCCESS);

	/* Test that a multi-vector command that needs to be split on the optimal
	 * I/O boundary, and then split further due to the child iov capacity, is
	 * aborted correctly. The abort is requested before the second child I/O
	 * is submitted. The parent I/O should complete with failure without
	 * submitting the second child I/O.
	 */
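	/* BDEV_IO_NUM_CHILD_IOV * 2 single-block iovs with an optimal boundary of
	 * BDEV_IO_NUM_CHILD_IOV: the first child consumes all of the child iov
	 * slots, so only one child I/O is outstanding when the abort arrives.
	 */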
	for (i = 0; i < BDEV_IO_NUM_CHILD_IOV * 2; i++) {
		iov[i].iov_base = (void *)((i + 1) * 0x10000);
		iov[i].iov_len = 512;
	}

	bdev->optimal_io_boundary = BDEV_IO_NUM_CHILD_IOV;
	g_io_done = false;
	rc = spdk_bdev_readv_blocks(desc, io_ch, iov, BDEV_IO_NUM_CHILD_IOV * 2, 0,
				    BDEV_IO_NUM_CHILD_IOV * 2, io_done, &io_ctx1);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);

	g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS;

	rc = spdk_bdev_abort(desc, io_ch, &io_ctx1, abort_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED);
	stub_complete_io(1);
	CU_ASSERT(g_abort_done == true);
	CU_ASSERT(g_abort_status == SPDK_BDEV_IO_STATUS_SUCCESS);

	g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS;

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);

	bdev->optimal_io_boundary = 16;
	g_io_done = false;

	/* Test that a single-vector command which is split is aborted correctly.
	 * Differently from the above, the child abort requests will be submitted
	 * sequentially due to the capacity of spdk_bdev_io.
	 */
	rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 50, io_done, &io_ctx1);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 4);

	g_abort_done = false;
	g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS;

	rc = spdk_bdev_abort(desc, io_ch, &io_ctx1, abort_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(!TAILQ_EMPTY(&mgmt_ch->io_wait_queue));
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 4);

	stub_complete_io(1);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED);
	stub_complete_io(3);
	CU_ASSERT(g_abort_done == true);
	CU_ASSERT(g_abort_status == SPDK_BDEV_IO_STATUS_SUCCESS);

	g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS;

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);

	spdk_put_io_channel(io_ch);
	spdk_bdev_close(desc);
	free_bdev(bdev);
	spdk_bdev_finish(bdev_fini_cb, NULL);
	poll_threads();
}

static void
bdev_unmap(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc = NULL;
	struct spdk_io_channel *ioch;
	struct spdk_bdev_channel *bdev_ch;
	struct ut_expected_io *expected_io;
	struct spdk_bdev_opts bdev_opts = {};
	uint32_t i, num_outstanding;
	uint64_t offset, num_blocks, max_unmap_blocks, num_children;
	int rc;

	spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts));
	bdev_opts.bdev_io_pool_size = 512;
	bdev_opts.bdev_io_cache_size = 64;
	rc = spdk_bdev_set_opts(&bdev_opts);
	CU_ASSERT(rc == 0);

	spdk_bdev_initialize(bdev_init_cb, NULL);
	bdev = allocate_bdev("bdev");

	rc = spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc);
	CU_ASSERT_EQUAL(rc, 0);
	SPDK_CU_ASSERT_FATAL(desc != NULL);
	CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc));
	ioch = spdk_bdev_get_io_channel(desc);
	SPDK_CU_ASSERT_FATAL(ioch != NULL);
	bdev_ch = spdk_io_channel_get_ctx(ioch);
	CU_ASSERT(TAILQ_EMPTY(&bdev_ch->io_submitted));

	fn_table.submit_request = stub_submit_request;
	g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS;
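	/* No max_unmap constraints are configured for Case 1, so a 32-block unmap
	 * should go down to the stub as a single, unsplit request.
	 */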

	/* Case 1: First test that the request won't be split */
	num_blocks = 32;

	g_io_done = false;
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_UNMAP, 0, num_blocks, 0);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
	rc = spdk_bdev_unmap_blocks(desc, ioch, 0, num_blocks, io_done, NULL);
	CU_ASSERT_EQUAL(rc, 0);
	CU_ASSERT(g_io_done == false);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);

	/* Case 2: Test the split with 2 child requests */
	bdev->max_unmap = 8;
	bdev->max_unmap_segments = 2;
	max_unmap_blocks = bdev->max_unmap * bdev->max_unmap_segments;
	num_blocks = max_unmap_blocks * 2;
	offset = 0;

	g_io_done = false;
	for (i = 0; i < 2; i++) {
		expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_UNMAP, offset, max_unmap_blocks, 0);
		TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
		offset += max_unmap_blocks;
	}

	rc = spdk_bdev_unmap_blocks(desc, ioch, 0, num_blocks, io_done, NULL);
	CU_ASSERT_EQUAL(rc, 0);
	CU_ASSERT(g_io_done == false);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2);
	stub_complete_io(2);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);

	/* Case 3: Test the split with 15 child requests; the first 8 complete
	 * before the remaining 7 are submitted.
	 */
	num_children = 15;
	num_blocks = max_unmap_blocks * num_children;
	g_io_done = false;
	offset = 0;
	for (i = 0; i < num_children; i++) {
		expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_UNMAP, offset, max_unmap_blocks, 0);
		TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
		offset += max_unmap_blocks;
	}

	rc = spdk_bdev_unmap_blocks(desc, ioch, 0, num_blocks, io_done, NULL);
	CU_ASSERT_EQUAL(rc, 0);
	CU_ASSERT(g_io_done == false);

	while (num_children > 0) {
		num_outstanding = spdk_min(num_children, SPDK_BDEV_MAX_CHILDREN_UNMAP_WRITE_ZEROES_REQS);
		CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == num_outstanding);
		stub_complete_io(num_outstanding);
		num_children -= num_outstanding;
	}
	CU_ASSERT(g_io_done == true);

	spdk_put_io_channel(ioch);
	spdk_bdev_close(desc);
	free_bdev(bdev);
	spdk_bdev_finish(bdev_fini_cb, NULL);
	poll_threads();
}
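
/* bdev_write_zeroes_split_test mirrors bdev_unmap above with the same three
 * cases (no split, two children, 15 children drained in batches), except that
 * the child size here is capped by bdev->max_write_zeroes alone rather than
 * by max_unmap * max_unmap_segments.
 */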
static void
bdev_write_zeroes_split_test(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc = NULL;
	struct spdk_io_channel *ioch;
	struct spdk_bdev_channel *bdev_ch;
	struct ut_expected_io *expected_io;
	struct spdk_bdev_opts bdev_opts = {};
	uint32_t i, num_outstanding;
	uint64_t offset, num_blocks, max_write_zeroes_blocks, num_children;
	int rc;

	spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts));
	bdev_opts.bdev_io_pool_size = 512;
	bdev_opts.bdev_io_cache_size = 64;
	rc = spdk_bdev_set_opts(&bdev_opts);
	CU_ASSERT(rc == 0);

	spdk_bdev_initialize(bdev_init_cb, NULL);
	bdev = allocate_bdev("bdev");

	rc = spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc);
	CU_ASSERT_EQUAL(rc, 0);
	SPDK_CU_ASSERT_FATAL(desc != NULL);
	CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc));
	ioch = spdk_bdev_get_io_channel(desc);
	SPDK_CU_ASSERT_FATAL(ioch != NULL);
	bdev_ch = spdk_io_channel_get_ctx(ioch);
	CU_ASSERT(TAILQ_EMPTY(&bdev_ch->io_submitted));

	fn_table.submit_request = stub_submit_request;
	g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS;

	/* Case 1: First test that the request won't be split */
	num_blocks = 32;

	g_io_done = false;
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, 0, num_blocks, 0);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
	rc = spdk_bdev_write_zeroes_blocks(desc, ioch, 0, num_blocks, io_done, NULL);
	CU_ASSERT_EQUAL(rc, 0);
	CU_ASSERT(g_io_done == false);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);

	/* Case 2: Test the split with 2 child requests */
	max_write_zeroes_blocks = 8;
	bdev->max_write_zeroes = max_write_zeroes_blocks;
	num_blocks = max_write_zeroes_blocks * 2;
	offset = 0;

	g_io_done = false;
	for (i = 0; i < 2; i++) {
		expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, offset, max_write_zeroes_blocks,
						   0);
		TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
		offset += max_write_zeroes_blocks;
	}

	rc = spdk_bdev_write_zeroes_blocks(desc, ioch, 0, num_blocks, io_done, NULL);
	CU_ASSERT_EQUAL(rc, 0);
	CU_ASSERT(g_io_done == false);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2);
	stub_complete_io(2);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);

	/* Case 3: Test the split with 15 child requests; the first 8 complete
	 * before the remaining 7 are submitted.
	 */
	num_children = 15;
	num_blocks = max_write_zeroes_blocks * num_children;
	g_io_done = false;
	offset = 0;
	for (i = 0; i < num_children; i++) {
		expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, offset, max_write_zeroes_blocks,
						   0);
		TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
		offset += max_write_zeroes_blocks;
	}

	rc = spdk_bdev_write_zeroes_blocks(desc, ioch, 0, num_blocks, io_done, NULL);
	CU_ASSERT_EQUAL(rc, 0);
	CU_ASSERT(g_io_done == false);

	while (num_children > 0) {
		num_outstanding = spdk_min(num_children, SPDK_BDEV_MAX_CHILDREN_UNMAP_WRITE_ZEROES_REQS);
		CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == num_outstanding);
		stub_complete_io(num_outstanding);
		num_children -= num_outstanding;
	}
	CU_ASSERT(g_io_done == true);

	spdk_put_io_channel(ioch);
	spdk_bdev_close(desc);
	free_bdev(bdev);
	spdk_bdev_finish(bdev_fini_cb, NULL);
	poll_threads();
}
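
/* bdev_set_options_test walks the validation paths of spdk_bdev_set_opts():
 * a zero-initialized struct (opts_size == 0) is rejected, and the buffer
 * pool sizes may not be set below BUF_SMALL_POOL_SIZE/BUF_LARGE_POOL_SIZE,
 * while values at or above those floors are accepted.
 */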
static void
bdev_set_options_test(void)
{
	struct spdk_bdev_opts bdev_opts = {};
	int rc;

	/* Case 1: Do not set opts_size */
	rc = spdk_bdev_set_opts(&bdev_opts);
	CU_ASSERT(rc == -1);

	spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts));
	bdev_opts.bdev_io_pool_size = 4;
	bdev_opts.bdev_io_cache_size = 2;
	bdev_opts.small_buf_pool_size = 4;

	/* Case 2: Do not set valid small_buf_pool_size and large_buf_pool_size */
	rc = spdk_bdev_set_opts(&bdev_opts);
	CU_ASSERT(rc == -1);

	/* Case 3: Do not set valid large_buf_pool_size */
	bdev_opts.small_buf_pool_size = BUF_SMALL_POOL_SIZE;
	bdev_opts.large_buf_pool_size = BUF_LARGE_POOL_SIZE - 1;
	rc = spdk_bdev_set_opts(&bdev_opts);
	CU_ASSERT(rc == -1);

	/* Case 4: Set a valid large_buf_pool_size */
	bdev_opts.large_buf_pool_size = BUF_LARGE_POOL_SIZE;
	rc = spdk_bdev_set_opts(&bdev_opts);
	CU_ASSERT(rc == 0);

	/* Case 5: Set different valid values for the small and large buf pools */
	bdev_opts.small_buf_pool_size = BUF_SMALL_POOL_SIZE + 3;
	bdev_opts.large_buf_pool_size = BUF_LARGE_POOL_SIZE + 3;
	rc = spdk_bdev_set_opts(&bdev_opts);
	CU_ASSERT(rc == 0);
}

static uint64_t
get_ns_time(void)
{
	int rc;
	struct timespec ts;

	rc = clock_gettime(CLOCK_MONOTONIC, &ts);
	CU_ASSERT(rc == 0);
	return ts.tv_sec * 1000 * 1000 * 1000 + ts.tv_nsec;
}

static int
rb_tree_get_height(struct spdk_bdev_name *bdev_name)
{
	int h1, h2;

	if (bdev_name == NULL) {
		return -1;
	} else {
		h1 = rb_tree_get_height(RB_LEFT(bdev_name, node));
		h2 = rb_tree_get_height(RB_RIGHT(bdev_name, node));

		return spdk_max(h1, h2) + 1;
	}
}

static void
bdev_multi_allocation(void)
{
	const int max_bdev_num = 1024 * 16;
	char name[max_bdev_num][16];
	char noexist_name[] = "invalid_bdev";
	struct spdk_bdev *bdev[max_bdev_num];
	int i, j;
	uint64_t last_time;
	int bdev_num;
	int height;

	for (j = 0; j < max_bdev_num; j++) {
		snprintf(name[j], sizeof(name[j]), "bdev%d", j);
	}

	for (i = 0; i < 16; i++) {
		last_time = get_ns_time();
		bdev_num = 1024 * (i + 1);
		for (j = 0; j < bdev_num; j++) {
			bdev[j] = allocate_bdev(name[j]);
			height = rb_tree_get_height(&bdev[j]->internal.bdev_name);
			CU_ASSERT(height <= (int)(spdk_u32log2(2 * j + 2)));
		}
		SPDK_NOTICELOG("alloc bdev num %d takes %" PRIu64 " ms\n", bdev_num,
			       (get_ns_time() - last_time) / 1000 / 1000);
		for (j = 0; j < bdev_num; j++) {
			CU_ASSERT(spdk_bdev_get_by_name(name[j]) != NULL);
		}
		CU_ASSERT(spdk_bdev_get_by_name(noexist_name) == NULL);

		for (j = 0; j < bdev_num; j++) {
			free_bdev(bdev[j]);
		}
		for (j = 0; j < bdev_num; j++) {
			CU_ASSERT(spdk_bdev_get_by_name(name[j]) == NULL);
		}
	}
}

static struct spdk_memory_domain *g_bdev_memory_domain = (struct spdk_memory_domain *) 0xf00df00d;

static int
test_bdev_get_supported_dma_device_types_op(void *ctx, struct spdk_memory_domain **domains,
		int array_size)
{
	if (array_size > 0 && domains) {
		domains[0] = g_bdev_memory_domain;
	}

	return 1;
}

static void
bdev_get_memory_domains(void)
{
	struct spdk_bdev_fn_table fn_table = {
		.get_memory_domains = test_bdev_get_supported_dma_device_types_op
	};
	struct spdk_bdev bdev = { .fn_table = &fn_table };
	struct spdk_memory_domain *domains[2] = {};
	int rc;

	/* bdev is NULL */
	rc = spdk_bdev_get_memory_domains(NULL, domains, 2);
	CU_ASSERT(rc == -EINVAL);

	/* domains is NULL */
	rc = spdk_bdev_get_memory_domains(&bdev, NULL, 2);
	CU_ASSERT(rc == 1);

	/* array size is 0 */
	rc = spdk_bdev_get_memory_domains(&bdev, domains, 0);
	CU_ASSERT(rc == 1);

	/* get_supported_dma_device_types op is set */
	rc = spdk_bdev_get_memory_domains(&bdev, domains, 2);
	CU_ASSERT(rc == 1);
	CU_ASSERT(domains[0] == g_bdev_memory_domain);
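
	/* Note: with the op implemented, spdk_bdev_get_memory_domains() reports
	 * the total number of supported domains (1 here) even when the output
	 * array is NULL or too small, so a caller can probe for the required
	 * array size with a first call.
	 */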

	/* get_supported_dma_device_types op is not set */
	fn_table.get_memory_domains = NULL;
	rc = spdk_bdev_get_memory_domains(&bdev, domains, 2);
	CU_ASSERT(rc == 0);
}

static void
bdev_writev_readv_ext(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc = NULL;
	struct spdk_io_channel *io_ch;
	char io_buf[512];
	struct iovec iov = { .iov_base = io_buf, .iov_len = 512 };
	struct ut_expected_io *expected_io;
	struct spdk_bdev_ext_io_opts ext_io_opts = {
		.metadata = (void *)0xFF000000,
		.size = sizeof(ext_io_opts)
	};
	int rc;

	spdk_bdev_initialize(bdev_init_cb, NULL);

	bdev = allocate_bdev("bdev0");
	bdev->md_interleave = false;
	bdev->md_len = 8;

	rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(desc != NULL);
	CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc));
	io_ch = spdk_bdev_get_io_channel(desc);
	CU_ASSERT(io_ch != NULL);

	/* Test 1, Simple test */
	g_io_done = false;
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 32, 14, 1);
	expected_io->md_buf = ext_io_opts.metadata;
	expected_io->ext_io_opts = &ext_io_opts;
	ut_expected_io_set_iov(expected_io, 0, iov.iov_base, iov.iov_len);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	rc = spdk_bdev_readv_blocks_ext(desc, io_ch, &iov, 1, 32, 14, io_done, NULL, &ext_io_opts);

	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == true);

	g_io_done = false;
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 32, 14, 1);
	expected_io->md_buf = ext_io_opts.metadata;
	expected_io->ext_io_opts = &ext_io_opts;
	ut_expected_io_set_iov(expected_io, 0, iov.iov_base, iov.iov_len);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	rc = spdk_bdev_writev_blocks_ext(desc, io_ch, &iov, 1, 32, 14, io_done, NULL, &ext_io_opts);

	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == true);

	/* Test 2, invalid ext_opts size */
	ext_io_opts.size = 0;
	rc = spdk_bdev_readv_blocks_ext(desc, io_ch, &iov, 1, 32, 14, io_done, NULL, &ext_io_opts);
	CU_ASSERT(rc != 0);
	rc = spdk_bdev_writev_blocks_ext(desc, io_ch, &iov, 1, 32, 14, io_done, NULL, &ext_io_opts);
	CU_ASSERT(rc != 0);

	ext_io_opts.size = sizeof(ext_io_opts) * 2;
	rc = spdk_bdev_readv_blocks_ext(desc, io_ch, &iov, 1, 32, 14, io_done, NULL, &ext_io_opts);
	CU_ASSERT(rc != 0);
	rc = spdk_bdev_writev_blocks_ext(desc, io_ch, &iov, 1, 32, 14, io_done, NULL, &ext_io_opts);
	CU_ASSERT(rc != 0);

	ext_io_opts.size = offsetof(struct spdk_bdev_ext_io_opts, metadata) +
			   sizeof(ext_io_opts.metadata) - 1;
	rc = spdk_bdev_readv_blocks_ext(desc, io_ch, &iov, 1, 32, 14, io_done, NULL, &ext_io_opts);
	CU_ASSERT(rc != 0);
	rc = spdk_bdev_writev_blocks_ext(desc, io_ch, &iov, 1, 32, 14, io_done, NULL, &ext_io_opts);
	CU_ASSERT(rc != 0);
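
	/* Summing up Test 2: ext_io_opts.size is rejected when it is zero, when
	 * it claims to be larger than the current structure, or when it does not
	 * cover the metadata field; only sizes in
	 * [offsetof(metadata) + sizeof(metadata), sizeof(struct)] are accepted,
	 * and every rejection happens before any I/O is submitted.
	 */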

	/* Test 3, Check that IO request with ext_opts and metadata is split correctly
	 * Offset 14, length 8, payload 0xF000
	 * Child - Offset 14, length 2, payload 0xF000
	 * Child - Offset 16, length 6, payload 0xF000 + 2 * 512
	 */
	bdev->optimal_io_boundary = 16;
	bdev->split_on_optimal_io_boundary = true;
	bdev->md_interleave = false;
	bdev->md_len = 8;

	iov.iov_base = (void *)0xF000;
	iov.iov_len = 4096;
	memset(&ext_io_opts, 0, sizeof(ext_io_opts));
	ext_io_opts.metadata = (void *)0xFF000000;
	ext_io_opts.size = sizeof(ext_io_opts);
	g_io_done = false;

	/* read */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 14, 2, 1);
	expected_io->md_buf = ext_io_opts.metadata;
	ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 2 * 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 16, 6, 1);
	expected_io->md_buf = ext_io_opts.metadata + 2 * 8;
	ut_expected_io_set_iov(expected_io, 0, (void *)(0xF000 + 2 * 512), 6 * 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	rc = spdk_bdev_readv_blocks_ext(desc, io_ch, &iov, 1, 14, 8, io_done, NULL, &ext_io_opts);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2);
	stub_complete_io(2);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);

	/* write */
	g_io_done = false;
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 14, 2, 1);
	expected_io->md_buf = ext_io_opts.metadata;
	ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 2 * 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 16, 6, 1);
	expected_io->md_buf = ext_io_opts.metadata + 2 * 8;
	ut_expected_io_set_iov(expected_io, 0, (void *)(0xF000 + 2 * 512), 6 * 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	rc = spdk_bdev_writev_blocks_ext(desc, io_ch, &iov, 1, 14, 8, io_done, NULL, &ext_io_opts);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2);
	stub_complete_io(2);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);

	/* Test 4, Verify data pull/push.
	 * The bdev doesn't support memory domains, so buffers from the bdev
	 * memory pool will be used. */
	ext_io_opts.memory_domain = (struct spdk_memory_domain *)0xdeadbeef;

	g_io_done = false;
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 32, 14, 1);
	ut_expected_io_set_iov(expected_io, 0, iov.iov_base, iov.iov_len);
	expected_io->ext_io_opts = &ext_io_opts;
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	rc = spdk_bdev_readv_blocks_ext(desc, io_ch, &iov, 1, 32, 14, io_done, NULL, &ext_io_opts);

	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_memory_domain_push_data_called == true);
	CU_ASSERT(g_io_done == true);

	g_io_done = false;
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 32, 14, 1);
	ut_expected_io_set_iov(expected_io, 0, iov.iov_base, iov.iov_len);
	expected_io->ext_io_opts = &ext_io_opts;
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	rc = spdk_bdev_writev_blocks_ext(desc, io_ch, &iov, 1, 32, 14, io_done, NULL, &ext_io_opts);
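
	/* For the write path, the data is pulled from the caller's memory domain
	 * into a bounce buffer at submission time, so the pull callback has
	 * already fired before the request is completed below (the read path
	 * conversely pushed the data back only on completion).
	 */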
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_memory_domain_pull_data_called == true);
	CU_ASSERT(g_io_done == false);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == true);

	spdk_put_io_channel(io_ch);
	spdk_bdev_close(desc);
	free_bdev(bdev);
	spdk_bdev_finish(bdev_fini_cb, NULL);
	poll_threads();
}

static void
bdev_register_uuid_alias(void)
{
	struct spdk_bdev *bdev, *second;
	char uuid[SPDK_UUID_STRING_LEN];
	int rc;

	spdk_bdev_initialize(bdev_init_cb, NULL);
	bdev = allocate_bdev("bdev0");

	/* Make sure a UUID was generated */
	CU_ASSERT_FALSE(spdk_mem_all_zero(&bdev->uuid, sizeof(bdev->uuid)));

	/* Check that a UUID alias was registered */
	spdk_uuid_fmt_lower(uuid, sizeof(uuid), &bdev->uuid);
	CU_ASSERT_EQUAL(spdk_bdev_get_by_name(uuid), bdev);

	/* Unregister the bdev */
	spdk_bdev_unregister(bdev, NULL, NULL);
	poll_threads();
	CU_ASSERT_PTR_NULL(spdk_bdev_get_by_name(uuid));

	/* Check the same, but this time register the bdev with a non-zero UUID */
	rc = spdk_bdev_register(bdev);
	CU_ASSERT_EQUAL(rc, 0);
	CU_ASSERT_EQUAL(spdk_bdev_get_by_name(uuid), bdev);

	/* Unregister the bdev */
	spdk_bdev_unregister(bdev, NULL, NULL);
	poll_threads();
	CU_ASSERT_PTR_NULL(spdk_bdev_get_by_name(uuid));

	/* Register the bdev using the UUID as the name */
	bdev->name = uuid;
	rc = spdk_bdev_register(bdev);
	CU_ASSERT_EQUAL(rc, 0);
	CU_ASSERT_EQUAL(spdk_bdev_get_by_name(uuid), bdev);

	/* Unregister the bdev */
	spdk_bdev_unregister(bdev, NULL, NULL);
	poll_threads();
	CU_ASSERT_PTR_NULL(spdk_bdev_get_by_name(uuid));

	/* Check that it's not possible to register two bdevs with the same UUID */
	bdev->name = "bdev0";
	second = allocate_bdev("bdev1");
	spdk_uuid_copy(&bdev->uuid, &second->uuid);
	rc = spdk_bdev_register(bdev);
	CU_ASSERT_EQUAL(rc, -EEXIST);

	/* Regenerate the UUID and re-check */
	spdk_uuid_generate(&bdev->uuid);
	rc = spdk_bdev_register(bdev);
	CU_ASSERT_EQUAL(rc, 0);

	/* And check that both bdevs can be retrieved through their UUIDs */
	spdk_uuid_fmt_lower(uuid, sizeof(uuid), &bdev->uuid);
	CU_ASSERT_EQUAL(spdk_bdev_get_by_name(uuid), bdev);
	spdk_uuid_fmt_lower(uuid, sizeof(uuid), &second->uuid);
	CU_ASSERT_EQUAL(spdk_bdev_get_by_name(uuid), second);

	free_bdev(second);
	free_bdev(bdev);
	spdk_bdev_finish(bdev_fini_cb, NULL);
	poll_threads();
}
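
/* bdev_unregister_by_name checks that spdk_bdev_unregister_by_name() matches
 * on both the bdev name and the owning module (mismatching either yields
 * -ENODEV), and that the unregister callback is deferred until the next
 * poll_threads() rather than issued inline.
 */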
static void
bdev_unregister_by_name(void)
{
	struct spdk_bdev *bdev;
	int rc;

	bdev = allocate_bdev("bdev");

	g_event_type1 = 0xFF;
	g_unregister_arg = NULL;
	g_unregister_rc = -1;

	rc = spdk_bdev_unregister_by_name("bdev1", &bdev_ut_if, bdev_unregister_cb, (void *)0x12345678);
	CU_ASSERT(rc == -ENODEV);

	rc = spdk_bdev_unregister_by_name("bdev", &vbdev_ut_if, bdev_unregister_cb, (void *)0x12345678);
	CU_ASSERT(rc == -ENODEV);

	rc = spdk_bdev_unregister_by_name("bdev", &bdev_ut_if, bdev_unregister_cb, (void *)0x12345678);
	CU_ASSERT(rc == 0);

	/* Check that the unregister callback is delayed */
	CU_ASSERT(g_unregister_arg == NULL);
	CU_ASSERT(g_unregister_rc == -1);

	poll_threads();

	/* The event callback shall not be issued because the device was closed */
	CU_ASSERT(g_event_type1 == 0xFF);
	/* The unregister callback is issued */
	CU_ASSERT(g_unregister_arg == (void *)0x12345678);
	CU_ASSERT(g_unregister_rc == 0);

	free_bdev(bdev);
}

static int
count_bdevs(void *ctx, struct spdk_bdev *bdev)
{
	int *count = ctx;

	(*count)++;

	return 0;
}

static void
for_each_bdev_test(void)
{
	struct spdk_bdev *bdev[8];
	int rc, count;

	bdev[0] = allocate_bdev("bdev0");

	bdev[1] = allocate_bdev("bdev1");
	rc = spdk_bdev_module_claim_bdev(bdev[1], NULL, &bdev_ut_if);
	CU_ASSERT(rc == 0);

	bdev[2] = allocate_bdev("bdev2");

	bdev[3] = allocate_bdev("bdev3");
	rc = spdk_bdev_module_claim_bdev(bdev[3], NULL, &bdev_ut_if);
	CU_ASSERT(rc == 0);

	bdev[4] = allocate_bdev("bdev4");

	bdev[5] = allocate_bdev("bdev5");
	rc = spdk_bdev_module_claim_bdev(bdev[5], NULL, &bdev_ut_if);
	CU_ASSERT(rc == 0);

	bdev[6] = allocate_bdev("bdev6");

	bdev[7] = allocate_bdev("bdev7");

	count = 0;
	rc = spdk_for_each_bdev(&count, count_bdevs);
	CU_ASSERT(rc == 0);
	CU_ASSERT(count == 8);

	count = 0;
	rc = spdk_for_each_bdev_leaf(&count, count_bdevs);
	CU_ASSERT(rc == 0);
	CU_ASSERT(count == 5);

	free_bdev(bdev[0]);
	free_bdev(bdev[1]);
	free_bdev(bdev[2]);
	free_bdev(bdev[3]);
	free_bdev(bdev[4]);
	free_bdev(bdev[5]);
	free_bdev(bdev[6]);
	free_bdev(bdev[7]);
}

int
main(int argc, char **argv)
{
	CU_pSuite suite = NULL;
	unsigned int num_failures;

	CU_set_error_action(CUEA_ABORT);
	CU_initialize_registry();

	suite = CU_add_suite("bdev", null_init, null_clean);

	CU_ADD_TEST(suite, bytes_to_blocks_test);
	CU_ADD_TEST(suite, num_blocks_test);
	CU_ADD_TEST(suite, io_valid_test);
	CU_ADD_TEST(suite, open_write_test);
	CU_ADD_TEST(suite, claim_test);
	CU_ADD_TEST(suite, alias_add_del_test);
	CU_ADD_TEST(suite, get_device_stat_test);
	CU_ADD_TEST(suite, bdev_io_types_test);
	CU_ADD_TEST(suite, bdev_io_wait_test);
	CU_ADD_TEST(suite, bdev_io_spans_split_test);
	CU_ADD_TEST(suite, bdev_io_boundary_split_test);
	CU_ADD_TEST(suite, bdev_io_max_size_and_segment_split_test);
	CU_ADD_TEST(suite, bdev_io_mix_split_test);
	CU_ADD_TEST(suite, bdev_io_split_with_io_wait);
	CU_ADD_TEST(suite, bdev_io_alignment_with_boundary);
	CU_ADD_TEST(suite, bdev_io_alignment);
	CU_ADD_TEST(suite, bdev_histograms);
	CU_ADD_TEST(suite, bdev_write_zeroes);
	CU_ADD_TEST(suite, bdev_compare_and_write);
	CU_ADD_TEST(suite, bdev_compare);
	CU_ADD_TEST(suite, bdev_zcopy_write);
	CU_ADD_TEST(suite, bdev_zcopy_read);
	CU_ADD_TEST(suite, bdev_open_while_hotremove);
	CU_ADD_TEST(suite, bdev_close_while_hotremove);
	CU_ADD_TEST(suite, bdev_open_ext);
	CU_ADD_TEST(suite, bdev_open_ext_unregister);
	CU_ADD_TEST(suite, bdev_set_io_timeout);
	CU_ADD_TEST(suite, lba_range_overlap);
	CU_ADD_TEST(suite, lock_lba_range_check_ranges);
	CU_ADD_TEST(suite, lock_lba_range_with_io_outstanding);
	CU_ADD_TEST(suite, lock_lba_range_overlapped);
	CU_ADD_TEST(suite, bdev_io_abort);
	CU_ADD_TEST(suite, bdev_unmap);
	CU_ADD_TEST(suite, bdev_write_zeroes_split_test);
	CU_ADD_TEST(suite, bdev_set_options_test);
	CU_ADD_TEST(suite, bdev_multi_allocation);
	CU_ADD_TEST(suite, bdev_get_memory_domains);
	CU_ADD_TEST(suite, bdev_writev_readv_ext);
	CU_ADD_TEST(suite, bdev_register_uuid_alias);
	CU_ADD_TEST(suite, bdev_unregister_by_name);
	CU_ADD_TEST(suite, for_each_bdev_test);

	allocate_cores(1);
	allocate_threads(1);
	set_thread(0);

	CU_basic_set_mode(CU_BRM_VERBOSE);
	CU_basic_run_tests();
	num_failures = CU_get_number_of_failures();
	CU_cleanup_registry();

	free_threads();
	free_cores();

	return num_failures;
}