/*   SPDX-License-Identifier: BSD-3-Clause
 *   Copyright (c) Intel Corporation. All rights reserved.
 *   Copyright (c) 2019 Mellanox Technologies LTD. All rights reserved.
 *   Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 */

#include "spdk_cunit.h"

#include "common/lib/ut_multithread.c"
#include "unit/lib/json_mock.c"

#include "spdk/config.h"
/* HACK: disable VTune integration so the unit test doesn't need VTune headers and libs to build */
#undef SPDK_CONFIG_VTUNE

#include "bdev/bdev.c"

DEFINE_STUB(spdk_notify_send, uint64_t, (const char *type, const char *ctx), 0);
DEFINE_STUB(spdk_notify_type_register, struct spdk_notify_type *, (const char *type), NULL);
DEFINE_STUB(spdk_memory_domain_get_dma_device_id, const char *, (struct spdk_memory_domain *domain),
	    "test_domain");
DEFINE_STUB(spdk_memory_domain_get_dma_device_type, enum spdk_dma_device_type,
	    (struct spdk_memory_domain *domain), 0);

static bool g_memory_domain_pull_data_called;
static bool g_memory_domain_push_data_called;

DEFINE_RETURN_MOCK(spdk_memory_domain_pull_data, int);
int
spdk_memory_domain_pull_data(struct spdk_memory_domain *src_domain, void *src_domain_ctx,
			     struct iovec *src_iov, uint32_t src_iov_cnt, struct iovec *dst_iov, uint32_t dst_iov_cnt,
			     spdk_memory_domain_data_cpl_cb cpl_cb, void *cpl_cb_arg)
{
	g_memory_domain_pull_data_called = true;
	HANDLE_RETURN_MOCK(spdk_memory_domain_pull_data);
	cpl_cb(cpl_cb_arg, 0);
	return 0;
}

DEFINE_RETURN_MOCK(spdk_memory_domain_push_data, int);
int
spdk_memory_domain_push_data(struct spdk_memory_domain *dst_domain, void *dst_domain_ctx,
			     struct iovec *dst_iov, uint32_t dst_iovcnt, struct iovec *src_iov, uint32_t src_iovcnt,
			     spdk_memory_domain_data_cpl_cb cpl_cb, void *cpl_cb_arg)
{
	g_memory_domain_push_data_called = true;
	HANDLE_RETURN_MOCK(spdk_memory_domain_push_data);
	cpl_cb(cpl_cb_arg, 0);
	return 0;
}

int g_status;
int g_count;
enum spdk_bdev_event_type g_event_type1;
enum spdk_bdev_event_type g_event_type2;
enum spdk_bdev_event_type g_event_type3;
enum spdk_bdev_event_type g_event_type4;
struct spdk_histogram_data *g_histogram;
void *g_unregister_arg;
int g_unregister_rc;

void
spdk_scsi_nvme_translate(const struct spdk_bdev_io *bdev_io,
			 int *sc, int *sk, int *asc, int *ascq)
{
}

static int
null_init(void)
{
	return 0;
}

static int
null_clean(void)
{
	return 0;
}

static int
stub_destruct(void *ctx)
{
	return 0;
}

struct ut_expected_io {
	uint8_t type;
	uint64_t offset;
	uint64_t length;
	int iovcnt;
	struct iovec iov[BDEV_IO_NUM_CHILD_IOV];
	void *md_buf;
	struct spdk_bdev_ext_io_opts *ext_io_opts;
	TAILQ_ENTRY(ut_expected_io) link;
};

struct bdev_ut_channel {
	TAILQ_HEAD(, spdk_bdev_io) outstanding_io;
	uint32_t outstanding_io_count;
	TAILQ_HEAD(, ut_expected_io) expected_io;
};

static bool g_io_done;
static struct spdk_bdev_io *g_bdev_io;
static enum spdk_bdev_io_status g_io_status;
static enum spdk_bdev_io_status g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS;
static uint32_t g_bdev_ut_io_device;
static struct bdev_ut_channel *g_bdev_ut_channel;
static void *g_compare_read_buf;
static uint32_t g_compare_read_buf_len;
static void *g_compare_write_buf;
static uint32_t g_compare_write_buf_len;
static bool g_abort_done;
static enum spdk_bdev_io_status g_abort_status;
static void *g_zcopy_read_buf;
static uint32_t g_zcopy_read_buf_len;
static void *g_zcopy_write_buf;
static uint32_t g_zcopy_write_buf_len;
static struct spdk_bdev_io *g_zcopy_bdev_io;

static struct ut_expected_io *
ut_alloc_expected_io(uint8_t type, uint64_t offset, uint64_t length, int iovcnt)
{
	struct ut_expected_io *expected_io;

	expected_io = calloc(1, sizeof(*expected_io));
	SPDK_CU_ASSERT_FATAL(expected_io != NULL);

	expected_io->type = type;
	expected_io->offset = offset;
	expected_io->length = length;
	expected_io->iovcnt = iovcnt;

	return expected_io;
}

static void
ut_expected_io_set_iov(struct ut_expected_io *expected_io, int pos, void *base, size_t len)
{
	expected_io->iov[pos].iov_base = base;
	expected_io->iov[pos].iov_len = len;
}

static void
stub_submit_request(struct spdk_io_channel *_ch, struct spdk_bdev_io *bdev_io)
{
	struct bdev_ut_channel *ch = spdk_io_channel_get_ctx(_ch);
	struct ut_expected_io *expected_io;
	struct iovec *iov, *expected_iov;
	struct spdk_bdev_io *bio_to_abort;
	int i;

	g_bdev_io = bdev_io;

	if (g_compare_read_buf && bdev_io->type == SPDK_BDEV_IO_TYPE_READ) {
		uint32_t len = bdev_io->u.bdev.iovs[0].iov_len;

		CU_ASSERT(bdev_io->u.bdev.iovcnt == 1);
		CU_ASSERT(g_compare_read_buf_len == len);
		memcpy(bdev_io->u.bdev.iovs[0].iov_base, g_compare_read_buf, len);
	}

	if (g_compare_write_buf && bdev_io->type == SPDK_BDEV_IO_TYPE_WRITE) {
		uint32_t len = bdev_io->u.bdev.iovs[0].iov_len;

		CU_ASSERT(bdev_io->u.bdev.iovcnt == 1);
		CU_ASSERT(g_compare_write_buf_len == len);
		memcpy(g_compare_write_buf, bdev_io->u.bdev.iovs[0].iov_base, len);
	}

	if (g_compare_read_buf && bdev_io->type == SPDK_BDEV_IO_TYPE_COMPARE) {
		uint32_t len = bdev_io->u.bdev.iovs[0].iov_len;

		CU_ASSERT(bdev_io->u.bdev.iovcnt == 1);
		CU_ASSERT(g_compare_read_buf_len == len);
		if (memcmp(bdev_io->u.bdev.iovs[0].iov_base, g_compare_read_buf, len)) {
			g_io_exp_status = SPDK_BDEV_IO_STATUS_MISCOMPARE;
		}
	}

	if (bdev_io->type == SPDK_BDEV_IO_TYPE_ABORT) {
		if (g_io_exp_status == SPDK_BDEV_IO_STATUS_SUCCESS) {
			TAILQ_FOREACH(bio_to_abort, &ch->outstanding_io, module_link) {
				if (bio_to_abort == bdev_io->u.abort.bio_to_abort) {
					TAILQ_REMOVE(&ch->outstanding_io, bio_to_abort, module_link);
					ch->outstanding_io_count--;
					spdk_bdev_io_complete(bio_to_abort, SPDK_BDEV_IO_STATUS_FAILED);
					break;
				}
			}
		}
	}

	if (bdev_io->type == SPDK_BDEV_IO_TYPE_ZCOPY) {
		if (bdev_io->u.bdev.zcopy.start) {
			g_zcopy_bdev_io = bdev_io;
			if (bdev_io->u.bdev.zcopy.populate) {
				/* Start of a read */
				CU_ASSERT(g_zcopy_read_buf != NULL);
				CU_ASSERT(g_zcopy_read_buf_len > 0);
				bdev_io->u.bdev.iovs[0].iov_base = g_zcopy_read_buf;
				bdev_io->u.bdev.iovs[0].iov_len = g_zcopy_read_buf_len;
				bdev_io->u.bdev.iovcnt = 1;
			} else {
				/* Start of a write */
				CU_ASSERT(g_zcopy_write_buf != NULL);
				CU_ASSERT(g_zcopy_write_buf_len > 0);
				bdev_io->u.bdev.iovs[0].iov_base = g_zcopy_write_buf;
				bdev_io->u.bdev.iovs[0].iov_len = g_zcopy_write_buf_len;
				bdev_io->u.bdev.iovcnt = 1;
			}
		} else {
			if (bdev_io->u.bdev.zcopy.commit) {
				/* End of write */
				CU_ASSERT(bdev_io->u.bdev.iovs[0].iov_base == g_zcopy_write_buf);
				CU_ASSERT(bdev_io->u.bdev.iovs[0].iov_len == g_zcopy_write_buf_len);
				CU_ASSERT(bdev_io->u.bdev.iovcnt == 1);
				g_zcopy_write_buf = NULL;
				g_zcopy_write_buf_len = 0;
			} else {
				/* End of read */
				CU_ASSERT(bdev_io->u.bdev.iovs[0].iov_base == g_zcopy_read_buf);
				CU_ASSERT(bdev_io->u.bdev.iovs[0].iov_len == g_zcopy_read_buf_len);
				CU_ASSERT(bdev_io->u.bdev.iovcnt == 1);
				g_zcopy_read_buf = NULL;
				g_zcopy_read_buf_len = 0;
			}
		}
	}
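
	/* Record the I/O as outstanding, then (if the test queued one) verify it
	 * against the next ut_expected_io entry.
	 */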
	TAILQ_INSERT_TAIL(&ch->outstanding_io, bdev_io, module_link);
	ch->outstanding_io_count++;

	expected_io = TAILQ_FIRST(&ch->expected_io);
	if (expected_io == NULL) {
		return;
	}
	TAILQ_REMOVE(&ch->expected_io, expected_io, link);

	if (expected_io->type != SPDK_BDEV_IO_TYPE_INVALID) {
		CU_ASSERT(bdev_io->type == expected_io->type);
	}

	if (expected_io->md_buf != NULL) {
		CU_ASSERT(expected_io->md_buf == bdev_io->u.bdev.md_buf);
		if (bdev_io->u.bdev.ext_opts) {
			CU_ASSERT(expected_io->md_buf == bdev_io->u.bdev.ext_opts->metadata);
		}
	}

	if (expected_io->length == 0) {
		free(expected_io);
		return;
	}

	CU_ASSERT(expected_io->offset == bdev_io->u.bdev.offset_blocks);
	CU_ASSERT(expected_io->length == bdev_io->u.bdev.num_blocks);

	if (expected_io->iovcnt == 0) {
		free(expected_io);
		/* UNMAP, WRITE_ZEROES and FLUSH don't have iovs, so we can just return now. */
		return;
	}

	CU_ASSERT(expected_io->iovcnt == bdev_io->u.bdev.iovcnt);
	for (i = 0; i < expected_io->iovcnt; i++) {
		expected_iov = &expected_io->iov[i];
		if (bdev_io->internal.orig_iovcnt == 0) {
			iov = &bdev_io->u.bdev.iovs[i];
		} else {
			iov = bdev_io->internal.orig_iovs;
		}
		CU_ASSERT(iov->iov_len == expected_iov->iov_len);
		CU_ASSERT(iov->iov_base == expected_iov->iov_base);
	}

	if (expected_io->ext_io_opts) {
		CU_ASSERT(expected_io->ext_io_opts == bdev_io->internal.ext_opts);
	}

	free(expected_io);
}

static void
stub_submit_request_get_buf_cb(struct spdk_io_channel *_ch,
			       struct spdk_bdev_io *bdev_io, bool success)
{
	CU_ASSERT(success == true);

	stub_submit_request(_ch, bdev_io);
}

static void
stub_submit_request_get_buf(struct spdk_io_channel *_ch, struct spdk_bdev_io *bdev_io)
{
	spdk_bdev_io_get_buf(bdev_io, stub_submit_request_get_buf_cb,
			     bdev_io->u.bdev.num_blocks * bdev_io->bdev->blocklen);
}
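
/* Complete up to num_to_complete outstanding I/Os in FIFO order.  Each I/O is
 * completed with g_io_exp_status, which is SUCCESS unless a test overrides it.
 */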
static uint32_t
stub_complete_io(uint32_t num_to_complete)
{
	struct bdev_ut_channel *ch = g_bdev_ut_channel;
	struct spdk_bdev_io *bdev_io;
	enum spdk_bdev_io_status io_status;
	uint32_t num_completed = 0;

	while (num_completed < num_to_complete) {
		if (TAILQ_EMPTY(&ch->outstanding_io)) {
			break;
		}
		bdev_io = TAILQ_FIRST(&ch->outstanding_io);
		TAILQ_REMOVE(&ch->outstanding_io, bdev_io, module_link);
		ch->outstanding_io_count--;
		io_status = g_io_exp_status == SPDK_BDEV_IO_STATUS_SUCCESS ? SPDK_BDEV_IO_STATUS_SUCCESS :
			    g_io_exp_status;
		spdk_bdev_io_complete(bdev_io, io_status);
		num_completed++;
	}

	return num_completed;
}

static struct spdk_io_channel *
bdev_ut_get_io_channel(void *ctx)
{
	return spdk_get_io_channel(&g_bdev_ut_io_device);
}

static bool g_io_types_supported[SPDK_BDEV_NUM_IO_TYPES] = {
	[SPDK_BDEV_IO_TYPE_READ] = true,
	[SPDK_BDEV_IO_TYPE_WRITE] = true,
	[SPDK_BDEV_IO_TYPE_COMPARE] = true,
	[SPDK_BDEV_IO_TYPE_UNMAP] = true,
	[SPDK_BDEV_IO_TYPE_FLUSH] = true,
	[SPDK_BDEV_IO_TYPE_RESET] = true,
	[SPDK_BDEV_IO_TYPE_NVME_ADMIN] = true,
	[SPDK_BDEV_IO_TYPE_NVME_IO] = true,
	[SPDK_BDEV_IO_TYPE_NVME_IO_MD] = true,
	[SPDK_BDEV_IO_TYPE_WRITE_ZEROES] = true,
	[SPDK_BDEV_IO_TYPE_ZCOPY] = true,
	[SPDK_BDEV_IO_TYPE_ABORT] = true,
};

static void
ut_enable_io_type(enum spdk_bdev_io_type io_type, bool enable)
{
	g_io_types_supported[io_type] = enable;
}

static bool
stub_io_type_supported(void *_bdev, enum spdk_bdev_io_type io_type)
{
	return g_io_types_supported[io_type];
}

static struct spdk_bdev_fn_table fn_table = {
	.destruct = stub_destruct,
	.submit_request = stub_submit_request,
	.get_io_channel = bdev_ut_get_io_channel,
	.io_type_supported = stub_io_type_supported,
};

static int
bdev_ut_create_ch(void *io_device, void *ctx_buf)
{
	struct bdev_ut_channel *ch = ctx_buf;

	CU_ASSERT(g_bdev_ut_channel == NULL);
	g_bdev_ut_channel = ch;

	TAILQ_INIT(&ch->outstanding_io);
	ch->outstanding_io_count = 0;
	TAILQ_INIT(&ch->expected_io);
	return 0;
}

static void
bdev_ut_destroy_ch(void *io_device, void *ctx_buf)
{
	CU_ASSERT(g_bdev_ut_channel != NULL);
	g_bdev_ut_channel = NULL;
}

struct spdk_bdev_module bdev_ut_if;

static int
bdev_ut_module_init(void)
{
	spdk_io_device_register(&g_bdev_ut_io_device, bdev_ut_create_ch, bdev_ut_destroy_ch,
				sizeof(struct bdev_ut_channel), NULL);
	spdk_bdev_module_init_done(&bdev_ut_if);
	return 0;
}

static void
bdev_ut_module_fini(void)
{
	spdk_io_device_unregister(&g_bdev_ut_io_device, NULL);
}

struct spdk_bdev_module bdev_ut_if = {
	.name = "bdev_ut",
	.module_init = bdev_ut_module_init,
	.module_fini = bdev_ut_module_fini,
	.async_init = true,
};

static void vbdev_ut_examine(struct spdk_bdev *bdev);

static int
vbdev_ut_module_init(void)
{
	return 0;
}

static void
vbdev_ut_module_fini(void)
{
}

struct spdk_bdev_module vbdev_ut_if = {
	.name = "vbdev_ut",
	.module_init = vbdev_ut_module_init,
	.module_fini = vbdev_ut_module_fini,
	.examine_config = vbdev_ut_examine,
};

SPDK_BDEV_MODULE_REGISTER(bdev_ut, &bdev_ut_if)
SPDK_BDEV_MODULE_REGISTER(vbdev_ut, &vbdev_ut_if)

static void
vbdev_ut_examine(struct spdk_bdev *bdev)
{
	spdk_bdev_module_examine_done(&vbdev_ut_if);
}

static struct spdk_bdev *
allocate_bdev(char *name)
{
	struct spdk_bdev *bdev;
	int rc;

	bdev = calloc(1, sizeof(*bdev));
	SPDK_CU_ASSERT_FATAL(bdev != NULL);

	bdev->name = name;
	bdev->fn_table = &fn_table;
	bdev->module = &bdev_ut_if;
	bdev->blockcnt = 1024;
	bdev->blocklen = 512;

	rc = spdk_bdev_register(bdev);
	poll_threads();
	CU_ASSERT(rc == 0);

	return bdev;
}
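
/* Note: unlike allocate_bdev(), allocate_vbdev() leaves blockcnt/blocklen at
 * zero; these vbdevs only exercise claim and open logic, never actual I/O.
 */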
static struct spdk_bdev *
allocate_vbdev(char *name)
{
	struct spdk_bdev *bdev;
	int rc;

	bdev = calloc(1, sizeof(*bdev));
	SPDK_CU_ASSERT_FATAL(bdev != NULL);

	bdev->name = name;
	bdev->fn_table = &fn_table;
	bdev->module = &vbdev_ut_if;

	rc = spdk_bdev_register(bdev);
	poll_threads();
	CU_ASSERT(rc == 0);

	return bdev;
}

static void
free_bdev(struct spdk_bdev *bdev)
{
	spdk_bdev_unregister(bdev, NULL, NULL);
	poll_threads();
	memset(bdev, 0xFF, sizeof(*bdev));
	free(bdev);
}

static void
free_vbdev(struct spdk_bdev *bdev)
{
	spdk_bdev_unregister(bdev, NULL, NULL);
	poll_threads();
	memset(bdev, 0xFF, sizeof(*bdev));
	free(bdev);
}

static void
get_device_stat_cb(struct spdk_bdev *bdev, struct spdk_bdev_io_stat *stat, void *cb_arg, int rc)
{
	const char *bdev_name;

	CU_ASSERT(bdev != NULL);
	CU_ASSERT(rc == 0);
	bdev_name = spdk_bdev_get_name(bdev);
	CU_ASSERT_STRING_EQUAL(bdev_name, "bdev0");

	free(stat);

	*(bool *)cb_arg = true;
}

static void
bdev_unregister_cb(void *cb_arg, int rc)
{
	g_unregister_arg = cb_arg;
	g_unregister_rc = rc;
}

static void
bdev_ut_event_cb(enum spdk_bdev_event_type type, struct spdk_bdev *bdev, void *event_ctx)
{
}

static void
bdev_open_cb1(enum spdk_bdev_event_type type, struct spdk_bdev *bdev, void *event_ctx)
{
	struct spdk_bdev_desc *desc = *(struct spdk_bdev_desc **)event_ctx;

	g_event_type1 = type;
	if (SPDK_BDEV_EVENT_REMOVE == type) {
		spdk_bdev_close(desc);
	}
}

static void
bdev_open_cb2(enum spdk_bdev_event_type type, struct spdk_bdev *bdev, void *event_ctx)
{
	struct spdk_bdev_desc *desc = *(struct spdk_bdev_desc **)event_ctx;

	g_event_type2 = type;
	if (SPDK_BDEV_EVENT_REMOVE == type) {
		spdk_bdev_close(desc);
	}
}

static void
bdev_open_cb3(enum spdk_bdev_event_type type, struct spdk_bdev *bdev, void *event_ctx)
{
	g_event_type3 = type;
}

static void
bdev_open_cb4(enum spdk_bdev_event_type type, struct spdk_bdev *bdev, void *event_ctx)
{
	g_event_type4 = type;
}

static void
get_device_stat_test(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_io_stat *stat;
	bool done;

	bdev = allocate_bdev("bdev0");
	stat = calloc(1, sizeof(struct spdk_bdev_io_stat));
	if (stat == NULL) {
		free_bdev(bdev);
		return;
	}

	done = false;
	spdk_bdev_get_device_stat(bdev, stat, get_device_stat_cb, &done);
	while (!done) {
		poll_threads();
	}

	free_bdev(bdev);
}
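
/* open_write_test builds the claim hierarchy described in the comment below
 * and verifies that read/write opens of claimed bdevs fail with -EPERM while
 * read-only opens succeed.
 */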
static void
open_write_test(void)
{
	struct spdk_bdev *bdev[9];
	struct spdk_bdev_desc *desc[9] = {};
	int rc;

	/*
	 * Create a tree of bdevs to test various open w/ write cases.
	 *
	 * bdev0 through bdev3 are physical block devices, such as NVMe
	 * namespaces or Ceph block devices.
	 *
	 * bdev4 is a virtual bdev with multiple base bdevs.  This models
	 * caching or RAID use cases.
	 *
	 * bdev5 through bdev7 are virtual bdevs sharing the same base bdev
	 * (bdev2); bdev7 additionally has a second base bdev (bdev3).  This
	 * models caching, RAID, partitioning or logical volume use cases.
	 *
	 * bdev8 is a virtual bdev with multiple base bdevs, but these
	 * base bdevs are themselves virtual bdevs.
	 *
	 *                bdev8
	 *                  |
	 *            +----------+
	 *            |          |
	 *          bdev4      bdev5   bdev6   bdev7
	 *            |          |       |       |
	 *        +---+---+    +---+     +   +---+---+
	 *        |       |        \     |  /        \
	 *      bdev0   bdev1      bdev2            bdev3
	 */

	bdev[0] = allocate_bdev("bdev0");
	rc = spdk_bdev_module_claim_bdev(bdev[0], NULL, &bdev_ut_if);
	CU_ASSERT(rc == 0);

	bdev[1] = allocate_bdev("bdev1");
	rc = spdk_bdev_module_claim_bdev(bdev[1], NULL, &bdev_ut_if);
	CU_ASSERT(rc == 0);

	bdev[2] = allocate_bdev("bdev2");
	rc = spdk_bdev_module_claim_bdev(bdev[2], NULL, &bdev_ut_if);
	CU_ASSERT(rc == 0);

	bdev[3] = allocate_bdev("bdev3");
	rc = spdk_bdev_module_claim_bdev(bdev[3], NULL, &bdev_ut_if);
	CU_ASSERT(rc == 0);

	bdev[4] = allocate_vbdev("bdev4");
	rc = spdk_bdev_module_claim_bdev(bdev[4], NULL, &bdev_ut_if);
	CU_ASSERT(rc == 0);

	bdev[5] = allocate_vbdev("bdev5");
	rc = spdk_bdev_module_claim_bdev(bdev[5], NULL, &bdev_ut_if);
	CU_ASSERT(rc == 0);

	bdev[6] = allocate_vbdev("bdev6");

	bdev[7] = allocate_vbdev("bdev7");

	bdev[8] = allocate_vbdev("bdev8");

	/* Open bdev0 read-only.  This should succeed. */
	rc = spdk_bdev_open_ext("bdev0", false, bdev_ut_event_cb, NULL, &desc[0]);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(desc[0] != NULL);
	CU_ASSERT(bdev[0] == spdk_bdev_desc_get_bdev(desc[0]));
	spdk_bdev_close(desc[0]);

	/*
	 * Open bdev1 read/write.  This should fail since bdev1 has been claimed
	 * by a vbdev module.
	 */
	rc = spdk_bdev_open_ext("bdev1", true, bdev_ut_event_cb, NULL, &desc[1]);
	CU_ASSERT(rc == -EPERM);

	/*
	 * Open bdev4 read/write.  This should fail since bdev4 has been claimed
	 * by a vbdev module.
	 */
	rc = spdk_bdev_open_ext("bdev4", true, bdev_ut_event_cb, NULL, &desc[4]);
	CU_ASSERT(rc == -EPERM);

	/* Open bdev4 read-only.  This should succeed. */
	rc = spdk_bdev_open_ext("bdev4", false, bdev_ut_event_cb, NULL, &desc[4]);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(desc[4] != NULL);
	CU_ASSERT(bdev[4] == spdk_bdev_desc_get_bdev(desc[4]));
	spdk_bdev_close(desc[4]);

	/*
	 * Open bdev8 read/write.  This should succeed since bdev8 has not been
	 * claimed by any module.
	 */
	rc = spdk_bdev_open_ext("bdev8", true, bdev_ut_event_cb, NULL, &desc[8]);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(desc[8] != NULL);
	CU_ASSERT(bdev[8] == spdk_bdev_desc_get_bdev(desc[8]));
	spdk_bdev_close(desc[8]);

	/*
	 * Open bdev5 read/write.  This should fail since bdev5 has been claimed
	 * by a vbdev module.
	 */
	rc = spdk_bdev_open_ext("bdev5", true, bdev_ut_event_cb, NULL, &desc[5]);
	CU_ASSERT(rc == -EPERM);

	/* Open bdev5 read-only.  This should succeed. */
	rc = spdk_bdev_open_ext("bdev5", false, bdev_ut_event_cb, NULL, &desc[5]);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(desc[5] != NULL);
	CU_ASSERT(bdev[5] == spdk_bdev_desc_get_bdev(desc[5]));
	spdk_bdev_close(desc[5]);

	free_vbdev(bdev[8]);

	free_vbdev(bdev[5]);
	free_vbdev(bdev[6]);
	free_vbdev(bdev[7]);

	free_vbdev(bdev[4]);

	free_bdev(bdev[0]);
	free_bdev(bdev[1]);
	free_bdev(bdev[2]);
	free_bdev(bdev[3]);
}

static void
claim_test(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc, *open_desc;
	int rc;
	uint32_t count;

	/*
	 * A vbdev that uses a read-only bdev may need it to remain read-only.
	 * To do so, it opens the bdev read-only, then claims it without
	 * passing a spdk_bdev_desc.
	 */
	bdev = allocate_bdev("bdev0");
	rc = spdk_bdev_open_ext("bdev0", false, bdev_ut_event_cb, NULL, &desc);
	CU_ASSERT(rc == 0);
	CU_ASSERT(desc->write == false);

	rc = spdk_bdev_module_claim_bdev(bdev, NULL, &bdev_ut_if);
	CU_ASSERT(rc == 0);
	CU_ASSERT(bdev->internal.claim_module == &bdev_ut_if);

	/* There should be only one open descriptor and it should still be ro */
	count = 0;
	TAILQ_FOREACH(open_desc, &bdev->internal.open_descs, link) {
		CU_ASSERT(open_desc == desc);
		CU_ASSERT(!open_desc->write);
		count++;
	}
	CU_ASSERT(count == 1);

	/* A read-only bdev is upgraded to read-write if desc is passed. */
	spdk_bdev_module_release_bdev(bdev);
	rc = spdk_bdev_module_claim_bdev(bdev, desc, &bdev_ut_if);
	CU_ASSERT(rc == 0);
	CU_ASSERT(bdev->internal.claim_module == &bdev_ut_if);

	/* There should be only one open descriptor and it should be rw */
	count = 0;
	TAILQ_FOREACH(open_desc, &bdev->internal.open_descs, link) {
		CU_ASSERT(open_desc == desc);
		CU_ASSERT(open_desc->write);
		count++;
	}
	CU_ASSERT(count == 1);

	spdk_bdev_close(desc);
	free_bdev(bdev);
}

static void
bytes_to_blocks_test(void)
{
	struct spdk_bdev bdev;
	uint64_t offset_blocks, num_blocks;

	memset(&bdev, 0, sizeof(bdev));

	bdev.blocklen = 512;

	/* All parameters valid */
	offset_blocks = 0;
	num_blocks = 0;
	CU_ASSERT(bdev_bytes_to_blocks(&bdev, 512, &offset_blocks, 1024, &num_blocks) == 0);
	CU_ASSERT(offset_blocks == 1);
	CU_ASSERT(num_blocks == 2);

	/* Offset not a block multiple */
	CU_ASSERT(bdev_bytes_to_blocks(&bdev, 3, &offset_blocks, 512, &num_blocks) != 0);

	/* Length not a block multiple */
	CU_ASSERT(bdev_bytes_to_blocks(&bdev, 512, &offset_blocks, 3, &num_blocks) != 0);

	/* In case blocklen is not a power of two */
	bdev.blocklen = 100;
	CU_ASSERT(bdev_bytes_to_blocks(&bdev, 100, &offset_blocks, 200, &num_blocks) == 0);
	CU_ASSERT(offset_blocks == 1);
	CU_ASSERT(num_blocks == 2);

	/* Offset not a block multiple */
	CU_ASSERT(bdev_bytes_to_blocks(&bdev, 3, &offset_blocks, 100, &num_blocks) != 0);

	/* Length not a block multiple */
	CU_ASSERT(bdev_bytes_to_blocks(&bdev, 100, &offset_blocks, 3, &num_blocks) != 0);
}
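
/* num_blocks_test: a bdev may grow at any time, but it may only shrink while
 * no descriptors are open; open descriptors receive SPDK_BDEV_EVENT_RESIZE
 * when a size change succeeds.
 */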
static void
num_blocks_test(void)
{
	struct spdk_bdev bdev;
	struct spdk_bdev_desc *desc = NULL;
	int rc;

	memset(&bdev, 0, sizeof(bdev));
	bdev.name = "num_blocks";
	bdev.fn_table = &fn_table;
	bdev.module = &bdev_ut_if;
	spdk_bdev_register(&bdev);
	poll_threads();
	spdk_bdev_notify_blockcnt_change(&bdev, 50);

	/* Growing block number */
	CU_ASSERT(spdk_bdev_notify_blockcnt_change(&bdev, 70) == 0);
	/* Shrinking block number */
	CU_ASSERT(spdk_bdev_notify_blockcnt_change(&bdev, 30) == 0);

	rc = spdk_bdev_open_ext("num_blocks", false, bdev_open_cb1, &desc, &desc);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(desc != NULL);
	CU_ASSERT(&bdev == spdk_bdev_desc_get_bdev(desc));

	/* Growing block number */
	CU_ASSERT(spdk_bdev_notify_blockcnt_change(&bdev, 80) == 0);
	/* Shrinking block number */
	CU_ASSERT(spdk_bdev_notify_blockcnt_change(&bdev, 20) != 0);

	g_event_type1 = 0xFF;
	/* Growing block number */
	CU_ASSERT(spdk_bdev_notify_blockcnt_change(&bdev, 90) == 0);

	poll_threads();
	CU_ASSERT_EQUAL(g_event_type1, SPDK_BDEV_EVENT_RESIZE);

	g_event_type1 = 0xFF;
	/* Growing block number and closing */
	CU_ASSERT(spdk_bdev_notify_blockcnt_change(&bdev, 100) == 0);

	spdk_bdev_close(desc);
	spdk_bdev_unregister(&bdev, NULL, NULL);

	poll_threads();

	/* Callback is not called for closed device */
	CU_ASSERT_EQUAL(g_event_type1, 0xFF);
}

static void
io_valid_test(void)
{
	struct spdk_bdev bdev;

	memset(&bdev, 0, sizeof(bdev));

	bdev.blocklen = 512;
	CU_ASSERT(pthread_mutex_init(&bdev.internal.mutex, NULL) == 0);

	spdk_bdev_notify_blockcnt_change(&bdev, 100);

	/* All parameters valid */
	CU_ASSERT(bdev_io_valid_blocks(&bdev, 1, 2) == true);

	/* Last valid block */
	CU_ASSERT(bdev_io_valid_blocks(&bdev, 99, 1) == true);

	/* Offset past end of bdev */
	CU_ASSERT(bdev_io_valid_blocks(&bdev, 100, 1) == false);

	/* Offset + length past end of bdev */
	CU_ASSERT(bdev_io_valid_blocks(&bdev, 99, 2) == false);

	/* Offset near end of uint64_t range (2^64 - 1) */
	CU_ASSERT(bdev_io_valid_blocks(&bdev, 18446744073709551615ULL, 1) == false);

	CU_ASSERT(pthread_mutex_destroy(&bdev.internal.mutex) == 0);
}
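
/* Aliases share the bdev name namespace: an alias may not duplicate any
 * registered name or alias, and a bdev's primary name cannot be removed
 * through the alias API.
 */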
static void
alias_add_del_test(void)
{
	struct spdk_bdev *bdev[3];
	int rc;

	/* Creating and registering bdevs */
	bdev[0] = allocate_bdev("bdev0");
	SPDK_CU_ASSERT_FATAL(bdev[0] != NULL);

	bdev[1] = allocate_bdev("bdev1");
	SPDK_CU_ASSERT_FATAL(bdev[1] != NULL);

	bdev[2] = allocate_bdev("bdev2");
	SPDK_CU_ASSERT_FATAL(bdev[2] != NULL);

	poll_threads();

	/*
	 * Try adding an alias identical to the name.
	 * The alias is identical to the name, so it can not be added to the aliases list.
	 */
	rc = spdk_bdev_alias_add(bdev[0], bdev[0]->name);
	CU_ASSERT(rc == -EEXIST);

	/* Trying to add an empty alias should fail. */
	rc = spdk_bdev_alias_add(bdev[0], NULL);
	CU_ASSERT(rc == -EINVAL);

	/* Try adding the same alias to two different registered bdevs */

	/* Alias is used for the first time, so this one should pass */
	rc = spdk_bdev_alias_add(bdev[0], "proper alias 0");
	CU_ASSERT(rc == 0);

	/* Alias was added to another bdev, so this one should fail */
	rc = spdk_bdev_alias_add(bdev[1], "proper alias 0");
	CU_ASSERT(rc == -EEXIST);

	/* Alias is used for the first time, so this one should pass */
	rc = spdk_bdev_alias_add(bdev[1], "proper alias 1");
	CU_ASSERT(rc == 0);

	/* Try removing aliases from registered bdevs */

	/* Alias is not on the bdev's aliases list, so this one should fail */
	rc = spdk_bdev_alias_del(bdev[0], "not existing");
	CU_ASSERT(rc == -ENOENT);

	/* Alias is present on the bdev's aliases list, so this one should pass */
	rc = spdk_bdev_alias_del(bdev[0], "proper alias 0");
	CU_ASSERT(rc == 0);

	/* Alias is present on the bdev's aliases list, so this one should pass */
	rc = spdk_bdev_alias_del(bdev[1], "proper alias 1");
	CU_ASSERT(rc == 0);

	/* Trying to remove the name instead of an alias should fail; the name cannot be changed or removed */
	rc = spdk_bdev_alias_del(bdev[0], bdev[0]->name);
	CU_ASSERT(rc != 0);

	/* Trying to del all aliases from an empty alias list */
	spdk_bdev_alias_del_all(bdev[2]);
	SPDK_CU_ASSERT_FATAL(TAILQ_EMPTY(&bdev[2]->aliases));

	/* Trying to del all aliases from a non-empty alias list */
	rc = spdk_bdev_alias_add(bdev[2], "alias0");
	CU_ASSERT(rc == 0);
	rc = spdk_bdev_alias_add(bdev[2], "alias1");
	CU_ASSERT(rc == 0);
	spdk_bdev_alias_del_all(bdev[2]);
	CU_ASSERT(TAILQ_EMPTY(&bdev[2]->aliases));

	/* Unregister and free bdevs */
	spdk_bdev_unregister(bdev[0], NULL, NULL);
	spdk_bdev_unregister(bdev[1], NULL, NULL);
	spdk_bdev_unregister(bdev[2], NULL, NULL);

	poll_threads();

	free(bdev[0]);
	free(bdev[1]);
	free(bdev[2]);
}

static void
io_done(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
{
	g_io_done = true;
	g_io_status = bdev_io->internal.status;
	if ((bdev_io->type == SPDK_BDEV_IO_TYPE_ZCOPY) &&
	    (bdev_io->u.bdev.zcopy.start)) {
		g_zcopy_bdev_io = bdev_io;
	} else {
		spdk_bdev_free_io(bdev_io);
		g_zcopy_bdev_io = NULL;
	}
}

static void
bdev_init_cb(void *arg, int rc)
{
	CU_ASSERT(rc == 0);
}

static void
bdev_fini_cb(void *arg)
{
}

struct bdev_ut_io_wait_entry {
	struct spdk_bdev_io_wait_entry entry;
	struct spdk_io_channel *io_ch;
	struct spdk_bdev_desc *desc;
	bool submitted;
};

static void
io_wait_cb(void *arg)
{
	struct bdev_ut_io_wait_entry *entry = arg;
	int rc;

	rc = spdk_bdev_read_blocks(entry->desc, entry->io_ch, NULL, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	entry->submitted = true;
}
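
/* bdev_io_types_test toggles entries in g_io_types_supported via
 * ut_enable_io_type() and verifies that submission returns -ENOTSUP when the
 * backing bdev reports an I/O type as unsupported.
 */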
static void
bdev_io_types_test(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc = NULL;
	struct spdk_io_channel *io_ch;
	struct spdk_bdev_opts bdev_opts = {};
	int rc;

	spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts));
	bdev_opts.bdev_io_pool_size = 4;
	bdev_opts.bdev_io_cache_size = 2;

	rc = spdk_bdev_set_opts(&bdev_opts);
	CU_ASSERT(rc == 0);
	spdk_bdev_initialize(bdev_init_cb, NULL);
	poll_threads();

	bdev = allocate_bdev("bdev0");

	rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc);
	CU_ASSERT(rc == 0);
	poll_threads();
	SPDK_CU_ASSERT_FATAL(desc != NULL);
	CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc));
	io_ch = spdk_bdev_get_io_channel(desc);
	CU_ASSERT(io_ch != NULL);

	/* WRITE and WRITE ZEROES are not supported */
	ut_enable_io_type(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, false);
	ut_enable_io_type(SPDK_BDEV_IO_TYPE_WRITE, false);
	rc = spdk_bdev_write_zeroes_blocks(desc, io_ch, 0, 128, io_done, NULL);
	CU_ASSERT(rc == -ENOTSUP);
	ut_enable_io_type(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, true);
	ut_enable_io_type(SPDK_BDEV_IO_TYPE_WRITE, true);

	/* NVME_IO, NVME_IO_MD and NVME_ADMIN are not supported */
	ut_enable_io_type(SPDK_BDEV_IO_TYPE_NVME_IO, false);
	ut_enable_io_type(SPDK_BDEV_IO_TYPE_NVME_IO_MD, false);
	ut_enable_io_type(SPDK_BDEV_IO_TYPE_NVME_ADMIN, false);
	rc = spdk_bdev_nvme_io_passthru(desc, io_ch, NULL, NULL, 0, NULL, NULL);
	CU_ASSERT(rc == -ENOTSUP);
	rc = spdk_bdev_nvme_io_passthru_md(desc, io_ch, NULL, NULL, 0, NULL, 0, NULL, NULL);
	CU_ASSERT(rc == -ENOTSUP);
	rc = spdk_bdev_nvme_admin_passthru(desc, io_ch, NULL, NULL, 0, NULL, NULL);
	CU_ASSERT(rc == -ENOTSUP);
	ut_enable_io_type(SPDK_BDEV_IO_TYPE_NVME_IO, true);
	ut_enable_io_type(SPDK_BDEV_IO_TYPE_NVME_IO_MD, true);
	ut_enable_io_type(SPDK_BDEV_IO_TYPE_NVME_ADMIN, true);

	spdk_put_io_channel(io_ch);
	spdk_bdev_close(desc);
	free_bdev(bdev);
	spdk_bdev_finish(bdev_fini_cb, NULL);
	poll_threads();
}

static void
bdev_io_wait_test(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc = NULL;
	struct spdk_io_channel *io_ch;
	struct spdk_bdev_opts bdev_opts = {};
	struct bdev_ut_io_wait_entry io_wait_entry;
	struct bdev_ut_io_wait_entry io_wait_entry2;
	int rc;

	spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts));
	bdev_opts.bdev_io_pool_size = 4;
	bdev_opts.bdev_io_cache_size = 2;

	rc = spdk_bdev_set_opts(&bdev_opts);
	CU_ASSERT(rc == 0);
	spdk_bdev_initialize(bdev_init_cb, NULL);
	poll_threads();

	bdev = allocate_bdev("bdev0");

	rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc);
	CU_ASSERT(rc == 0);
	poll_threads();
	SPDK_CU_ASSERT_FATAL(desc != NULL);
	CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc));
	io_ch = spdk_bdev_get_io_channel(desc);
	CU_ASSERT(io_ch != NULL);

	rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 4);

	rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, NULL);
	CU_ASSERT(rc == -ENOMEM);
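
	/* With bdev_io_pool_size == 4 the pool is now exhausted.  The -ENOMEM
	 * above is the cue to queue spdk_bdev_io_wait_entry callbacks, which
	 * fire in registration order as completions return bdev_ios to the pool.
	 */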
	io_wait_entry.entry.bdev = bdev;
	io_wait_entry.entry.cb_fn = io_wait_cb;
	io_wait_entry.entry.cb_arg = &io_wait_entry;
	io_wait_entry.io_ch = io_ch;
	io_wait_entry.desc = desc;
	io_wait_entry.submitted = false;
	/* Cannot use the same io_wait_entry for two different calls. */
	memcpy(&io_wait_entry2, &io_wait_entry, sizeof(io_wait_entry));
	io_wait_entry2.entry.cb_arg = &io_wait_entry2;

	/* Queue two I/O waits. */
	rc = spdk_bdev_queue_io_wait(bdev, io_ch, &io_wait_entry.entry);
	CU_ASSERT(rc == 0);
	CU_ASSERT(io_wait_entry.submitted == false);
	rc = spdk_bdev_queue_io_wait(bdev, io_ch, &io_wait_entry2.entry);
	CU_ASSERT(rc == 0);
	CU_ASSERT(io_wait_entry2.submitted == false);

	stub_complete_io(1);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 4);
	CU_ASSERT(io_wait_entry.submitted == true);
	CU_ASSERT(io_wait_entry2.submitted == false);

	stub_complete_io(1);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 4);
	CU_ASSERT(io_wait_entry2.submitted == true);

	stub_complete_io(4);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);

	spdk_put_io_channel(io_ch);
	spdk_bdev_close(desc);
	free_bdev(bdev);
	spdk_bdev_finish(bdev_fini_cb, NULL);
	poll_threads();
}
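
/* bdev_io_should_split() is the gate for all of the splitting tests below: an
 * I/O splits if it crosses optimal_io_boundary (when splitting on the boundary
 * is enabled), exceeds max_segment_size, or needs more than max_num_segments
 * iovecs.
 */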
static void
bdev_io_spans_split_test(void)
{
	struct spdk_bdev bdev;
	struct spdk_bdev_io bdev_io;
	struct iovec iov[BDEV_IO_NUM_CHILD_IOV];

	memset(&bdev, 0, sizeof(bdev));
	bdev_io.u.bdev.iovs = iov;

	bdev_io.type = SPDK_BDEV_IO_TYPE_READ;
	bdev.optimal_io_boundary = 0;
	bdev.max_segment_size = 0;
	bdev.max_num_segments = 0;
	bdev_io.bdev = &bdev;

	/* bdev has no optimal_io_boundary and no max sizes set - so this should return false. */
	CU_ASSERT(bdev_io_should_split(&bdev_io) == false);

	bdev.split_on_optimal_io_boundary = true;
	bdev.optimal_io_boundary = 32;
	bdev_io.type = SPDK_BDEV_IO_TYPE_RESET;

	/* RESETs are not based on LBAs - so this should return false. */
	CU_ASSERT(bdev_io_should_split(&bdev_io) == false);

	bdev_io.type = SPDK_BDEV_IO_TYPE_READ;
	bdev_io.u.bdev.offset_blocks = 0;
	bdev_io.u.bdev.num_blocks = 32;

	/* This I/O runs right up to, but does not cross, the boundary - so this should return false. */
	CU_ASSERT(bdev_io_should_split(&bdev_io) == false);

	bdev_io.u.bdev.num_blocks = 33;

	/* This I/O spans a boundary. */
	CU_ASSERT(bdev_io_should_split(&bdev_io) == true);

	bdev_io.u.bdev.num_blocks = 32;
	bdev.max_segment_size = 512 * 32;
	bdev.max_num_segments = 1;
	bdev_io.u.bdev.iovcnt = 1;
	iov[0].iov_len = 512;

	/* Does not cross the boundary and does not exceed max_segment_size or max_num_segments */
	CU_ASSERT(bdev_io_should_split(&bdev_io) == false);

	bdev.split_on_optimal_io_boundary = false;
	bdev.max_segment_size = 512;
	bdev.max_num_segments = 1;
	bdev_io.u.bdev.iovcnt = 2;

	/* Exceeds max_num_segments */
	CU_ASSERT(bdev_io_should_split(&bdev_io) == true);

	bdev.max_num_segments = 2;
	iov[0].iov_len = 513;
	iov[1].iov_len = 512;

	/* Exceeds max_segment_size */
	CU_ASSERT(bdev_io_should_split(&bdev_io) == true);
}
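
/* The boundary-split tests below register a bdev with separate metadata
 * (md_interleave == false, md_len == 8), so each child I/O's metadata pointer
 * must advance by md_len bytes per block: a child starting at parent block
 * offset N expects md_buf + N * 8.
 */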
static void
bdev_io_boundary_split_test(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc = NULL;
	struct spdk_io_channel *io_ch;
	struct spdk_bdev_opts bdev_opts = {};
	struct iovec iov[BDEV_IO_NUM_CHILD_IOV * 2];
	struct ut_expected_io *expected_io;
	void *md_buf = (void *)0xFF000000;
	uint64_t i;
	int rc;

	spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts));
	bdev_opts.bdev_io_pool_size = 512;
	bdev_opts.bdev_io_cache_size = 64;

	rc = spdk_bdev_set_opts(&bdev_opts);
	CU_ASSERT(rc == 0);
	spdk_bdev_initialize(bdev_init_cb, NULL);

	bdev = allocate_bdev("bdev0");

	rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(desc != NULL);
	CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc));
	io_ch = spdk_bdev_get_io_channel(desc);
	CU_ASSERT(io_ch != NULL);

	bdev->optimal_io_boundary = 16;
	bdev->split_on_optimal_io_boundary = false;

	g_io_done = false;

	/* First test that the I/O does not get split if split_on_optimal_io_boundary == false. */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 14, 8, 1);
	ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 8 * 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 8, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);

	bdev->split_on_optimal_io_boundary = true;
	bdev->md_interleave = false;
	bdev->md_len = 8;

	/* Now test that a single-vector command is split correctly.
	 * Offset 14, length 8, payload 0xF000
	 *  Child - Offset 14, length 2, payload 0xF000
	 *  Child - Offset 16, length 6, payload 0xF000 + 2 * 512
	 *
	 * Set up the expected values before calling spdk_bdev_read_blocks
	 */
	g_io_done = false;
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 14, 2, 1);
	expected_io->md_buf = md_buf;
	ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 2 * 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 16, 6, 1);
	expected_io->md_buf = md_buf + 2 * 8;
	ut_expected_io_set_iov(expected_io, 0, (void *)(0xF000 + 2 * 512), 6 * 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	/* spdk_bdev_read_blocks will submit the first child immediately. */
	rc = spdk_bdev_read_blocks_with_md(desc, io_ch, (void *)0xF000, md_buf,
					   14, 8, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2);
	stub_complete_io(2);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);

	/* Now set up a more complex, multi-vector command that needs to be split,
	 * including splitting iovecs.
	 */
	iov[0].iov_base = (void *)0x10000;
	iov[0].iov_len = 512;
	iov[1].iov_base = (void *)0x20000;
	iov[1].iov_len = 20 * 512;
	iov[2].iov_base = (void *)0x30000;
	iov[2].iov_len = 11 * 512;

	g_io_done = false;
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 14, 2, 2);
	expected_io->md_buf = md_buf;
	ut_expected_io_set_iov(expected_io, 0, (void *)0x10000, 512);
	ut_expected_io_set_iov(expected_io, 1, (void *)0x20000, 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 16, 16, 1);
	expected_io->md_buf = md_buf + 2 * 8;
	ut_expected_io_set_iov(expected_io, 0, (void *)(0x20000 + 512), 16 * 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 32, 14, 2);
	expected_io->md_buf = md_buf + 18 * 8;
	ut_expected_io_set_iov(expected_io, 0, (void *)(0x20000 + 17 * 512), 3 * 512);
	ut_expected_io_set_iov(expected_io, 1, (void *)0x30000, 11 * 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	rc = spdk_bdev_writev_blocks_with_md(desc, io_ch, iov, 3, md_buf,
					     14, 32, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 3);
	stub_complete_io(3);
	CU_ASSERT(g_io_done == true);
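
	/* BDEV_IO_NUM_CHILD_IOV (32 in bdev.c at the time of writing) is the
	 * per-child iovec capacity; the cases below build parent iovec arrays
	 * around that limit.
	 */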

	/* Test multi vector command that needs to be split by strip and then needs to be
	 * split further due to the capacity of child iovs.
	 */
	for (i = 0; i < BDEV_IO_NUM_CHILD_IOV * 2; i++) {
		iov[i].iov_base = (void *)((i + 1) * 0x10000);
		iov[i].iov_len = 512;
	}

	bdev->optimal_io_boundary = BDEV_IO_NUM_CHILD_IOV;
	g_io_done = false;
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0, BDEV_IO_NUM_CHILD_IOV,
					   BDEV_IO_NUM_CHILD_IOV);
	expected_io->md_buf = md_buf;
	for (i = 0; i < BDEV_IO_NUM_CHILD_IOV; i++) {
		ut_expected_io_set_iov(expected_io, i, (void *)((i + 1) * 0x10000), 512);
	}
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, BDEV_IO_NUM_CHILD_IOV,
					   BDEV_IO_NUM_CHILD_IOV, BDEV_IO_NUM_CHILD_IOV);
	expected_io->md_buf = md_buf + BDEV_IO_NUM_CHILD_IOV * 8;
	for (i = 0; i < BDEV_IO_NUM_CHILD_IOV; i++) {
		ut_expected_io_set_iov(expected_io, i,
				       (void *)((i + 1 + BDEV_IO_NUM_CHILD_IOV) * 0x10000), 512);
	}
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	rc = spdk_bdev_readv_blocks_with_md(desc, io_ch, iov, BDEV_IO_NUM_CHILD_IOV * 2, md_buf,
					    0, BDEV_IO_NUM_CHILD_IOV * 2, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);

	/* Test multi vector command that needs to be split by strip and then needs to be
	 * split further due to the capacity of child iovs.  In this case, the length of
	 * the remaining iovecs within an I/O boundary is a multiple of the block size.
	 */
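
	/* Two 256-byte iovecs pair up into one 512-byte block, so the first
	 * child below consumes BDEV_IO_NUM_CHILD_IOV iovecs but only
	 * BDEV_IO_NUM_CHILD_IOV - 1 blocks; each remaining 512-byte iovec
	 * becomes its own one-block child.
	 */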
	/* Fill the iovec array for exactly one boundary.  The iovec cnt for this boundary
	 * is BDEV_IO_NUM_CHILD_IOV + 1, which exceeds the capacity of child iovs.
	 */
	for (i = 0; i < BDEV_IO_NUM_CHILD_IOV - 2; i++) {
		iov[i].iov_base = (void *)((i + 1) * 0x10000);
		iov[i].iov_len = 512;
	}
	for (i = BDEV_IO_NUM_CHILD_IOV - 2; i < BDEV_IO_NUM_CHILD_IOV; i++) {
		iov[i].iov_base = (void *)((i + 1) * 0x10000);
		iov[i].iov_len = 256;
	}
	iov[BDEV_IO_NUM_CHILD_IOV].iov_base = (void *)((BDEV_IO_NUM_CHILD_IOV + 1) * 0x10000);
	iov[BDEV_IO_NUM_CHILD_IOV].iov_len = 512;

	/* Add an extra iovec to trigger split */
	iov[BDEV_IO_NUM_CHILD_IOV + 1].iov_base = (void *)((BDEV_IO_NUM_CHILD_IOV + 2) * 0x10000);
	iov[BDEV_IO_NUM_CHILD_IOV + 1].iov_len = 512;

	bdev->optimal_io_boundary = BDEV_IO_NUM_CHILD_IOV;
	g_io_done = false;
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0,
					   BDEV_IO_NUM_CHILD_IOV - 1, BDEV_IO_NUM_CHILD_IOV);
	expected_io->md_buf = md_buf;
	for (i = 0; i < BDEV_IO_NUM_CHILD_IOV - 2; i++) {
		ut_expected_io_set_iov(expected_io, i,
				       (void *)((i + 1) * 0x10000), 512);
	}
	for (i = BDEV_IO_NUM_CHILD_IOV - 2; i < BDEV_IO_NUM_CHILD_IOV; i++) {
		ut_expected_io_set_iov(expected_io, i,
				       (void *)((i + 1) * 0x10000), 256);
	}
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, BDEV_IO_NUM_CHILD_IOV - 1,
					   1, 1);
	expected_io->md_buf = md_buf + (BDEV_IO_NUM_CHILD_IOV - 1) * 8;
	ut_expected_io_set_iov(expected_io, 0,
			       (void *)((BDEV_IO_NUM_CHILD_IOV + 1) * 0x10000), 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, BDEV_IO_NUM_CHILD_IOV,
					   1, 1);
	expected_io->md_buf = md_buf + BDEV_IO_NUM_CHILD_IOV * 8;
	ut_expected_io_set_iov(expected_io, 0,
			       (void *)((BDEV_IO_NUM_CHILD_IOV + 2) * 0x10000), 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	rc = spdk_bdev_readv_blocks_with_md(desc, io_ch, iov, BDEV_IO_NUM_CHILD_IOV + 2, md_buf,
					    0, BDEV_IO_NUM_CHILD_IOV + 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2);
	stub_complete_io(2);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);

	/* Test multi vector command that needs to be split by strip and then needs to be
	 * split further due to the capacity of child iovs.  In this case the child request
	 * offset is rewound to the last block-aligned offset so the request succeeds
	 * without error.
	 */
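	/* Here the 32nd iovec (iov[31], 256 bytes) would end the first child
	 * mid-block, so that child is trimmed back to the last aligned offset
	 * (31 blocks in 31 iovecs) and the straddling 256 + 256 bytes become
	 * the next one-block child.
	 */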
	for (i = 0; i < BDEV_IO_NUM_CHILD_IOV - 1; i++) {
		iov[i].iov_base = (void *)((i + 1) * 0x10000);
		iov[i].iov_len = 512;
	}
	iov[BDEV_IO_NUM_CHILD_IOV - 1].iov_base = (void *)(BDEV_IO_NUM_CHILD_IOV * 0x10000);
	iov[BDEV_IO_NUM_CHILD_IOV - 1].iov_len = 256;

	iov[BDEV_IO_NUM_CHILD_IOV].iov_base = (void *)((BDEV_IO_NUM_CHILD_IOV + 1) * 0x10000);
	iov[BDEV_IO_NUM_CHILD_IOV].iov_len = 256;

	iov[BDEV_IO_NUM_CHILD_IOV + 1].iov_base = (void *)((BDEV_IO_NUM_CHILD_IOV + 2) * 0x10000);
	iov[BDEV_IO_NUM_CHILD_IOV + 1].iov_len = 512;

	bdev->optimal_io_boundary = BDEV_IO_NUM_CHILD_IOV;
	g_io_done = false;
	g_io_status = 0;
	/* The first expected I/O starts at offset 0 and spans BDEV_IO_NUM_CHILD_IOV - 1 blocks */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0,
					   BDEV_IO_NUM_CHILD_IOV - 1, BDEV_IO_NUM_CHILD_IOV - 1);
	expected_io->md_buf = md_buf;
	for (i = 0; i < BDEV_IO_NUM_CHILD_IOV - 1; i++) {
		ut_expected_io_set_iov(expected_io, i,
				       (void *)((i + 1) * 0x10000), 512);
	}
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
	/* The second expected I/O covers the single block at offset BDEV_IO_NUM_CHILD_IOV - 1 */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, BDEV_IO_NUM_CHILD_IOV - 1,
					   1, 2);
	expected_io->md_buf = md_buf + (BDEV_IO_NUM_CHILD_IOV - 1) * 8;
	ut_expected_io_set_iov(expected_io, 0,
			       (void *)(BDEV_IO_NUM_CHILD_IOV * 0x10000), 256);
	ut_expected_io_set_iov(expected_io, 1,
			       (void *)((BDEV_IO_NUM_CHILD_IOV + 1) * 0x10000), 256);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
	/* The third expected I/O covers the single block at offset BDEV_IO_NUM_CHILD_IOV */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, BDEV_IO_NUM_CHILD_IOV,
					   1, 1);
	expected_io->md_buf = md_buf + BDEV_IO_NUM_CHILD_IOV * 8;
	ut_expected_io_set_iov(expected_io, 0,
			       (void *)((BDEV_IO_NUM_CHILD_IOV + 2) * 0x10000), 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	rc = spdk_bdev_readv_blocks_with_md(desc, io_ch, iov, BDEV_IO_NUM_CHILD_IOV * 2, md_buf,
					    0, BDEV_IO_NUM_CHILD_IOV + 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2);
	stub_complete_io(2);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);

	/* Test multi vector command that needs to be split due to the IO boundary and
	 * the capacity of child iovs.  Especially test the case when the command is
	 * split due to the capacity of child iovs and the tail address is not aligned
	 * with the block size, so it is rewound to the aligned address.
	 *
	 * The iovecs used in this read request are complex but are based on the data
	 * collected from a real issue.  We change the base addresses but keep the
	 * lengths so as not to lose the credibility of the test.
	 */
	bdev->optimal_io_boundary = 128;
	g_io_done = false;
	g_io_status = 0;

	for (i = 0; i < 31; i++) {
		iov[i].iov_base = (void *)(0xFEED0000000 + (i << 20));
		iov[i].iov_len = 1024;
	}
	iov[31].iov_base = (void *)0xFEED1F00000;
	iov[31].iov_len = 32768;
	iov[32].iov_base = (void *)0xFEED2000000;
	iov[32].iov_len = 160;
	iov[33].iov_base = (void *)0xFEED2100000;
	iov[33].iov_len = 4096;
	iov[34].iov_base = (void *)0xFEED2200000;
	iov[34].iov_len = 4096;
	iov[35].iov_base = (void *)0xFEED2300000;
	iov[35].iov_len = 4096;
	iov[36].iov_base = (void *)0xFEED2400000;
	iov[36].iov_len = 4096;
	iov[37].iov_base = (void *)0xFEED2500000;
	iov[37].iov_len = 4096;
	iov[38].iov_base = (void *)0xFEED2600000;
	iov[38].iov_len = 4096;
	iov[39].iov_base = (void *)0xFEED2700000;
	iov[39].iov_len = 4096;
	iov[40].iov_base = (void *)0xFEED2800000;
	iov[40].iov_len = 4096;
	iov[41].iov_base = (void *)0xFEED2900000;
	iov[41].iov_len = 4096;
	iov[42].iov_base = (void *)0xFEED2A00000;
	iov[42].iov_len = 4096;
	iov[43].iov_base = (void *)0xFEED2B00000;
	iov[43].iov_len = 12288;
	iov[44].iov_base = (void *)0xFEED2C00000;
	iov[44].iov_len = 8192;
	iov[45].iov_base = (void *)0xFEED2F00000;
	iov[45].iov_len = 4096;
	iov[46].iov_base = (void *)0xFEED3000000;
	iov[46].iov_len = 4096;
	iov[47].iov_base = (void *)0xFEED3100000;
	iov[47].iov_len = 4096;
	iov[48].iov_base = (void *)0xFEED3200000;
	iov[48].iov_len = 24576;
	iov[49].iov_base = (void *)0xFEED3300000;
	iov[49].iov_len = 16384;
	iov[50].iov_base = (void *)0xFEED3400000;
	iov[50].iov_len = 12288;
	iov[51].iov_base = (void *)0xFEED3500000;
	iov[51].iov_len = 4096;
	iov[52].iov_base = (void *)0xFEED3600000;
	iov[52].iov_len = 4096;
	iov[53].iov_base = (void *)0xFEED3700000;
	iov[53].iov_len = 4096;
	iov[54].iov_base = (void *)0xFEED3800000;
	iov[54].iov_len = 28672;
	iov[55].iov_base = (void *)0xFEED3900000;
	iov[55].iov_len = 20480;
	iov[56].iov_base = (void *)0xFEED3A00000;
	iov[56].iov_len = 4096;
	iov[57].iov_base = (void *)0xFEED3B00000;
	iov[57].iov_len = 12288;
	iov[58].iov_base = (void *)0xFEED3C00000;
	iov[58].iov_len = 4096;
	iov[59].iov_base = (void *)0xFEED3D00000;
	iov[59].iov_len = 4096;
	iov[60].iov_base = (void *)0xFEED3E00000;
	iov[60].iov_len = 352;
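
	/* The 61 iovecs above total 278016 bytes, i.e. exactly 543 blocks of
	 * 512 bytes, matching the 543-block readv issued below.
	 */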

	/* The 1st child IO must be from iov[0] to iov[31], split by the capacity
	 * of child iovs.
	 */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0, 126, 32);
	expected_io->md_buf = md_buf;
	for (i = 0; i < 32; i++) {
		ut_expected_io_set_iov(expected_io, i, iov[i].iov_base, iov[i].iov_len);
	}
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	/* The 2nd child IO must be from iov[32] to the first 864 bytes of iov[33],
	 * split by the IO boundary requirement.
	 */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 126, 2, 2);
	expected_io->md_buf = md_buf + 126 * 8;
	ut_expected_io_set_iov(expected_io, 0, iov[32].iov_base, iov[32].iov_len);
	ut_expected_io_set_iov(expected_io, 1, iov[33].iov_base, 864);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	/* The 3rd child IO must be from the remaining 3232 bytes of iov[33] to
	 * the first 864 bytes of iov[46], split by the IO boundary requirement.
	 */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 128, 128, 14);
	expected_io->md_buf = md_buf + 128 * 8;
	ut_expected_io_set_iov(expected_io, 0, (void *)((uintptr_t)iov[33].iov_base + 864),
			       iov[33].iov_len - 864);
	ut_expected_io_set_iov(expected_io, 1, iov[34].iov_base, iov[34].iov_len);
	ut_expected_io_set_iov(expected_io, 2, iov[35].iov_base, iov[35].iov_len);
	ut_expected_io_set_iov(expected_io, 3, iov[36].iov_base, iov[36].iov_len);
	ut_expected_io_set_iov(expected_io, 4, iov[37].iov_base, iov[37].iov_len);
	ut_expected_io_set_iov(expected_io, 5, iov[38].iov_base, iov[38].iov_len);
	ut_expected_io_set_iov(expected_io, 6, iov[39].iov_base, iov[39].iov_len);
	ut_expected_io_set_iov(expected_io, 7, iov[40].iov_base, iov[40].iov_len);
	ut_expected_io_set_iov(expected_io, 8, iov[41].iov_base, iov[41].iov_len);
	ut_expected_io_set_iov(expected_io, 9, iov[42].iov_base, iov[42].iov_len);
	ut_expected_io_set_iov(expected_io, 10, iov[43].iov_base, iov[43].iov_len);
	ut_expected_io_set_iov(expected_io, 11, iov[44].iov_base, iov[44].iov_len);
	ut_expected_io_set_iov(expected_io, 12, iov[45].iov_base, iov[45].iov_len);
	ut_expected_io_set_iov(expected_io, 13, iov[46].iov_base, 864);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	/* The 4th child IO must be from the remaining 3232 bytes of iov[46] to the
	 * first 864 bytes of iov[52], split by the IO boundary requirement.
	 */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 256, 128, 7);
	expected_io->md_buf = md_buf + 256 * 8;
	ut_expected_io_set_iov(expected_io, 0, (void *)((uintptr_t)iov[46].iov_base + 864),
			       iov[46].iov_len - 864);
	ut_expected_io_set_iov(expected_io, 1, iov[47].iov_base, iov[47].iov_len);
	ut_expected_io_set_iov(expected_io, 2, iov[48].iov_base, iov[48].iov_len);
	ut_expected_io_set_iov(expected_io, 3, iov[49].iov_base, iov[49].iov_len);
	ut_expected_io_set_iov(expected_io, 4, iov[50].iov_base, iov[50].iov_len);
	ut_expected_io_set_iov(expected_io, 5, iov[51].iov_base, iov[51].iov_len);
	ut_expected_io_set_iov(expected_io, 6, iov[52].iov_base, 864);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	/* The 5th child IO must be from the remaining 3232 bytes of iov[52] to
	 * the first 4960 bytes of iov[57], split by the IO boundary requirement.
	 */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 384, 128, 6);
	expected_io->md_buf = md_buf + 384 * 8;
	ut_expected_io_set_iov(expected_io, 0, (void *)((uintptr_t)iov[52].iov_base + 864),
			       iov[52].iov_len - 864);
	ut_expected_io_set_iov(expected_io, 1, iov[53].iov_base, iov[53].iov_len);
	ut_expected_io_set_iov(expected_io, 2, iov[54].iov_base, iov[54].iov_len);
	ut_expected_io_set_iov(expected_io, 3, iov[55].iov_base, iov[55].iov_len);
	ut_expected_io_set_iov(expected_io, 4, iov[56].iov_base, iov[56].iov_len);
	ut_expected_io_set_iov(expected_io, 5, iov[57].iov_base, 4960);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	/* The 6th child IO must be from the remaining 7328 bytes of iov[57]
	 * to the first 3936 bytes of iov[59], split by the capacity of child iovs.
	 */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 512, 30, 3);
	expected_io->md_buf = md_buf + 512 * 8;
	ut_expected_io_set_iov(expected_io, 0, (void *)((uintptr_t)iov[57].iov_base + 4960),
			       iov[57].iov_len - 4960);
	ut_expected_io_set_iov(expected_io, 1, iov[58].iov_base, iov[58].iov_len);
	ut_expected_io_set_iov(expected_io, 2, iov[59].iov_base, 3936);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	/* The 7th child IO is from the remaining 160 bytes of iov[59] and iov[60]. */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 542, 1, 2);
	expected_io->md_buf = md_buf + 542 * 8;
	ut_expected_io_set_iov(expected_io, 0, (void *)((uintptr_t)iov[59].iov_base + 3936),
			       iov[59].iov_len - 3936);
	ut_expected_io_set_iov(expected_io, 1, iov[60].iov_base, iov[60].iov_len);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	rc = spdk_bdev_readv_blocks_with_md(desc, io_ch, iov, 61, md_buf,
					    0, 543, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 5);
	stub_complete_io(5);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
	CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS);

	/* Test a WRITE_ZEROES that would span an I/O boundary.  WRITE_ZEROES should not be
	 * split, so test that.
	 */
	bdev->optimal_io_boundary = 15;
	g_io_done = false;
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, 9, 36, 0);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	rc = spdk_bdev_write_zeroes_blocks(desc, io_ch, 9, 36, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == true);
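
	/* UNMAP and FLUSH, like WRITE_ZEROES, carry no iovec payload, so the
	 * splitting logic here passes them through unsplit even when they span
	 * optimal_io_boundary.
	 */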
*/ 1699 bdev->optimal_io_boundary = 16; 1700 g_io_done = false; 1701 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_UNMAP, 15, 2, 0); 1702 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1703 1704 rc = spdk_bdev_unmap_blocks(desc, io_ch, 15, 2, io_done, NULL); 1705 CU_ASSERT(rc == 0); 1706 CU_ASSERT(g_io_done == false); 1707 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 1708 stub_complete_io(1); 1709 CU_ASSERT(g_io_done == true); 1710 1711 /* Test a FLUSH. This should also not be split. */ 1712 bdev->optimal_io_boundary = 16; 1713 g_io_done = false; 1714 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_FLUSH, 15, 2, 0); 1715 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1716 1717 rc = spdk_bdev_flush_blocks(desc, io_ch, 15, 2, io_done, NULL); 1718 CU_ASSERT(rc == 0); 1719 CU_ASSERT(g_io_done == false); 1720 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 1721 stub_complete_io(1); 1722 CU_ASSERT(g_io_done == true); 1723 1724 CU_ASSERT(TAILQ_EMPTY(&g_bdev_ut_channel->expected_io)); 1725 1726 /* Child requests return an error status */ 1727 bdev->optimal_io_boundary = 16; 1728 iov[0].iov_base = (void *)0x10000; 1729 iov[0].iov_len = 512 * 64; 1730 g_io_exp_status = SPDK_BDEV_IO_STATUS_FAILED; 1731 g_io_done = false; 1732 g_io_status = SPDK_BDEV_IO_STATUS_SUCCESS; 1733 1734 rc = spdk_bdev_readv_blocks(desc, io_ch, iov, 1, 1, 64, io_done, NULL); 1735 CU_ASSERT(rc == 0); 1736 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 5); 1737 stub_complete_io(4); 1738 CU_ASSERT(g_io_done == false); 1739 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS); 1740 stub_complete_io(1); 1741 CU_ASSERT(g_io_done == true); 1742 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED); 1743 1744 /* Test that a multi-vector command terminates with failure, without continuing 1745 * the splitting process, when one of its child I/Os fails. 1746 * The multi-vector command is the same as the one above: it needs to be split by the 1747 * optimal IO boundary and then split further due to the capacity of child iovs. 1748 */ 1749 for (i = 0; i < BDEV_IO_NUM_CHILD_IOV - 1; i++) { 1750 iov[i].iov_base = (void *)((i + 1) * 0x10000); 1751 iov[i].iov_len = 512; 1752 } 1753 iov[BDEV_IO_NUM_CHILD_IOV - 1].iov_base = (void *)(BDEV_IO_NUM_CHILD_IOV * 0x10000); 1754 iov[BDEV_IO_NUM_CHILD_IOV - 1].iov_len = 256; 1755 1756 iov[BDEV_IO_NUM_CHILD_IOV].iov_base = (void *)((BDEV_IO_NUM_CHILD_IOV + 1) * 0x10000); 1757 iov[BDEV_IO_NUM_CHILD_IOV].iov_len = 256; 1758 1759 iov[BDEV_IO_NUM_CHILD_IOV + 1].iov_base = (void *)((BDEV_IO_NUM_CHILD_IOV + 2) * 0x10000); 1760 iov[BDEV_IO_NUM_CHILD_IOV + 1].iov_len = 512; 1761 1762 bdev->optimal_io_boundary = BDEV_IO_NUM_CHILD_IOV; 1763 1764 g_io_exp_status = SPDK_BDEV_IO_STATUS_FAILED; 1765 g_io_done = false; 1766 g_io_status = SPDK_BDEV_IO_STATUS_SUCCESS; 1767 1768 rc = spdk_bdev_readv_blocks(desc, io_ch, iov, BDEV_IO_NUM_CHILD_IOV * 2, 0, 1769 BDEV_IO_NUM_CHILD_IOV + 1, io_done, NULL); 1770 CU_ASSERT(rc == 0); 1771 CU_ASSERT(g_io_done == false); 1772 1773 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 1774 stub_complete_io(1); 1775 CU_ASSERT(g_io_done == true); 1776 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED); 1777 1778 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 1779 1780 /* for this test we will create the following conditions to hit the code path where 1781 * we are trying to send an IO following a split that has no iovs because we had to 1782 * trim them for alignment reasons.
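 *
 * Checking the arithmetic with the values used below: 0x4200 = 16896 bytes = 33
 * blocks of 512. Filling child IOVs 0-30 with 0x212-byte vectors gives
 * 31 * 0x212 = 0x402e bytes, 0x2e past the 0x4000 boundary, so entry 30 is trimmed
 * to 0x1e4 and the first child carries exactly 0x4000 bytes in 31 vectors. The
 * remaining 0x200 bytes (one block) are picked up by the 0x2e and 0x1d2 entries
 * of the 2nd child below.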
1783 * 1784 * - 16K boundary, our IO will start at offset 0 with a length of 0x4200 1785 * - Our IOVs are 0x212 in size so that we run into the 16K boundary at child IOV 1786 * position 30 and overshoot by 0x2e. 1787 * - That means we'll send the IO and loop back to pick up the remaining bytes at 1788 * child IOV index 31. When we do, we find that we have to shorten index 31 by 0x2e 1789 * which eliminates that vector, so we just send the first split IO with 31 vectors 1790 * and let the completion pick up the last 2 vectors. 1791 */ 1792 bdev->optimal_io_boundary = 32; 1793 bdev->split_on_optimal_io_boundary = true; 1794 g_io_done = false; 1795 1796 /* Init all parent IOVs to 0x212 */ 1797 for (i = 0; i < BDEV_IO_NUM_CHILD_IOV + 2; i++) { 1798 iov[i].iov_base = (void *)((i + 1) * 0x10000); 1799 iov[i].iov_len = 0x212; 1800 } 1801 1802 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0, BDEV_IO_NUM_CHILD_IOV, 1803 BDEV_IO_NUM_CHILD_IOV - 1); 1804 /* expect 0-29 to be 1:1 with the parent iov */ 1805 for (i = 0; i < BDEV_IO_NUM_CHILD_IOV - 2; i++) { 1806 ut_expected_io_set_iov(expected_io, i, iov[i].iov_base, iov[i].iov_len); 1807 } 1808 1809 /* expect index 30 to be shortened to 0x1e4 (0x212 - 0x2e) because of the alignment 1810 * where 0x2e is the amount we overshot the 16K boundary 1811 */ 1812 ut_expected_io_set_iov(expected_io, BDEV_IO_NUM_CHILD_IOV - 2, 1813 (void *)(iov[BDEV_IO_NUM_CHILD_IOV - 2].iov_base), 0x1e4); 1814 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1815 1816 /* 2nd child IO will have 2 remaining vectors, one to pick up from the one that was 1817 * shortened that takes it to the next boundary and then a final one to get us to 1818 * 0x4200 bytes for the IO. 1819 */ 1820 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, BDEV_IO_NUM_CHILD_IOV, 1821 1, 2); 1822 /* position 30 picked up the remaining bytes to the next boundary */ 1823 ut_expected_io_set_iov(expected_io, 0, 1824 (void *)(iov[BDEV_IO_NUM_CHILD_IOV - 2].iov_base + 0x1e4), 0x2e); 1825 1826 /* position 31 picked up the rest of the transfer to get us to 0x4200 */ 1827 ut_expected_io_set_iov(expected_io, 1, 1828 (void *)(iov[BDEV_IO_NUM_CHILD_IOV - 1].iov_base), 0x1d2); 1829 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1830 1831 rc = spdk_bdev_readv_blocks(desc, io_ch, iov, BDEV_IO_NUM_CHILD_IOV + 1, 0, 1832 BDEV_IO_NUM_CHILD_IOV + 1, io_done, NULL); 1833 CU_ASSERT(rc == 0); 1834 CU_ASSERT(g_io_done == false); 1835 1836 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 1837 stub_complete_io(1); 1838 CU_ASSERT(g_io_done == false); 1839 1840 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 1841 stub_complete_io(1); 1842 CU_ASSERT(g_io_done == true); 1843 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 1844 1845 spdk_put_io_channel(io_ch); 1846 spdk_bdev_close(desc); 1847 free_bdev(bdev); 1848 spdk_bdev_finish(bdev_fini_cb, NULL); 1849 poll_threads(); 1850 } 1851 1852 static void 1853 bdev_io_max_size_and_segment_split_test(void) 1854 { 1855 struct spdk_bdev *bdev; 1856 struct spdk_bdev_desc *desc = NULL; 1857 struct spdk_io_channel *io_ch; 1858 struct spdk_bdev_opts bdev_opts = {}; 1859 struct iovec iov[BDEV_IO_NUM_CHILD_IOV * 2]; 1860 struct ut_expected_io *expected_io; 1861 uint64_t i; 1862 int rc; 1863 1864 spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts)); 1865 bdev_opts.bdev_io_pool_size = 512; 1866 bdev_opts.bdev_io_cache_size = 64; 1867 1868 bdev_opts.opts_size = sizeof(bdev_opts); 1869 rc =
spdk_bdev_set_opts(&bdev_opts); 1870 CU_ASSERT(rc == 0); 1871 spdk_bdev_initialize(bdev_init_cb, NULL); 1872 1873 bdev = allocate_bdev("bdev0"); 1874 1875 rc = spdk_bdev_open_ext(bdev->name, true, bdev_ut_event_cb, NULL, &desc); 1876 CU_ASSERT(rc == 0); 1877 SPDK_CU_ASSERT_FATAL(desc != NULL); 1878 io_ch = spdk_bdev_get_io_channel(desc); 1879 CU_ASSERT(io_ch != NULL); 1880 1881 bdev->split_on_optimal_io_boundary = false; 1882 bdev->optimal_io_boundary = 0; 1883 1884 /* Case 0: max_num_segments == 0 (no segment count limit), 1885 * but the 2 * 512 byte payload exceeds max_segment_size (512). 1886 */ 1887 bdev->max_segment_size = 512; 1888 bdev->max_num_segments = 0; 1889 g_io_done = false; 1890 1891 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 14, 2, 2); 1892 ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 512); 1893 ut_expected_io_set_iov(expected_io, 1, (void *)(0xF000 + 512), 512); 1894 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1895 1896 rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 2, io_done, NULL); 1897 CU_ASSERT(rc == 0); 1898 CU_ASSERT(g_io_done == false); 1899 1900 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 1901 stub_complete_io(1); 1902 CU_ASSERT(g_io_done == true); 1903 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 1904 1905 /* Case 1: max_segment_size == 0 (no segment size limit), 1906 * but the iov count (2) exceeds max_num_segments (1). 1907 */ 1908 bdev->max_segment_size = 0; 1909 bdev->max_num_segments = 1; 1910 g_io_done = false; 1911 1912 iov[0].iov_base = (void *)0x10000; 1913 iov[0].iov_len = 512; 1914 iov[1].iov_base = (void *)0x20000; 1915 iov[1].iov_len = 8 * 512; 1916 1917 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 14, 1, 1); 1918 ut_expected_io_set_iov(expected_io, 0, iov[0].iov_base, iov[0].iov_len); 1919 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1920 1921 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 15, 8, 1); 1922 ut_expected_io_set_iov(expected_io, 0, iov[1].iov_base, iov[1].iov_len); 1923 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1924 1925 rc = spdk_bdev_readv_blocks(desc, io_ch, iov, 2, 14, 9, io_done, NULL); 1926 CU_ASSERT(rc == 0); 1927 CU_ASSERT(g_io_done == false); 1928 1929 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 1930 stub_complete_io(2); 1931 CU_ASSERT(g_io_done == true); 1932 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 1933 1934 /* Test that a non-vector command is split correctly. 1935 * Set up the expected values before calling spdk_bdev_read_blocks 1936 */ 1937 bdev->max_segment_size = 512; 1938 bdev->max_num_segments = 1; 1939 g_io_done = false; 1940 1941 /* Child IO 0 */ 1942 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 14, 1, 1); 1943 ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 512); 1944 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1945 1946 /* Child IO 1 */ 1947 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 15, 1, 1); 1948 ut_expected_io_set_iov(expected_io, 0, (void *)(0xF000 + 1 * 512), 512); 1949 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1950 1951 /* spdk_bdev_read_blocks will submit the first child immediately.
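 * The second child follows right behind it: with max_segment_size == 512 and
 * max_num_segments == 1 each child carries a single 512-byte segment, so the
 * outstanding count asserted below is 2.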
*/ 1952 rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 2, io_done, NULL); 1953 CU_ASSERT(rc == 0); 1954 CU_ASSERT(g_io_done == false); 1955 1956 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 1957 stub_complete_io(2); 1958 CU_ASSERT(g_io_done == true); 1959 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 1960 1961 /* Now set up a more complex, multi-vector command that needs to be split, 1962 * including splitting iovecs. 1963 */ 1964 bdev->max_segment_size = 2 * 512; 1965 bdev->max_num_segments = 1; 1966 g_io_done = false; 1967 1968 iov[0].iov_base = (void *)0x10000; 1969 iov[0].iov_len = 2 * 512; 1970 iov[1].iov_base = (void *)0x20000; 1971 iov[1].iov_len = 4 * 512; 1972 iov[2].iov_base = (void *)0x30000; 1973 iov[2].iov_len = 6 * 512; 1974 1975 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 14, 2, 1); 1976 ut_expected_io_set_iov(expected_io, 0, iov[0].iov_base, 512 * 2); 1977 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1978 1979 /* Split iov[1] into 2 iov entries, then split the segments */ 1980 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 16, 2, 1); 1981 ut_expected_io_set_iov(expected_io, 0, iov[1].iov_base, 512 * 2); 1982 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1983 1984 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 18, 2, 1); 1985 ut_expected_io_set_iov(expected_io, 0, iov[1].iov_base + 512 * 2, 512 * 2); 1986 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1987 1988 /* Split iov[2] into 3 iov entries, then split the segments */ 1989 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 20, 2, 1); 1990 ut_expected_io_set_iov(expected_io, 0, iov[2].iov_base, 512 * 2); 1991 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1992 1993 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 22, 2, 1); 1994 ut_expected_io_set_iov(expected_io, 0, iov[2].iov_base + 512 * 2, 512 * 2); 1995 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1996 1997 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 24, 2, 1); 1998 ut_expected_io_set_iov(expected_io, 0, iov[2].iov_base + 512 * 4, 512 * 2); 1999 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2000 2001 rc = spdk_bdev_writev_blocks(desc, io_ch, iov, 3, 14, 12, io_done, NULL); 2002 CU_ASSERT(rc == 0); 2003 CU_ASSERT(g_io_done == false); 2004 2005 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 6); 2006 stub_complete_io(6); 2007 CU_ASSERT(g_io_done == true); 2008 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 2009 2010 /* Test a multi-vector command that needs to be split by segment size and then needs 2011 * to be split further due to the capacity of the parent IO's child iovs. 2012 */ 2013 bdev->max_segment_size = 512; 2014 bdev->max_num_segments = 1; 2015 g_io_done = false; 2016 2017 for (i = 0; i < BDEV_IO_NUM_CHILD_IOV; i++) { 2018 iov[i].iov_base = (void *)((i + 1) * 0x10000); 2019 iov[i].iov_len = 512 * 2; 2020 } 2021 2022 /* Each input iov is split into 2 child iovs, 2023 * so half of the input iovs are enough to consume all child iov entries of one round.
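 * In numbers: 32 input iovs of 1024 bytes produce 64 single-iov child IOs of one
 * 512-byte block each, submitted in two rounds of BDEV_IO_NUM_CHILD_IOV (32)
 * children, as the outstanding counts asserted below show.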
2024 */ 2025 for (i = 0; i < BDEV_IO_NUM_CHILD_IOV / 2; i++) { 2026 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 2 * i, 1, 1); 2027 ut_expected_io_set_iov(expected_io, 0, iov[i].iov_base, 512); 2028 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2029 2030 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 2 * i + 1, 1, 1); 2031 ut_expected_io_set_iov(expected_io, 0, iov[i].iov_base + 512, 512); 2032 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2033 } 2034 2035 /* The remaining iovs are split in the second round */ 2036 for (i = BDEV_IO_NUM_CHILD_IOV / 2; i < BDEV_IO_NUM_CHILD_IOV; i++) { 2037 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, i * 2, 1, 1); 2038 ut_expected_io_set_iov(expected_io, 0, iov[i].iov_base, 512); 2039 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2040 2041 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, i * 2 + 1, 1, 1); 2042 ut_expected_io_set_iov(expected_io, 0, iov[i].iov_base + 512, 512); 2043 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2044 } 2045 2046 rc = spdk_bdev_readv_blocks(desc, io_ch, iov, BDEV_IO_NUM_CHILD_IOV, 0, 2047 BDEV_IO_NUM_CHILD_IOV * 2, io_done, NULL); 2048 CU_ASSERT(rc == 0); 2049 CU_ASSERT(g_io_done == false); 2050 2051 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == BDEV_IO_NUM_CHILD_IOV); 2052 stub_complete_io(BDEV_IO_NUM_CHILD_IOV); 2053 CU_ASSERT(g_io_done == false); 2054 2055 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == BDEV_IO_NUM_CHILD_IOV); 2056 stub_complete_io(BDEV_IO_NUM_CHILD_IOV); 2057 CU_ASSERT(g_io_done == true); 2058 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 2059 2060 /* An error case: a divided child IO is 2061 * not a multiple of the block size, 2062 * so the request exits with an error 2063 */ 2064 bdev->max_segment_size = 512; 2065 bdev->max_num_segments = 1; 2066 g_io_done = false; 2067 2068 iov[0].iov_base = (void *)0x10000; 2069 iov[0].iov_len = 512 + 256; 2070 iov[1].iov_base = (void *)0x20000; 2071 iov[1].iov_len = 256; 2072 2073 /* iov[0] is split into 512 and 256. 2074 * 256 is less than the block size; the next 2075 * round of splitting finds a first child IO smaller than 2076 * the block size and exits with an error 2077 */ 2078 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0, 1, 1); 2079 ut_expected_io_set_iov(expected_io, 0, iov[0].iov_base, 512); 2080 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2081 2082 rc = spdk_bdev_readv_blocks(desc, io_ch, iov, 2, 0, 2, io_done, NULL); 2083 CU_ASSERT(rc == 0); 2084 CU_ASSERT(g_io_done == false); 2085 2086 /* First child IO is OK */ 2087 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 2088 stub_complete_io(1); 2089 CU_ASSERT(g_io_done == true); 2090 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 2091 2092 /* error exit */ 2093 stub_complete_io(1); 2094 CU_ASSERT(g_io_done == true); 2095 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED); 2096 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 2097 2098 /* Test a multi-vector command that needs to be split by segment size and then needs 2099 * to be split further due to the capacity of child iovs. 2100 * 2101 * In this case, the last two iovs need to be split, but that would exceed the capacity 2102 * of child iovs, so they must wait until the first batch has completed.
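 * In numbers: 30 iovs of 512 bytes plus 2 iovs of 1024 bytes would need
 * 30 + 2 + 2 = 34 child iov entries once the 1024-byte iovs are split, exceeding
 * the 32-entry capacity. The first child therefore takes 32 entries (32 blocks)
 * and the split of the last iov waits for the second round (offset 32, 2 blocks).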
2103 */ 2104 bdev->max_segment_size = 512; 2105 bdev->max_num_segments = BDEV_IO_NUM_CHILD_IOV; 2106 g_io_done = false; 2107 2108 for (i = 0; i < BDEV_IO_NUM_CHILD_IOV - 2; i++) { 2109 iov[i].iov_base = (void *)((i + 1) * 0x10000); 2110 iov[i].iov_len = 512; 2111 } 2112 for (i = BDEV_IO_NUM_CHILD_IOV - 2; i < BDEV_IO_NUM_CHILD_IOV; i++) { 2113 iov[i].iov_base = (void *)((i + 1) * 0x10000); 2114 iov[i].iov_len = 512 * 2; 2115 } 2116 2117 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0, 2118 BDEV_IO_NUM_CHILD_IOV, BDEV_IO_NUM_CHILD_IOV); 2119 /* 0 ~ (BDEV_IO_NUM_CHILD_IOV - 2) will not be split */ 2120 for (i = 0; i < BDEV_IO_NUM_CHILD_IOV - 2; i++) { 2121 ut_expected_io_set_iov(expected_io, i, iov[i].iov_base, iov[i].iov_len); 2122 } 2123 /* (BDEV_IO_NUM_CHILD_IOV - 2) is split */ 2124 ut_expected_io_set_iov(expected_io, i, iov[i].iov_base, 512); 2125 ut_expected_io_set_iov(expected_io, i + 1, iov[i].iov_base + 512, 512); 2126 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2127 2128 /* Child iov entries exceed the parent IO's maximum, so the rest is split in the next round */ 2129 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, BDEV_IO_NUM_CHILD_IOV, 2, 2); 2130 ut_expected_io_set_iov(expected_io, 0, iov[i + 1].iov_base, 512); 2131 ut_expected_io_set_iov(expected_io, 1, iov[i + 1].iov_base + 512, 512); 2132 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2133 2134 rc = spdk_bdev_readv_blocks(desc, io_ch, iov, BDEV_IO_NUM_CHILD_IOV, 0, 2135 BDEV_IO_NUM_CHILD_IOV + 2, io_done, NULL); 2136 CU_ASSERT(rc == 0); 2137 CU_ASSERT(g_io_done == false); 2138 2139 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 2140 stub_complete_io(1); 2141 CU_ASSERT(g_io_done == false); 2142 2143 /* Next round */ 2144 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 2145 stub_complete_io(1); 2146 CU_ASSERT(g_io_done == true); 2147 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 2148 2149 /* This case is similar to the previous one, but the IO composed of 2150 * the last few child iov entries is not enough for a blocklen, so they 2151 * cannot be put into this IO and must wait until the next one. 2152 */ 2153 bdev->max_segment_size = 512; 2154 bdev->max_num_segments = BDEV_IO_NUM_CHILD_IOV; 2155 g_io_done = false; 2156 2157 for (i = 0; i < BDEV_IO_NUM_CHILD_IOV - 2; i++) { 2158 iov[i].iov_base = (void *)((i + 1) * 0x10000); 2159 iov[i].iov_len = 512; 2160 } 2161 2162 for (i = BDEV_IO_NUM_CHILD_IOV - 2; i < BDEV_IO_NUM_CHILD_IOV + 2; i++) { 2163 iov[i].iov_base = (void *)((i + 1) * 0x10000); 2164 iov[i].iov_len = 128; 2165 } 2166 2167 /* The first child iovcnt isn't BDEV_IO_NUM_CHILD_IOV but BDEV_IO_NUM_CHILD_IOV - 2, 2168 * because the two remaining iovs are not enough for a full blocklen. 2169 */ 2170 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0, 2171 BDEV_IO_NUM_CHILD_IOV - 2, BDEV_IO_NUM_CHILD_IOV - 2); 2172 for (i = 0; i < BDEV_IO_NUM_CHILD_IOV - 2; i++) { 2173 ut_expected_io_set_iov(expected_io, i, iov[i].iov_base, iov[i].iov_len); 2174 } 2175 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2176 2177 /* The second child IO waits until the first child IO has completed, because 2178 * together their iovcnt would exceed the parent IO's child iovcnt.
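 * Four 128-byte entries add up to exactly one 512-byte block, so the second
 * child IO carries 4 iov entries but only 1 block, built from parent iovs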
2179 * BDEV_IO_NUM_CHILD_IOV - 2 to BDEV_IO_NUM_CHILD_IOV + 2 2180 */ 2181 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, BDEV_IO_NUM_CHILD_IOV - 2, 2182 1, 4); 2183 ut_expected_io_set_iov(expected_io, 0, iov[i].iov_base, iov[i].iov_len); 2184 ut_expected_io_set_iov(expected_io, 1, iov[i + 1].iov_base, iov[i + 1].iov_len); 2185 ut_expected_io_set_iov(expected_io, 2, iov[i + 2].iov_base, iov[i + 2].iov_len); 2186 ut_expected_io_set_iov(expected_io, 3, iov[i + 3].iov_base, iov[i + 3].iov_len); 2187 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2188 2189 rc = spdk_bdev_readv_blocks(desc, io_ch, iov, BDEV_IO_NUM_CHILD_IOV + 2, 0, 2190 BDEV_IO_NUM_CHILD_IOV - 1, io_done, NULL); 2191 CU_ASSERT(rc == 0); 2192 CU_ASSERT(g_io_done == false); 2193 2194 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 2195 stub_complete_io(1); 2196 CU_ASSERT(g_io_done == false); 2197 2198 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 2199 stub_complete_io(1); 2200 CU_ASSERT(g_io_done == true); 2201 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 2202 2203 /* A very complicated case: each sg entry exceeds max_segment_size and 2204 * needs to be split. In addition, each child IO must be a multiple of blocklen, 2205 * and the child iovcnt exceeds the parent iovcnt. 2206 */ 2207 bdev->max_segment_size = 512 + 128; 2208 bdev->max_num_segments = 3; 2209 g_io_done = false; 2210 2211 for (i = 0; i < BDEV_IO_NUM_CHILD_IOV - 2; i++) { 2212 iov[i].iov_base = (void *)((i + 1) * 0x10000); 2213 iov[i].iov_len = 512 + 256; 2214 } 2215 2216 for (i = BDEV_IO_NUM_CHILD_IOV - 2; i < BDEV_IO_NUM_CHILD_IOV + 2; i++) { 2217 iov[i].iov_base = (void *)((i + 1) * 0x10000); 2218 iov[i].iov_len = 512 + 128; 2219 } 2220 2221 /* Child IOs occupy 9 child iov entries per for() round, 3 * 9 = 27 entries in total. 2222 * Each round consumes 4 parent IO iov entries and 6 blocks, 2223 * generating 9 child IOs overall. 2224 */ 2225 for (i = 0; i < 3; i++) { 2226 uint32_t j = i * 4; 2227 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, i * 6, 2, 3); 2228 ut_expected_io_set_iov(expected_io, 0, iov[j].iov_base, 640); 2229 ut_expected_io_set_iov(expected_io, 1, iov[j].iov_base + 640, 128); 2230 ut_expected_io_set_iov(expected_io, 2, iov[j + 1].iov_base, 256); 2231 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2232 2233 /* Each child IO must be a multiple of blocklen, so 2234 * iov[j + 2] must be split. If the third entry were also added, 2235 * the multiple of blocklen could not be guaranteed. But the trimmed entry still 2236 * occupies one iov entry of the parent's child iov array.
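 * In bytes per round: child 1 is 640 + 128 + 256 = 1024 (2 blocks), child 2 is
 * 512 + 512 = 1024, and child 3 is 256 + 640 + 128 = 1024, so every child stays
 * block-aligned while no single entry exceeds max_segment_size (512 + 128 = 640).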
2237 */ 2238 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, i * 6 + 2, 2, 2); 2239 ut_expected_io_set_iov(expected_io, 0, iov[j + 1].iov_base + 256, 512); 2240 ut_expected_io_set_iov(expected_io, 1, iov[j + 2].iov_base, 512); 2241 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2242 2243 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, i * 6 + 4, 2, 3); 2244 ut_expected_io_set_iov(expected_io, 0, iov[j + 2].iov_base + 512, 256); 2245 ut_expected_io_set_iov(expected_io, 1, iov[j + 3].iov_base, 640); 2246 ut_expected_io_set_iov(expected_io, 2, iov[j + 3].iov_base + 640, 128); 2247 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2248 } 2249 2250 /* Child iov position is at 27 for the 10th child IO; 2251 * its parent iov entry index is 3 * 4 and its block offset is 3 * 6 2252 */ 2253 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 18, 2, 3); 2254 ut_expected_io_set_iov(expected_io, 0, iov[12].iov_base, 640); 2255 ut_expected_io_set_iov(expected_io, 1, iov[12].iov_base + 640, 128); 2256 ut_expected_io_set_iov(expected_io, 2, iov[13].iov_base, 256); 2257 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2258 2259 /* Child iov position is at 30 for the 11th child IO */ 2260 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 20, 2, 2); 2261 ut_expected_io_set_iov(expected_io, 0, iov[13].iov_base + 256, 512); 2262 ut_expected_io_set_iov(expected_io, 1, iov[14].iov_base, 512); 2263 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2264 2265 /* The 2nd split round starts with iovpos 0; this is the 12th child IO */ 2266 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 22, 2, 3); 2267 ut_expected_io_set_iov(expected_io, 0, iov[14].iov_base + 512, 256); 2268 ut_expected_io_set_iov(expected_io, 1, iov[15].iov_base, 640); 2269 ut_expected_io_set_iov(expected_io, 2, iov[15].iov_base + 640, 128); 2270 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2271 2272 /* Generate 9 more child IOs, consuming 27 child iov entries. 2273 * Each round consumes 4 parent IO iov entries and 6 blocks. 2274 * The parent IO iov index starts from 16 and the block offset from 24 2275 */ 2276 for (i = 0; i < 3; i++) { 2277 uint32_t j = i * 4 + 16; 2278 uint32_t offset = i * 6 + 24; 2279 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, offset, 2, 3); 2280 ut_expected_io_set_iov(expected_io, 0, iov[j].iov_base, 640); 2281 ut_expected_io_set_iov(expected_io, 1, iov[j].iov_base + 640, 128); 2282 ut_expected_io_set_iov(expected_io, 2, iov[j + 1].iov_base, 256); 2283 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2284 2285 /* Each child IO must be a multiple of blocklen, so 2286 * iov[j + 2] must be split. If the third entry were also added, 2287 * the multiple of blocklen could not be guaranteed. But the trimmed entry still 2288 * occupies one iov entry of the parent's child iov array.
2289 */ 2290 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, offset + 2, 2, 2); 2291 ut_expected_io_set_iov(expected_io, 0, iov[j + 1].iov_base + 256, 512); 2292 ut_expected_io_set_iov(expected_io, 1, iov[j + 2].iov_base, 512); 2293 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2294 2295 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, offset + 4, 2, 3); 2296 ut_expected_io_set_iov(expected_io, 0, iov[j + 2].iov_base + 512, 256); 2297 ut_expected_io_set_iov(expected_io, 1, iov[j + 3].iov_base, 640); 2298 ut_expected_io_set_iov(expected_io, 2, iov[j + 3].iov_base + 640, 128); 2299 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2300 } 2301 2302 /* The 22nd child IO, with child iov position at 30 */ 2303 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 42, 1, 1); 2304 ut_expected_io_set_iov(expected_io, 0, iov[28].iov_base, 512); 2305 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2306 2307 /* The third round */ 2308 /* Here is the 23rd child IO, with child iovpos 0 */ 2309 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 43, 2, 3); 2310 ut_expected_io_set_iov(expected_io, 0, iov[28].iov_base + 512, 256); 2311 ut_expected_io_set_iov(expected_io, 1, iov[29].iov_base, 640); 2312 ut_expected_io_set_iov(expected_io, 2, iov[29].iov_base + 640, 128); 2313 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2314 2315 /* The 24th child IO */ 2316 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 45, 3, 3); 2317 ut_expected_io_set_iov(expected_io, 0, iov[30].iov_base, 640); 2318 ut_expected_io_set_iov(expected_io, 1, iov[31].iov_base, 640); 2319 ut_expected_io_set_iov(expected_io, 2, iov[32].iov_base, 256); 2320 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2321 2322 /* The 25th child IO */ 2323 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 48, 2, 2); 2324 ut_expected_io_set_iov(expected_io, 0, iov[32].iov_base + 256, 384); 2325 ut_expected_io_set_iov(expected_io, 1, iov[33].iov_base, 640); 2326 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2327 2328 rc = spdk_bdev_readv_blocks(desc, io_ch, iov, BDEV_IO_NUM_CHILD_IOV + 2, 0, 2329 50, io_done, NULL); 2330 CU_ASSERT(rc == 0); 2331 CU_ASSERT(g_io_done == false); 2332 2333 /* The parent IO supports up to 32 child iovs, which works out to 2334 * a maximum of 11 child IOs in flight at a time; 2335 * splitting continues after the first batch is over. 2336 */ 2337 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 11); 2338 stub_complete_io(11); 2339 CU_ASSERT(g_io_done == false); 2340 2341 /* The 2nd round */ 2342 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 11); 2343 stub_complete_io(11); 2344 CU_ASSERT(g_io_done == false); 2345 2346 /* The last round */ 2347 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 3); 2348 stub_complete_io(3); 2349 CU_ASSERT(g_io_done == true); 2350 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 2351 2352 /* Test a WRITE_ZEROES. This should also not be split.
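 * WRITE_ZEROES carries no data iovs (the expected iovcnt below is 0), so the
 * max_segment_size and max_num_segments limits set here do not apply to it.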
*/ 2353 bdev->max_segment_size = 512; 2354 bdev->max_num_segments = 1; 2355 g_io_done = false; 2356 2357 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, 9, 36, 0); 2358 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2359 2360 rc = spdk_bdev_write_zeroes_blocks(desc, io_ch, 9, 36, io_done, NULL); 2361 CU_ASSERT(rc == 0); 2362 CU_ASSERT(g_io_done == false); 2363 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 2364 stub_complete_io(1); 2365 CU_ASSERT(g_io_done == true); 2366 2367 /* Test an UNMAP. This should also not be split. */ 2368 g_io_done = false; 2369 2370 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_UNMAP, 15, 4, 0); 2371 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2372 2373 rc = spdk_bdev_unmap_blocks(desc, io_ch, 15, 4, io_done, NULL); 2374 CU_ASSERT(rc == 0); 2375 CU_ASSERT(g_io_done == false); 2376 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 2377 stub_complete_io(1); 2378 CU_ASSERT(g_io_done == true); 2379 2380 /* Test a FLUSH. This should also not be split. */ 2381 g_io_done = false; 2382 2383 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_FLUSH, 15, 4, 0); 2384 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2385 2386 rc = spdk_bdev_flush_blocks(desc, io_ch, 15, 4, io_done, NULL); 2387 CU_ASSERT(rc == 0); 2388 CU_ASSERT(g_io_done == false); 2389 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 2390 stub_complete_io(1); 2391 CU_ASSERT(g_io_done == true); 2392 2393 spdk_put_io_channel(io_ch); 2394 spdk_bdev_close(desc); 2395 free_bdev(bdev); 2396 spdk_bdev_finish(bdev_fini_cb, NULL); 2397 poll_threads(); 2398 } 2399 2400 static void 2401 bdev_io_mix_split_test(void) 2402 { 2403 struct spdk_bdev *bdev; 2404 struct spdk_bdev_desc *desc = NULL; 2405 struct spdk_io_channel *io_ch; 2406 struct spdk_bdev_opts bdev_opts = {}; 2407 struct iovec iov[BDEV_IO_NUM_CHILD_IOV * 2]; 2408 struct ut_expected_io *expected_io; 2409 uint64_t i; 2410 int rc; 2411 2412 spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts)); 2413 bdev_opts.bdev_io_pool_size = 512; 2414 bdev_opts.bdev_io_cache_size = 64; 2415 2416 rc = spdk_bdev_set_opts(&bdev_opts); 2417 CU_ASSERT(rc == 0); 2418 spdk_bdev_initialize(bdev_init_cb, NULL); 2419 2420 bdev = allocate_bdev("bdev0"); 2421 2422 rc = spdk_bdev_open_ext(bdev->name, true, bdev_ut_event_cb, NULL, &desc); 2423 CU_ASSERT(rc == 0); 2424 SPDK_CU_ASSERT_FATAL(desc != NULL); 2425 io_ch = spdk_bdev_get_io_channel(desc); 2426 CU_ASSERT(io_ch != NULL); 2427 2428 /* First case optimal_io_boundary == max_segment_size * max_num_segments */ 2429 bdev->split_on_optimal_io_boundary = true; 2430 bdev->optimal_io_boundary = 16; 2431 2432 bdev->max_segment_size = 512; 2433 bdev->max_num_segments = 16; 2434 g_io_done = false; 2435 2436 /* An IO crossing the IO boundary requires a split. 2437 * Total: 2 child IOs.
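 * Reading 4 blocks at offset 14 crosses the boundary at block 16, giving children
 * (14, 2) and (16, 2); each child's 1024 bytes are then carved into two 512-byte
 * iov entries to respect max_segment_size.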
*/ 2438 2439 2440 /* The 1st child IO's payload is split into multiple segment entries by max_segment_size */ 2441 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 14, 2, 2); 2442 ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 512); 2443 ut_expected_io_set_iov(expected_io, 1, (void *)(0xF000 + 512), 512); 2444 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2445 2446 /* The 2nd child IO's payload is likewise split into multiple segment entries */ 2447 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 16, 2, 2); 2448 ut_expected_io_set_iov(expected_io, 0, (void *)(0xF000 + 2 * 512), 512); 2449 ut_expected_io_set_iov(expected_io, 1, (void *)(0xF000 + 3 * 512), 512); 2450 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2451 2452 rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 4, io_done, NULL); 2453 CU_ASSERT(rc == 0); 2454 CU_ASSERT(g_io_done == false); 2455 2456 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 2457 stub_complete_io(2); 2458 CU_ASSERT(g_io_done == true); 2459 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 2460 2461 /* Second case optimal_io_boundary > max_segment_size * max_num_segments */ 2462 bdev->max_segment_size = 15 * 512; 2463 bdev->max_num_segments = 1; 2464 g_io_done = false; 2465 2466 /* An IO crossing the IO boundary requires a split. 2467 * The 1st child IO's segment size exceeds max_segment_size, 2468 * so the 1st child IO is split into multiple segment entries, 2469 * and it then splits into 2 child IOs because of max_num_segments. 2470 * Total: 3 child IOs. 2471 */ 2472 2473 /* The first 2 child IOs are within one IO boundary. 2474 * Because optimal_io_boundary > max_segment_size * max_num_segments, 2475 * the boundary range splits into these first 2 IOs. 2476 */ 2477 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0, 15, 1); 2478 ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 512 * 15); 2479 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2480 2481 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 15, 1, 1); 2482 ut_expected_io_set_iov(expected_io, 0, (void *)(0xF000 + 512 * 15), 512); 2483 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2484 2485 /* The 3rd child IO exists because of the IO boundary */ 2486 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 16, 2, 1); 2487 ut_expected_io_set_iov(expected_io, 0, (void *)(0xF000 + 512 * 16), 512 * 2); 2488 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2489 2490 rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 0, 18, io_done, NULL); 2491 CU_ASSERT(rc == 0); 2492 CU_ASSERT(g_io_done == false); 2493 2494 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 3); 2495 stub_complete_io(3); 2496 CU_ASSERT(g_io_done == true); 2497 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 2498 2499 /* Third case optimal_io_boundary < max_segment_size * max_num_segments */ 2500 bdev->max_segment_size = 17 * 512; 2501 bdev->max_num_segments = 1; 2502 g_io_done = false; 2503 2504 /* An IO crossing the IO boundary requires a split, 2505 * but the child IOs do not split further. 2506 * Total: 2 child IOs.
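 * Reading 18 blocks at offset 0 splits only at the boundary: child (0, 16) fits
 * in a single segment because max_segment_size is 17 * 512, and child (16, 2)
 * picks up the remainder.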
*/ 2507 2508 2509 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0, 16, 1); 2510 ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 512 * 16); 2511 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2512 2513 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 16, 2, 1); 2514 ut_expected_io_set_iov(expected_io, 0, (void *)(0xF000 + 512 * 16), 512 * 2); 2515 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2516 2517 rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 0, 18, io_done, NULL); 2518 CU_ASSERT(rc == 0); 2519 CU_ASSERT(g_io_done == false); 2520 2521 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 2522 stub_complete_io(2); 2523 CU_ASSERT(g_io_done == true); 2524 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 2525 2526 /* Now set up a more complex, multi-vector command that needs to be split, 2527 * including splitting iovecs. 2528 * optimal_io_boundary < max_segment_size * max_num_segments 2529 */ 2530 bdev->max_segment_size = 3 * 512; 2531 bdev->max_num_segments = 6; 2532 g_io_done = false; 2533 2534 iov[0].iov_base = (void *)0x10000; 2535 iov[0].iov_len = 4 * 512; 2536 iov[1].iov_base = (void *)0x20000; 2537 iov[1].iov_len = 4 * 512; 2538 iov[2].iov_base = (void *)0x30000; 2539 iov[2].iov_len = 10 * 512; 2540 2541 /* An IO crossing the IO boundary requires a split. 2542 * The 1st child IO's segment size exceeds max_segment_size and, after 2543 * splitting on segment size, its segment count exceeds max_num_segments, 2544 * so the 1st child IO is split into 2 child IOs. 2545 * Total: 3 child IOs. 2546 */ 2547 2548 /* The first 2 child IOs are within one IO boundary. 2549 * After splitting on segment size, the segment count exceeds max_num_segments, 2550 * so the boundary range splits into 2 child IOs. 2551 */ 2552 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 0, 14, 6); 2553 ut_expected_io_set_iov(expected_io, 0, iov[0].iov_base, 512 * 3); 2554 ut_expected_io_set_iov(expected_io, 1, iov[0].iov_base + 512 * 3, 512); 2555 ut_expected_io_set_iov(expected_io, 2, iov[1].iov_base, 512 * 3); 2556 ut_expected_io_set_iov(expected_io, 3, iov[1].iov_base + 512 * 3, 512); 2557 ut_expected_io_set_iov(expected_io, 4, iov[2].iov_base, 512 * 3); 2558 ut_expected_io_set_iov(expected_io, 5, iov[2].iov_base + 512 * 3, 512 * 3); 2559 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2560 2561 /* The 2nd child IO takes the leftover segment entry */ 2562 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 14, 2, 1); 2563 ut_expected_io_set_iov(expected_io, 0, iov[2].iov_base + 512 * 6, 512 * 2); 2564 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2565 2566 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 16, 2, 1); 2567 ut_expected_io_set_iov(expected_io, 0, iov[2].iov_base + 512 * 8, 512 * 2); 2568 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2569 2570 rc = spdk_bdev_writev_blocks(desc, io_ch, iov, 3, 0, 18, io_done, NULL); 2571 CU_ASSERT(rc == 0); 2572 CU_ASSERT(g_io_done == false); 2573 2574 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 3); 2575 stub_complete_io(3); 2576 CU_ASSERT(g_io_done == true); 2577 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 2578 2579 /* A very complicated case: each sg entry exceeds max_segment_size 2580 * and the IO is split on the IO boundary.
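 * With 20 iovs of 2048 bytes (80 blocks in total), each boundary-sized child of
 * 16 blocks spans 4 parent iovs, and each 2048-byte iov splits into 1536 + 512
 * bytes, giving 8 child iov entries per child IO.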
2581 * optimal_io_boundary < max_segment_size * max_num_segments 2582 */ 2583 bdev->max_segment_size = 3 * 512; 2584 bdev->max_num_segments = BDEV_IO_NUM_CHILD_IOV; 2585 g_io_done = false; 2586 2587 for (i = 0; i < 20; i++) { 2588 iov[i].iov_base = (void *)((i + 1) * 0x10000); 2589 iov[i].iov_len = 512 * 4; 2590 } 2591 2592 /* An IO crossing the IO boundary requires a split. 2593 * The 80-block length splits into 5 child IOs based on the offset and the IO boundary. 2594 * Each iov entry needs to be split into 2 entries because of max_segment_size. 2595 * Total: 5 child IOs. 2596 */ 2597 2598 /* 4 iov entries fall within one IO boundary and each splits into 2, 2599 * so each child IO occupies 8 child iov entries. 2600 */ 2601 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 0, 16, 8); 2602 for (i = 0; i < 4; i++) { 2603 int iovcnt = i * 2; 2604 ut_expected_io_set_iov(expected_io, iovcnt, iov[i].iov_base, 512 * 3); 2605 ut_expected_io_set_iov(expected_io, iovcnt + 1, iov[i].iov_base + 512 * 3, 512); 2606 } 2607 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2608 2609 /* 2nd child IO and total 16 child iov entries of parent IO */ 2610 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 16, 16, 8); 2611 for (i = 4; i < 8; i++) { 2612 int iovcnt = (i - 4) * 2; 2613 ut_expected_io_set_iov(expected_io, iovcnt, iov[i].iov_base, 512 * 3); 2614 ut_expected_io_set_iov(expected_io, iovcnt + 1, iov[i].iov_base + 512 * 3, 512); 2615 } 2616 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2617 2618 /* 3rd child IO and total 24 child iov entries of parent IO */ 2619 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 32, 16, 8); 2620 for (i = 8; i < 12; i++) { 2621 int iovcnt = (i - 8) * 2; 2622 ut_expected_io_set_iov(expected_io, iovcnt, iov[i].iov_base, 512 * 3); 2623 ut_expected_io_set_iov(expected_io, iovcnt + 1, iov[i].iov_base + 512 * 3, 512); 2624 } 2625 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2626 2627 /* 4th child IO and total 32 child iov entries of parent IO */ 2628 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 48, 16, 8); 2629 for (i = 12; i < 16; i++) { 2630 int iovcnt = (i - 12) * 2; 2631 ut_expected_io_set_iov(expected_io, iovcnt, iov[i].iov_base, 512 * 3); 2632 ut_expected_io_set_iov(expected_io, iovcnt + 1, iov[i].iov_base + 512 * 3, 512); 2633 } 2634 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2635 2636 /* The 5th child IO has used up the parent's child iov entries, so it is 2637 * split off in the next round.
2638 */ 2639 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 64, 16, 8); 2640 for (i = 16; i < 20; i++) { 2641 int iovcnt = (i - 16) * 2; 2642 ut_expected_io_set_iov(expected_io, iovcnt, iov[i].iov_base, 512 * 3); 2643 ut_expected_io_set_iov(expected_io, iovcnt + 1, iov[i].iov_base + 512 * 3, 512); 2644 } 2645 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2646 2647 rc = spdk_bdev_writev_blocks(desc, io_ch, iov, 20, 0, 80, io_done, NULL); 2648 CU_ASSERT(rc == 0); 2649 CU_ASSERT(g_io_done == false); 2650 2651 /* First split round */ 2652 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 4); 2653 stub_complete_io(4); 2654 CU_ASSERT(g_io_done == false); 2655 2656 /* Second split round */ 2657 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 2658 stub_complete_io(1); 2659 CU_ASSERT(g_io_done == true); 2660 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 2661 2662 spdk_put_io_channel(io_ch); 2663 spdk_bdev_close(desc); 2664 free_bdev(bdev); 2665 spdk_bdev_finish(bdev_fini_cb, NULL); 2666 poll_threads(); 2667 } 2668 2669 static void 2670 bdev_io_split_with_io_wait(void) 2671 { 2672 struct spdk_bdev *bdev; 2673 struct spdk_bdev_desc *desc = NULL; 2674 struct spdk_io_channel *io_ch; 2675 struct spdk_bdev_channel *channel; 2676 struct spdk_bdev_mgmt_channel *mgmt_ch; 2677 struct spdk_bdev_opts bdev_opts = {}; 2678 struct iovec iov[3]; 2679 struct ut_expected_io *expected_io; 2680 int rc; 2681 2682 spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts)); 2683 bdev_opts.bdev_io_pool_size = 2; 2684 bdev_opts.bdev_io_cache_size = 1; 2685 2686 rc = spdk_bdev_set_opts(&bdev_opts); 2687 CU_ASSERT(rc == 0); 2688 spdk_bdev_initialize(bdev_init_cb, NULL); 2689 2690 bdev = allocate_bdev("bdev0"); 2691 2692 rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc); 2693 CU_ASSERT(rc == 0); 2694 CU_ASSERT(desc != NULL); 2695 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 2696 io_ch = spdk_bdev_get_io_channel(desc); 2697 CU_ASSERT(io_ch != NULL); 2698 channel = spdk_io_channel_get_ctx(io_ch); 2699 mgmt_ch = channel->shared_resource->mgmt_ch; 2700 2701 bdev->optimal_io_boundary = 16; 2702 bdev->split_on_optimal_io_boundary = true; 2703 2704 rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, NULL); 2705 CU_ASSERT(rc == 0); 2706 2707 /* Now test that a single-vector command is split correctly. 2708 * Offset 14, length 8, payload 0xF000 2709 * Child - Offset 14, length 2, payload 0xF000 2710 * Child - Offset 16, length 6, payload 0xF000 + 2 * 512 2711 * 2712 * Set up the expected values before calling spdk_bdev_read_blocks 2713 */ 2714 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 14, 2, 1); 2715 ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 2 * 512); 2716 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2717 2718 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 16, 6, 1); 2719 ut_expected_io_set_iov(expected_io, 0, (void *)(0xF000 + 2 * 512), 6 * 512); 2720 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2721 2722 /* The following children will be submitted sequentially due to the capacity of 2723 * spdk_bdev_io. 
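 * The pool was configured above with bdev_io_pool_size == 2, and one bdev_io is
 * already held by the outstanding read submitted just before this comment, so
 * each child below must wait for a bdev_io to return to the pool.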
2724 */ 2725 2726 /* The first child I/O will be queued to wait until an spdk_bdev_io becomes available */ 2727 rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 8, io_done, NULL); 2728 CU_ASSERT(rc == 0); 2729 CU_ASSERT(!TAILQ_EMPTY(&mgmt_ch->io_wait_queue)); 2730 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 2731 2732 /* Completing the first read I/O will submit the first child */ 2733 stub_complete_io(1); 2734 CU_ASSERT(TAILQ_EMPTY(&mgmt_ch->io_wait_queue)); 2735 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 2736 2737 /* Completing the first child will submit the second child */ 2738 stub_complete_io(1); 2739 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 2740 2741 /* Complete the second child I/O. This should result in our callback getting 2742 * invoked since the parent I/O is now complete. 2743 */ 2744 stub_complete_io(1); 2745 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 2746 2747 /* Now set up a more complex, multi-vector command that needs to be split, 2748 * including splitting iovecs. 2749 */ 2750 iov[0].iov_base = (void *)0x10000; 2751 iov[0].iov_len = 512; 2752 iov[1].iov_base = (void *)0x20000; 2753 iov[1].iov_len = 20 * 512; 2754 iov[2].iov_base = (void *)0x30000; 2755 iov[2].iov_len = 11 * 512; 2756 2757 g_io_done = false; 2758 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 14, 2, 2); 2759 ut_expected_io_set_iov(expected_io, 0, (void *)0x10000, 512); 2760 ut_expected_io_set_iov(expected_io, 1, (void *)0x20000, 512); 2761 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2762 2763 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 16, 16, 1); 2764 ut_expected_io_set_iov(expected_io, 0, (void *)(0x20000 + 512), 16 * 512); 2765 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2766 2767 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 32, 14, 2); 2768 ut_expected_io_set_iov(expected_io, 0, (void *)(0x20000 + 17 * 512), 3 * 512); 2769 ut_expected_io_set_iov(expected_io, 1, (void *)0x30000, 11 * 512); 2770 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2771 2772 rc = spdk_bdev_writev_blocks(desc, io_ch, iov, 3, 14, 32, io_done, NULL); 2773 CU_ASSERT(rc == 0); 2774 CU_ASSERT(g_io_done == false); 2775 2776 /* The following children will be submitted sequentially due to the capacity of 2777 * spdk_bdev_io. 2778 */ 2779 2780 /* Completing the first child will submit the second child */ 2781 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 2782 stub_complete_io(1); 2783 CU_ASSERT(g_io_done == false); 2784 2785 /* Completing the second child will submit the third child */ 2786 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 2787 stub_complete_io(1); 2788 CU_ASSERT(g_io_done == false); 2789 2790 /* Completing the third child will result in our callback getting invoked 2791 * since the parent I/O is now complete. 
2792 */ 2793 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 2794 stub_complete_io(1); 2795 CU_ASSERT(g_io_done == true); 2796 2797 CU_ASSERT(TAILQ_EMPTY(&g_bdev_ut_channel->expected_io)); 2798 2799 spdk_put_io_channel(io_ch); 2800 spdk_bdev_close(desc); 2801 free_bdev(bdev); 2802 spdk_bdev_finish(bdev_fini_cb, NULL); 2803 poll_threads(); 2804 } 2805 2806 static void 2807 bdev_io_alignment(void) 2808 { 2809 struct spdk_bdev *bdev; 2810 struct spdk_bdev_desc *desc = NULL; 2811 struct spdk_io_channel *io_ch; 2812 struct spdk_bdev_opts bdev_opts = {}; 2813 int rc; 2814 void *buf = NULL; 2815 struct iovec iovs[2]; 2816 int iovcnt; 2817 uint64_t alignment; 2818 2819 spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts)); 2820 bdev_opts.bdev_io_pool_size = 20; 2821 bdev_opts.bdev_io_cache_size = 2; 2822 2823 rc = spdk_bdev_set_opts(&bdev_opts); 2824 CU_ASSERT(rc == 0); 2825 spdk_bdev_initialize(bdev_init_cb, NULL); 2826 2827 fn_table.submit_request = stub_submit_request_get_buf; 2828 bdev = allocate_bdev("bdev0"); 2829 2830 rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc); 2831 CU_ASSERT(rc == 0); 2832 CU_ASSERT(desc != NULL); 2833 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 2834 io_ch = spdk_bdev_get_io_channel(desc); 2835 CU_ASSERT(io_ch != NULL); 2836 2837 /* Create aligned buffer */ 2838 rc = posix_memalign(&buf, 4096, 8192); 2839 SPDK_CU_ASSERT_FATAL(rc == 0); 2840 2841 /* Pass aligned single buffer with no alignment required */ 2842 alignment = 1; 2843 bdev->required_alignment = spdk_u32log2(alignment); 2844 2845 rc = spdk_bdev_write_blocks(desc, io_ch, buf, 0, 1, io_done, NULL); 2846 CU_ASSERT(rc == 0); 2847 stub_complete_io(1); 2848 CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt, 2849 alignment)); 2850 2851 rc = spdk_bdev_read_blocks(desc, io_ch, buf, 0, 1, io_done, NULL); 2852 CU_ASSERT(rc == 0); 2853 stub_complete_io(1); 2854 CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt, 2855 alignment)); 2856 2857 /* Pass unaligned single buffer with no alignment required */ 2858 alignment = 1; 2859 bdev->required_alignment = spdk_u32log2(alignment); 2860 2861 rc = spdk_bdev_write_blocks(desc, io_ch, buf + 4, 0, 1, io_done, NULL); 2862 CU_ASSERT(rc == 0); 2863 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0); 2864 CU_ASSERT(g_bdev_io->u.bdev.iovs[0].iov_base == buf + 4); 2865 stub_complete_io(1); 2866 2867 rc = spdk_bdev_read_blocks(desc, io_ch, buf + 4, 0, 1, io_done, NULL); 2868 CU_ASSERT(rc == 0); 2869 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0); 2870 CU_ASSERT(g_bdev_io->u.bdev.iovs[0].iov_base == buf + 4); 2871 stub_complete_io(1); 2872 2873 /* Pass unaligned single buffer with 512 alignment required */ 2874 alignment = 512; 2875 bdev->required_alignment = spdk_u32log2(alignment); 2876 2877 rc = spdk_bdev_write_blocks(desc, io_ch, buf + 4, 0, 1, io_done, NULL); 2878 CU_ASSERT(rc == 0); 2879 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 1); 2880 CU_ASSERT(g_bdev_io->u.bdev.iovs == &g_bdev_io->internal.bounce_iov); 2881 CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt, 2882 alignment)); 2883 stub_complete_io(1); 2884 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0); 2885 2886 rc = spdk_bdev_read_blocks(desc, io_ch, buf + 4, 0, 1, io_done, NULL); 2887 CU_ASSERT(rc == 0); 2888 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 1); 2889 CU_ASSERT(g_bdev_io->u.bdev.iovs == &g_bdev_io->internal.bounce_iov); 2890 CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt, 
2891 alignment)); 2892 stub_complete_io(1); 2893 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0); 2894 2895 /* Pass unaligned single buffer with 4096 alignment required */ 2896 alignment = 4096; 2897 bdev->required_alignment = spdk_u32log2(alignment); 2898 2899 rc = spdk_bdev_write_blocks(desc, io_ch, buf + 8, 0, 1, io_done, NULL); 2900 CU_ASSERT(rc == 0); 2901 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 1); 2902 CU_ASSERT(g_bdev_io->u.bdev.iovs == &g_bdev_io->internal.bounce_iov); 2903 CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt, 2904 alignment)); 2905 stub_complete_io(1); 2906 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0); 2907 2908 rc = spdk_bdev_read_blocks(desc, io_ch, buf + 8, 0, 1, io_done, NULL); 2909 CU_ASSERT(rc == 0); 2910 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 1); 2911 CU_ASSERT(g_bdev_io->u.bdev.iovs == &g_bdev_io->internal.bounce_iov); 2912 CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt, 2913 alignment)); 2914 stub_complete_io(1); 2915 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0); 2916 2917 /* Pass aligned iovs with no alignment required */ 2918 alignment = 1; 2919 bdev->required_alignment = spdk_u32log2(alignment); 2920 2921 iovcnt = 1; 2922 iovs[0].iov_base = buf; 2923 iovs[0].iov_len = 512; 2924 2925 rc = spdk_bdev_writev(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL); 2926 CU_ASSERT(rc == 0); 2927 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0); 2928 stub_complete_io(1); 2929 CU_ASSERT(g_bdev_io->u.bdev.iovs[0].iov_base == iovs[0].iov_base); 2930 2931 rc = spdk_bdev_readv(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL); 2932 CU_ASSERT(rc == 0); 2933 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0); 2934 stub_complete_io(1); 2935 CU_ASSERT(g_bdev_io->u.bdev.iovs[0].iov_base == iovs[0].iov_base); 2936 2937 /* Pass unaligned iovs with no alignment required */ 2938 alignment = 1; 2939 bdev->required_alignment = spdk_u32log2(alignment); 2940 2941 iovcnt = 2; 2942 iovs[0].iov_base = buf + 16; 2943 iovs[0].iov_len = 256; 2944 iovs[1].iov_base = buf + 16 + 256 + 32; 2945 iovs[1].iov_len = 256; 2946 2947 rc = spdk_bdev_writev(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL); 2948 CU_ASSERT(rc == 0); 2949 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0); 2950 stub_complete_io(1); 2951 CU_ASSERT(g_bdev_io->u.bdev.iovs[0].iov_base == iovs[0].iov_base); 2952 2953 rc = spdk_bdev_readv(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL); 2954 CU_ASSERT(rc == 0); 2955 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0); 2956 stub_complete_io(1); 2957 CU_ASSERT(g_bdev_io->u.bdev.iovs[0].iov_base == iovs[0].iov_base); 2958 2959 /* Pass unaligned iov with 2048 alignment required */ 2960 alignment = 2048; 2961 bdev->required_alignment = spdk_u32log2(alignment); 2962 2963 iovcnt = 2; 2964 iovs[0].iov_base = buf + 16; 2965 iovs[0].iov_len = 256; 2966 iovs[1].iov_base = buf + 16 + 256 + 32; 2967 iovs[1].iov_len = 256; 2968 2969 rc = spdk_bdev_writev(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL); 2970 CU_ASSERT(rc == 0); 2971 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == iovcnt); 2972 CU_ASSERT(g_bdev_io->u.bdev.iovs == &g_bdev_io->internal.bounce_iov); 2973 CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt, 2974 alignment)); 2975 stub_complete_io(1); 2976 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0); 2977 2978 rc = spdk_bdev_readv(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL); 2979 CU_ASSERT(rc == 0); 2980 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == iovcnt); 2981 
CU_ASSERT(g_bdev_io->u.bdev.iovs == &g_bdev_io->internal.bounce_iov); 2982 CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt, 2983 alignment)); 2984 stub_complete_io(1); 2985 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0); 2986 2987 /* Pass an iov without an allocated buffer and with no alignment required */ 2988 alignment = 1; 2989 bdev->required_alignment = spdk_u32log2(alignment); 2990 2991 iovcnt = 1; 2992 iovs[0].iov_base = NULL; 2993 iovs[0].iov_len = 0; 2994 2995 rc = spdk_bdev_readv(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL); 2996 CU_ASSERT(rc == 0); 2997 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0); 2998 CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt, 2999 alignment)); 3000 stub_complete_io(1); 3001 3002 /* Pass an iov without an allocated buffer and with 1024 alignment required */ 3003 alignment = 1024; 3004 bdev->required_alignment = spdk_u32log2(alignment); 3005 3006 iovcnt = 1; 3007 iovs[0].iov_base = NULL; 3008 iovs[0].iov_len = 0; 3009 3010 rc = spdk_bdev_readv(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL); 3011 CU_ASSERT(rc == 0); 3012 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0); 3013 CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt, 3014 alignment)); 3015 stub_complete_io(1); 3016 3017 spdk_put_io_channel(io_ch); 3018 spdk_bdev_close(desc); 3019 free_bdev(bdev); 3020 fn_table.submit_request = stub_submit_request; 3021 spdk_bdev_finish(bdev_fini_cb, NULL); 3022 poll_threads(); 3023 3024 free(buf); 3025 } 3026 3027 static void 3028 bdev_io_alignment_with_boundary(void) 3029 { 3030 struct spdk_bdev *bdev; 3031 struct spdk_bdev_desc *desc = NULL; 3032 struct spdk_io_channel *io_ch; 3033 struct spdk_bdev_opts bdev_opts = {}; 3034 int rc; 3035 void *buf = NULL; 3036 struct iovec iovs[2]; 3037 int iovcnt; 3038 uint64_t alignment; 3039 3040 spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts)); 3041 bdev_opts.bdev_io_pool_size = 20; 3042 bdev_opts.bdev_io_cache_size = 2; 3043 3044 bdev_opts.opts_size = sizeof(bdev_opts); 3045 rc = spdk_bdev_set_opts(&bdev_opts); 3046 CU_ASSERT(rc == 0); 3047 spdk_bdev_initialize(bdev_init_cb, NULL); 3048 3049 fn_table.submit_request = stub_submit_request_get_buf; 3050 bdev = allocate_bdev("bdev0"); 3051 3052 rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc); 3053 CU_ASSERT(rc == 0); 3054 CU_ASSERT(desc != NULL); 3055 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 3056 io_ch = spdk_bdev_get_io_channel(desc); 3057 CU_ASSERT(io_ch != NULL); 3058 3059 /* Create aligned buffer */ 3060 rc = posix_memalign(&buf, 4096, 131072); 3061 SPDK_CU_ASSERT_FATAL(rc == 0); 3062 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 3063 3064 /* 512 * 3 with 2 IO boundary, allocate small data buffer from bdev layer */ 3065 alignment = 512; 3066 bdev->required_alignment = spdk_u32log2(alignment); 3067 bdev->optimal_io_boundary = 2; 3068 bdev->split_on_optimal_io_boundary = true; 3069 3070 iovcnt = 1; 3071 iovs[0].iov_base = NULL; 3072 iovs[0].iov_len = 512 * 3; 3073 3074 rc = spdk_bdev_readv_blocks(desc, io_ch, iovs, iovcnt, 1, 3, io_done, NULL); 3075 CU_ASSERT(rc == 0); 3076 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 3077 stub_complete_io(2); 3078 3079 /* 8KiB with 16 IO boundary, allocate large data buffer from bdev layer */ 3080 alignment = 512; 3081 bdev->required_alignment = spdk_u32log2(alignment); 3082 bdev->optimal_io_boundary = 16; 3083 bdev->split_on_optimal_io_boundary = true; 3084 3085 iovcnt = 1; 3086 iovs[0].iov_base = NULL; 3087 iovs[0].iov_len
= 512 * 16; 3088 3089 rc = spdk_bdev_readv_blocks(desc, io_ch, iovs, iovcnt, 1, 16, io_done, NULL); 3090 CU_ASSERT(rc == 0); 3091 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 3092 stub_complete_io(2); 3093 3094 /* 512 * 160 with 128 IO boundary, 63.5KiB + 16.5KiB for the two child requests */ 3095 alignment = 512; 3096 bdev->required_alignment = spdk_u32log2(alignment); 3097 bdev->optimal_io_boundary = 128; 3098 bdev->split_on_optimal_io_boundary = true; 3099 3100 iovcnt = 1; 3101 iovs[0].iov_base = buf + 16; 3102 iovs[0].iov_len = 512 * 160; 3103 rc = spdk_bdev_readv_blocks(desc, io_ch, iovs, iovcnt, 1, 160, io_done, NULL); 3104 CU_ASSERT(rc == 0); 3105 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 3106 stub_complete_io(2); 3107 3108 /* 512 * 3 with 2 IO boundary */ 3109 alignment = 512; 3110 bdev->required_alignment = spdk_u32log2(alignment); 3111 bdev->optimal_io_boundary = 2; 3112 bdev->split_on_optimal_io_boundary = true; 3113 3114 iovcnt = 2; 3115 iovs[0].iov_base = buf + 16; 3116 iovs[0].iov_len = 512; 3117 iovs[1].iov_base = buf + 16 + 512 + 32; 3118 iovs[1].iov_len = 1024; 3119 3120 rc = spdk_bdev_writev_blocks(desc, io_ch, iovs, iovcnt, 1, 3, io_done, NULL); 3121 CU_ASSERT(rc == 0); 3122 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 3123 stub_complete_io(2); 3124 3125 rc = spdk_bdev_readv_blocks(desc, io_ch, iovs, iovcnt, 1, 3, io_done, NULL); 3126 CU_ASSERT(rc == 0); 3127 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 3128 stub_complete_io(2); 3129 3130 /* 512 * 64 with 32 IO boundary */ 3131 bdev->optimal_io_boundary = 32; 3132 iovcnt = 2; 3133 iovs[0].iov_base = buf + 16; 3134 iovs[0].iov_len = 16384; 3135 iovs[1].iov_base = buf + 16 + 16384 + 32; 3136 iovs[1].iov_len = 16384; 3137 3138 rc = spdk_bdev_writev_blocks(desc, io_ch, iovs, iovcnt, 1, 64, io_done, NULL); 3139 CU_ASSERT(rc == 0); 3140 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 3); 3141 stub_complete_io(3); 3142 3143 rc = spdk_bdev_readv_blocks(desc, io_ch, iovs, iovcnt, 1, 64, io_done, NULL); 3144 CU_ASSERT(rc == 0); 3145 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 3); 3146 stub_complete_io(3); 3147 3148 /* 512 * 160 with 32 IO boundary */ 3149 iovcnt = 1; 3150 iovs[0].iov_base = buf + 16; 3151 iovs[0].iov_len = 16384 + 65536; 3152 3153 rc = spdk_bdev_writev_blocks(desc, io_ch, iovs, iovcnt, 1, 160, io_done, NULL); 3154 CU_ASSERT(rc == 0); 3155 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 6); 3156 stub_complete_io(6); 3157 3158 spdk_put_io_channel(io_ch); 3159 spdk_bdev_close(desc); 3160 free_bdev(bdev); 3161 fn_table.submit_request = stub_submit_request; 3162 spdk_bdev_finish(bdev_fini_cb, NULL); 3163 poll_threads(); 3164 3165 free(buf); 3166 } 3167 3168 static void 3169 histogram_status_cb(void *cb_arg, int status) 3170 { 3171 g_status = status; 3172 } 3173 3174 static void 3175 histogram_data_cb(void *cb_arg, int status, struct spdk_histogram_data *histogram) 3176 { 3177 g_status = status; 3178 g_histogram = histogram; 3179 } 3180 3181 static void 3182 histogram_io_count(void *ctx, uint64_t start, uint64_t end, uint64_t count, 3183 uint64_t total, uint64_t so_far) 3184 { 3185 g_count += count; 3186 } 3187 3188 static void 3189 bdev_histograms(void) 3190 { 3191 struct spdk_bdev *bdev; 3192 struct spdk_bdev_desc *desc = NULL; 3193 struct spdk_io_channel *ch; 3194 struct spdk_histogram_data *histogram; 3195 uint8_t buf[4096]; 3196 int rc; 3197 3198 spdk_bdev_initialize(bdev_init_cb, NULL); 3199 3200 bdev = allocate_bdev("bdev"); 3201 3202 rc = 
spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc); 3203 CU_ASSERT(rc == 0); 3204 CU_ASSERT(desc != NULL); 3205 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 3206 3207 ch = spdk_bdev_get_io_channel(desc); 3208 CU_ASSERT(ch != NULL); 3209 3210 /* Enable histogram */ 3211 g_status = -1; 3212 spdk_bdev_histogram_enable(bdev, histogram_status_cb, NULL, true); 3213 poll_threads(); 3214 CU_ASSERT(g_status == 0); 3215 CU_ASSERT(bdev->internal.histogram_enabled == true); 3216 3217 /* Allocate histogram */ 3218 histogram = spdk_histogram_data_alloc(); 3219 SPDK_CU_ASSERT_FATAL(histogram != NULL); 3220 3221 /* Check if histogram is zeroed */ 3222 spdk_bdev_histogram_get(bdev, histogram, histogram_data_cb, NULL); 3223 poll_threads(); 3224 CU_ASSERT(g_status == 0); 3225 SPDK_CU_ASSERT_FATAL(g_histogram != NULL); 3226 3227 g_count = 0; 3228 spdk_histogram_data_iterate(g_histogram, histogram_io_count, NULL); 3229 3230 CU_ASSERT(g_count == 0); 3231 3232 rc = spdk_bdev_write_blocks(desc, ch, buf, 0, 1, io_done, NULL); 3233 CU_ASSERT(rc == 0); 3234 3235 spdk_delay_us(10); 3236 stub_complete_io(1); 3237 poll_threads(); 3238 3239 rc = spdk_bdev_read_blocks(desc, ch, buf, 0, 1, io_done, NULL); 3240 CU_ASSERT(rc == 0); 3241 3242 spdk_delay_us(10); 3243 stub_complete_io(1); 3244 poll_threads(); 3245 3246 /* Check if histogram gathered data from all I/O channels */ 3247 g_histogram = NULL; 3248 spdk_bdev_histogram_get(bdev, histogram, histogram_data_cb, NULL); 3249 poll_threads(); 3250 CU_ASSERT(g_status == 0); 3251 CU_ASSERT(bdev->internal.histogram_enabled == true); 3252 SPDK_CU_ASSERT_FATAL(g_histogram != NULL); 3253 3254 g_count = 0; 3255 spdk_histogram_data_iterate(g_histogram, histogram_io_count, NULL); 3256 CU_ASSERT(g_count == 2); 3257 3258 /* Disable histogram */ 3259 spdk_bdev_histogram_enable(bdev, histogram_status_cb, NULL, false); 3260 poll_threads(); 3261 CU_ASSERT(g_status == 0); 3262 CU_ASSERT(bdev->internal.histogram_enabled == false); 3263 3264 /* Try to run histogram commands on disabled bdev */ 3265 spdk_bdev_histogram_get(bdev, histogram, histogram_data_cb, NULL); 3266 poll_threads(); 3267 CU_ASSERT(g_status == -EFAULT); 3268 3269 spdk_histogram_data_free(histogram); 3270 spdk_put_io_channel(ch); 3271 spdk_bdev_close(desc); 3272 free_bdev(bdev); 3273 spdk_bdev_finish(bdev_fini_cb, NULL); 3274 poll_threads(); 3275 } 3276 3277 static void 3278 _bdev_compare(bool emulated) 3279 { 3280 struct spdk_bdev *bdev; 3281 struct spdk_bdev_desc *desc = NULL; 3282 struct spdk_io_channel *ioch; 3283 struct ut_expected_io *expected_io; 3284 uint64_t offset, num_blocks; 3285 uint32_t num_completed; 3286 char aa_buf[512]; 3287 char bb_buf[512]; 3288 struct iovec compare_iov; 3289 uint8_t io_type; 3290 int rc; 3291 3292 if (emulated) { 3293 io_type = SPDK_BDEV_IO_TYPE_READ; 3294 } else { 3295 io_type = SPDK_BDEV_IO_TYPE_COMPARE; 3296 } 3297 3298 memset(aa_buf, 0xaa, sizeof(aa_buf)); 3299 memset(bb_buf, 0xbb, sizeof(bb_buf)); 3300 3301 g_io_types_supported[SPDK_BDEV_IO_TYPE_COMPARE] = !emulated; 3302 3303 spdk_bdev_initialize(bdev_init_cb, NULL); 3304 fn_table.submit_request = stub_submit_request_get_buf; 3305 bdev = allocate_bdev("bdev"); 3306 3307 rc = spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc); 3308 CU_ASSERT_EQUAL(rc, 0); 3309 SPDK_CU_ASSERT_FATAL(desc != NULL); 3310 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 3311 ioch = spdk_bdev_get_io_channel(desc); 3312 SPDK_CU_ASSERT_FATAL(ioch != NULL); 3313 3314 fn_table.submit_request = stub_submit_request_get_buf; 3315 
g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 3316 3317 offset = 50; 3318 num_blocks = 1; 3319 compare_iov.iov_base = aa_buf; 3320 compare_iov.iov_len = sizeof(aa_buf); 3321 3322 expected_io = ut_alloc_expected_io(io_type, offset, num_blocks, 0); 3323 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 3324 3325 g_io_done = false; 3326 g_compare_read_buf = aa_buf; 3327 g_compare_read_buf_len = sizeof(aa_buf); 3328 rc = spdk_bdev_comparev_blocks(desc, ioch, &compare_iov, 1, offset, num_blocks, io_done, NULL); 3329 CU_ASSERT_EQUAL(rc, 0); 3330 num_completed = stub_complete_io(1); 3331 CU_ASSERT_EQUAL(num_completed, 1); 3332 CU_ASSERT(g_io_done == true); 3333 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS); 3334 3335 expected_io = ut_alloc_expected_io(io_type, offset, num_blocks, 0); 3336 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 3337 3338 g_io_done = false; 3339 g_compare_read_buf = bb_buf; 3340 g_compare_read_buf_len = sizeof(bb_buf); 3341 rc = spdk_bdev_comparev_blocks(desc, ioch, &compare_iov, 1, offset, num_blocks, io_done, NULL); 3342 CU_ASSERT_EQUAL(rc, 0); 3343 num_completed = stub_complete_io(1); 3344 CU_ASSERT_EQUAL(num_completed, 1); 3345 CU_ASSERT(g_io_done == true); 3346 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_MISCOMPARE); 3347 3348 spdk_put_io_channel(ioch); 3349 spdk_bdev_close(desc); 3350 free_bdev(bdev); 3351 fn_table.submit_request = stub_submit_request; 3352 spdk_bdev_finish(bdev_fini_cb, NULL); 3353 poll_threads(); 3354 3355 g_io_types_supported[SPDK_BDEV_IO_TYPE_COMPARE] = true; 3356 3357 g_compare_read_buf = NULL; 3358 } 3359 3360 static void 3361 bdev_compare(void) 3362 { 3363 _bdev_compare(true); 3364 _bdev_compare(false); 3365 } 3366 3367 static void 3368 bdev_compare_and_write(void) 3369 { 3370 struct spdk_bdev *bdev; 3371 struct spdk_bdev_desc *desc = NULL; 3372 struct spdk_io_channel *ioch; 3373 struct ut_expected_io *expected_io; 3374 uint64_t offset, num_blocks; 3375 uint32_t num_completed; 3376 char aa_buf[512]; 3377 char bb_buf[512]; 3378 char cc_buf[512]; 3379 char write_buf[512]; 3380 struct iovec compare_iov; 3381 struct iovec write_iov; 3382 int rc; 3383 3384 memset(aa_buf, 0xaa, sizeof(aa_buf)); 3385 memset(bb_buf, 0xbb, sizeof(bb_buf)); 3386 memset(cc_buf, 0xcc, sizeof(cc_buf)); 3387 3388 g_io_types_supported[SPDK_BDEV_IO_TYPE_COMPARE] = false; 3389 3390 spdk_bdev_initialize(bdev_init_cb, NULL); 3391 fn_table.submit_request = stub_submit_request_get_buf; 3392 bdev = allocate_bdev("bdev"); 3393 3394 rc = spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc); 3395 CU_ASSERT_EQUAL(rc, 0); 3396 SPDK_CU_ASSERT_FATAL(desc != NULL); 3397 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 3398 ioch = spdk_bdev_get_io_channel(desc); 3399 SPDK_CU_ASSERT_FATAL(ioch != NULL); 3400 3401 fn_table.submit_request = stub_submit_request_get_buf; 3402 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 3403 3404 offset = 50; 3405 num_blocks = 1; 3406 compare_iov.iov_base = aa_buf; 3407 compare_iov.iov_len = sizeof(aa_buf); 3408 write_iov.iov_base = bb_buf; 3409 write_iov.iov_len = sizeof(bb_buf); 3410 3411 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, offset, num_blocks, 0); 3412 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 3413 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, offset, num_blocks, 0); 3414 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 3415 3416 g_io_done = false; 3417 g_compare_read_buf = aa_buf; 3418 
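/*
 * Background for the sequence below (a descriptive sketch; lock internals
 * are not asserted beyond what the test checks): compare-and-write is
 * made atomic by the bdev layer, not by the stub. The generic code first
 * locks the target LBA range, then issues the compare (emulated here as
 * a read since COMPARE support is disabled), then the write on match,
 * and finally unlocks the range. The poll_threads() calls around the
 * submission let those asynchronous lock and unlock steps run, which is
 * why completion is observed only after the second stub_complete_io().
 */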
g_compare_read_buf_len = sizeof(aa_buf); 3419 memset(write_buf, 0, sizeof(write_buf)); 3420 g_compare_write_buf = write_buf; 3421 g_compare_write_buf_len = sizeof(write_buf); 3422 rc = spdk_bdev_comparev_and_writev_blocks(desc, ioch, &compare_iov, 1, &write_iov, 1, 3423 offset, num_blocks, io_done, NULL); 3424 /* Trigger range locking */ 3425 poll_threads(); 3426 CU_ASSERT_EQUAL(rc, 0); 3427 num_completed = stub_complete_io(1); 3428 CU_ASSERT_EQUAL(num_completed, 1); 3429 CU_ASSERT(g_io_done == false); 3430 num_completed = stub_complete_io(1); 3431 /* Trigger range unlocking */ 3432 poll_threads(); 3433 CU_ASSERT_EQUAL(num_completed, 1); 3434 CU_ASSERT(g_io_done == true); 3435 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS); 3436 CU_ASSERT(memcmp(write_buf, bb_buf, sizeof(write_buf)) == 0); 3437 3438 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, offset, num_blocks, 0); 3439 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 3440 3441 g_io_done = false; 3442 g_compare_read_buf = cc_buf; 3443 g_compare_read_buf_len = sizeof(cc_buf); 3444 memset(write_buf, 0, sizeof(write_buf)); 3445 g_compare_write_buf = write_buf; 3446 g_compare_write_buf_len = sizeof(write_buf); 3447 rc = spdk_bdev_comparev_and_writev_blocks(desc, ioch, &compare_iov, 1, &write_iov, 1, 3448 offset, num_blocks, io_done, NULL); 3449 /* Trigger range locking */ 3450 poll_threads(); 3451 CU_ASSERT_EQUAL(rc, 0); 3452 num_completed = stub_complete_io(1); 3453 /* Trigger range unlocking earlier because we expect error here */ 3454 poll_threads(); 3455 CU_ASSERT_EQUAL(num_completed, 1); 3456 CU_ASSERT(g_io_done == true); 3457 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_MISCOMPARE); 3458 num_completed = stub_complete_io(1); 3459 CU_ASSERT_EQUAL(num_completed, 0); 3460 3461 spdk_put_io_channel(ioch); 3462 spdk_bdev_close(desc); 3463 free_bdev(bdev); 3464 fn_table.submit_request = stub_submit_request; 3465 spdk_bdev_finish(bdev_fini_cb, NULL); 3466 poll_threads(); 3467 3468 g_io_types_supported[SPDK_BDEV_IO_TYPE_COMPARE] = true; 3469 3470 g_compare_read_buf = NULL; 3471 g_compare_write_buf = NULL; 3472 } 3473 3474 static void 3475 bdev_write_zeroes(void) 3476 { 3477 struct spdk_bdev *bdev; 3478 struct spdk_bdev_desc *desc = NULL; 3479 struct spdk_io_channel *ioch; 3480 struct ut_expected_io *expected_io; 3481 uint64_t offset, num_io_blocks, num_blocks; 3482 uint32_t num_completed, num_requests; 3483 int rc; 3484 3485 spdk_bdev_initialize(bdev_init_cb, NULL); 3486 bdev = allocate_bdev("bdev"); 3487 3488 rc = spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc); 3489 CU_ASSERT_EQUAL(rc, 0); 3490 SPDK_CU_ASSERT_FATAL(desc != NULL); 3491 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 3492 ioch = spdk_bdev_get_io_channel(desc); 3493 SPDK_CU_ASSERT_FATAL(ioch != NULL); 3494 3495 fn_table.submit_request = stub_submit_request; 3496 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 3497 3498 /* First test that if the bdev supports write_zeroes, the request won't be split */ 3499 bdev->md_len = 0; 3500 bdev->blocklen = 4096; 3501 num_blocks = (ZERO_BUFFER_SIZE / bdev->blocklen) * 2; 3502 3503 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, 0, num_blocks, 0); 3504 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 3505 rc = spdk_bdev_write_zeroes_blocks(desc, ioch, 0, num_blocks, io_done, NULL); 3506 CU_ASSERT_EQUAL(rc, 0); 3507 num_completed = stub_complete_io(1); 3508 CU_ASSERT_EQUAL(num_completed, 1); 3509 3510 /* Check that if write zeroes is not 
supported it'll be replaced by regular writes */ 3511 ut_enable_io_type(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, false); 3512 num_io_blocks = ZERO_BUFFER_SIZE / bdev->blocklen; 3513 num_requests = 2; 3514 num_blocks = (ZERO_BUFFER_SIZE / bdev->blocklen) * num_requests; 3515 3516 for (offset = 0; offset < num_requests; ++offset) { 3517 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 3518 offset * num_io_blocks, num_io_blocks, 0); 3519 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 3520 } 3521 3522 rc = spdk_bdev_write_zeroes_blocks(desc, ioch, 0, num_blocks, io_done, NULL); 3523 CU_ASSERT_EQUAL(rc, 0); 3524 num_completed = stub_complete_io(num_requests); 3525 CU_ASSERT_EQUAL(num_completed, num_requests); 3526 3527 /* Check that the splitting is correct if bdev has interleaved metadata */ 3528 bdev->md_interleave = true; 3529 bdev->md_len = 64; 3530 bdev->blocklen = 4096 + 64; 3531 num_blocks = (ZERO_BUFFER_SIZE / bdev->blocklen) * 2; 3532 3533 num_requests = offset = 0; 3534 while (offset < num_blocks) { 3535 num_io_blocks = spdk_min(ZERO_BUFFER_SIZE / bdev->blocklen, num_blocks - offset); 3536 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 3537 offset, num_io_blocks, 0); 3538 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 3539 offset += num_io_blocks; 3540 num_requests++; 3541 } 3542 3543 rc = spdk_bdev_write_zeroes_blocks(desc, ioch, 0, num_blocks, io_done, NULL); 3544 CU_ASSERT_EQUAL(rc, 0); 3545 num_completed = stub_complete_io(num_requests); 3546 CU_ASSERT_EQUAL(num_completed, num_requests); 3547 num_completed = stub_complete_io(num_requests); 3548 CU_ASSERT_EQUAL(num_completed, 0); 3549 3550 /* Check that the same is true for a separate metadata buffer */ 3551 bdev->md_interleave = false; 3552 bdev->md_len = 64; 3553 bdev->blocklen = 4096; 3554 3555 num_requests = offset = 0; 3556 while (offset < num_blocks) { 3557 num_io_blocks = spdk_min(ZERO_BUFFER_SIZE / (bdev->blocklen + bdev->md_len), num_blocks); 3558 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 3559 offset, num_io_blocks, 0); 3560 expected_io->md_buf = (char *)g_bdev_mgr.zero_buffer + num_io_blocks * bdev->blocklen; 3561 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 3562 offset += num_io_blocks; 3563 num_requests++; 3564 } 3565 3566 rc = spdk_bdev_write_zeroes_blocks(desc, ioch, 0, num_blocks, io_done, NULL); 3567 CU_ASSERT_EQUAL(rc, 0); 3568 num_completed = stub_complete_io(num_requests); 3569 CU_ASSERT_EQUAL(num_completed, num_requests); 3570 3571 ut_enable_io_type(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, true); 3572 spdk_put_io_channel(ioch); 3573 spdk_bdev_close(desc); 3574 free_bdev(bdev); 3575 spdk_bdev_finish(bdev_fini_cb, NULL); 3576 poll_threads(); 3577 } 3578 3579 static void 3580 bdev_zcopy_write(void) 3581 { 3582 struct spdk_bdev *bdev; 3583 struct spdk_bdev_desc *desc = NULL; 3584 struct spdk_io_channel *ioch; 3585 struct ut_expected_io *expected_io; 3586 uint64_t offset, num_blocks; 3587 uint32_t num_completed; 3588 char aa_buf[512]; 3589 struct iovec iov; 3590 int rc; 3591 const bool populate = false; 3592 const bool commit = true; 3593 3594 memset(aa_buf, 0xaa, sizeof(aa_buf)); 3595 3596 spdk_bdev_initialize(bdev_init_cb, NULL); 3597 bdev = allocate_bdev("bdev"); 3598 3599 rc = spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc); 3600 CU_ASSERT_EQUAL(rc, 0); 3601 SPDK_CU_ASSERT_FATAL(desc != NULL); 3602 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 3603 ioch = spdk_bdev_get_io_channel(desc); 3604 
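/*
 * Reference sketch of the zero-copy flow exercised below (names taken
 * from this test; nothing here executes): a zcopy write is a two-step
 * operation. zcopy_start with populate=false asks the module for a
 * buffer without filling it, the caller places its data in the returned
 * iov, and zcopy_end with commit=true makes the data durable:
 *
 *   spdk_bdev_zcopy_start(desc, ioch, &iov, 1, offset, num_blocks,
 *                         false, io_done, NULL);            (populate=false)
 *   ... fill iov.iov_base ...
 *   spdk_bdev_zcopy_end(g_zcopy_bdev_io, true, io_done, NULL);  (commit=true)
 *
 * A zcopy read inverts the flags: populate=true on start, commit=false
 * on end, as bdev_zcopy_read() below demonstrates.
 */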
SPDK_CU_ASSERT_FATAL(ioch != NULL); 3605 3606 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 3607 3608 offset = 50; 3609 num_blocks = 1; 3610 iov.iov_base = NULL; 3611 iov.iov_len = 0; 3612 3613 g_zcopy_read_buf = (void *) 0x1122334455667788UL; 3614 g_zcopy_read_buf_len = (uint32_t) -1; 3615 /* Do a zcopy start for a write (populate=false) */ 3616 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_ZCOPY, offset, num_blocks, 0); 3617 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 3618 g_io_done = false; 3619 g_zcopy_write_buf = aa_buf; 3620 g_zcopy_write_buf_len = sizeof(aa_buf); 3621 g_zcopy_bdev_io = NULL; 3622 rc = spdk_bdev_zcopy_start(desc, ioch, &iov, 1, offset, num_blocks, populate, io_done, NULL); 3623 CU_ASSERT_EQUAL(rc, 0); 3624 num_completed = stub_complete_io(1); 3625 CU_ASSERT_EQUAL(num_completed, 1); 3626 CU_ASSERT(g_io_done == true); 3627 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS); 3628 /* Check that the iov has been set up */ 3629 CU_ASSERT(iov.iov_base == g_zcopy_write_buf); 3630 CU_ASSERT(iov.iov_len == g_zcopy_write_buf_len); 3631 /* Check that the bdev_io has been saved */ 3632 CU_ASSERT(g_zcopy_bdev_io != NULL); 3633 /* Now do the zcopy end for a write (commit=true) */ 3634 g_io_done = false; 3635 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_ZCOPY, offset, num_blocks, 0); 3636 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 3637 rc = spdk_bdev_zcopy_end(g_zcopy_bdev_io, commit, io_done, NULL); 3638 CU_ASSERT_EQUAL(rc, 0); 3639 num_completed = stub_complete_io(1); 3640 CU_ASSERT_EQUAL(num_completed, 1); 3641 CU_ASSERT(g_io_done == true); 3642 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS); 3643 /* Check that the g_zcopy write globals are reset by io_done */ 3644 CU_ASSERT(g_zcopy_write_buf == NULL); 3645 CU_ASSERT(g_zcopy_write_buf_len == 0); 3646 /* Check that io_done has freed the g_zcopy_bdev_io */ 3647 CU_ASSERT(g_zcopy_bdev_io == NULL); 3648 3649 /* Check that the zcopy read buffer has not been touched, which 3650 * ensures that the correct buffers were used. 
3651 */ 3652 CU_ASSERT(g_zcopy_read_buf == (void *) 0x1122334455667788UL); 3653 CU_ASSERT(g_zcopy_read_buf_len == (uint32_t) -1); 3654 3655 spdk_put_io_channel(ioch); 3656 spdk_bdev_close(desc); 3657 free_bdev(bdev); 3658 spdk_bdev_finish(bdev_fini_cb, NULL); 3659 poll_threads(); 3660 } 3661 3662 static void 3663 bdev_zcopy_read(void) 3664 { 3665 struct spdk_bdev *bdev; 3666 struct spdk_bdev_desc *desc = NULL; 3667 struct spdk_io_channel *ioch; 3668 struct ut_expected_io *expected_io; 3669 uint64_t offset, num_blocks; 3670 uint32_t num_completed; 3671 char aa_buf[512]; 3672 struct iovec iov; 3673 int rc; 3674 const bool populate = true; 3675 const bool commit = false; 3676 3677 memset(aa_buf, 0xaa, sizeof(aa_buf)); 3678 3679 spdk_bdev_initialize(bdev_init_cb, NULL); 3680 bdev = allocate_bdev("bdev"); 3681 3682 rc = spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc); 3683 CU_ASSERT_EQUAL(rc, 0); 3684 SPDK_CU_ASSERT_FATAL(desc != NULL); 3685 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 3686 ioch = spdk_bdev_get_io_channel(desc); 3687 SPDK_CU_ASSERT_FATAL(ioch != NULL); 3688 3689 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 3690 3691 offset = 50; 3692 num_blocks = 1; 3693 iov.iov_base = NULL; 3694 iov.iov_len = 0; 3695 3696 g_zcopy_write_buf = (void *) 0x1122334455667788UL; 3697 g_zcopy_write_buf_len = (uint32_t) -1; 3698 3699 /* Do a zcopy start for a read (populate=true) */ 3700 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_ZCOPY, offset, num_blocks, 0); 3701 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 3702 g_io_done = false; 3703 g_zcopy_read_buf = aa_buf; 3704 g_zcopy_read_buf_len = sizeof(aa_buf); 3705 g_zcopy_bdev_io = NULL; 3706 rc = spdk_bdev_zcopy_start(desc, ioch, &iov, 1, offset, num_blocks, populate, io_done, NULL); 3707 CU_ASSERT_EQUAL(rc, 0); 3708 num_completed = stub_complete_io(1); 3709 CU_ASSERT_EQUAL(num_completed, 1); 3710 CU_ASSERT(g_io_done == true); 3711 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS); 3712 /* Check that the iov has been set up */ 3713 CU_ASSERT(iov.iov_base == g_zcopy_read_buf); 3714 CU_ASSERT(iov.iov_len == g_zcopy_read_buf_len); 3715 /* Check that the bdev_io has been saved */ 3716 CU_ASSERT(g_zcopy_bdev_io != NULL); 3717 3718 /* Now do the zcopy end for a read (commit=false) */ 3719 g_io_done = false; 3720 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_ZCOPY, offset, num_blocks, 0); 3721 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 3722 rc = spdk_bdev_zcopy_end(g_zcopy_bdev_io, commit, io_done, NULL); 3723 CU_ASSERT_EQUAL(rc, 0); 3724 num_completed = stub_complete_io(1); 3725 CU_ASSERT_EQUAL(num_completed, 1); 3726 CU_ASSERT(g_io_done == true); 3727 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS); 3728 /* Check that the g_zcopy read globals are reset by io_done */ 3729 CU_ASSERT(g_zcopy_read_buf == NULL); 3730 CU_ASSERT(g_zcopy_read_buf_len == 0); 3731 /* Check that io_done has freed the g_zcopy_bdev_io */ 3732 CU_ASSERT(g_zcopy_bdev_io == NULL); 3733 3734 /* Check that the zcopy write buffer has not been touched, which 3735 * ensures that the correct buffers were used. 
3736 */ 3737 CU_ASSERT(g_zcopy_write_buf == (void *) 0x1122334455667788UL); 3738 CU_ASSERT(g_zcopy_write_buf_len == (uint32_t) -1); 3739 3740 spdk_put_io_channel(ioch); 3741 spdk_bdev_close(desc); 3742 free_bdev(bdev); 3743 spdk_bdev_finish(bdev_fini_cb, NULL); 3744 poll_threads(); 3745 } 3746 3747 static void 3748 bdev_open_while_hotremove(void) 3749 { 3750 struct spdk_bdev *bdev; 3751 struct spdk_bdev_desc *desc[2] = {}; 3752 int rc; 3753 3754 bdev = allocate_bdev("bdev"); 3755 3756 rc = spdk_bdev_open_ext("bdev", false, bdev_ut_event_cb, NULL, &desc[0]); 3757 CU_ASSERT(rc == 0); 3758 SPDK_CU_ASSERT_FATAL(desc[0] != NULL); 3759 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc[0])); 3760 3761 spdk_bdev_unregister(bdev, NULL, NULL); 3762 /* Bdev unregister is handled asynchronously. Poll thread to complete. */ 3763 poll_threads(); 3764 3765 rc = spdk_bdev_open_ext("bdev", false, bdev_ut_event_cb, NULL, &desc[1]); 3766 CU_ASSERT(rc == -ENODEV); 3767 SPDK_CU_ASSERT_FATAL(desc[1] == NULL); 3768 3769 spdk_bdev_close(desc[0]); 3770 free_bdev(bdev); 3771 } 3772 3773 static void 3774 bdev_close_while_hotremove(void) 3775 { 3776 struct spdk_bdev *bdev; 3777 struct spdk_bdev_desc *desc = NULL; 3778 int rc = 0; 3779 3780 bdev = allocate_bdev("bdev"); 3781 3782 rc = spdk_bdev_open_ext("bdev", true, bdev_open_cb1, &desc, &desc); 3783 CU_ASSERT_EQUAL(rc, 0); 3784 SPDK_CU_ASSERT_FATAL(desc != NULL); 3785 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 3786 3787 /* Simulate hot-unplug by unregistering bdev */ 3788 g_event_type1 = 0xFF; 3789 g_unregister_arg = NULL; 3790 g_unregister_rc = -1; 3791 spdk_bdev_unregister(bdev, bdev_unregister_cb, (void *)0x12345678); 3792 /* Close device while remove event is in flight */ 3793 spdk_bdev_close(desc); 3794 3795 /* Ensure that unregister callback is delayed */ 3796 CU_ASSERT_EQUAL(g_unregister_arg, NULL); 3797 CU_ASSERT_EQUAL(g_unregister_rc, -1); 3798 3799 poll_threads(); 3800 3801 /* Event callback shall not be issued because device was closed */ 3802 CU_ASSERT_EQUAL(g_event_type1, 0xFF); 3803 /* Unregister callback is issued */ 3804 CU_ASSERT_EQUAL(g_unregister_arg, (void *)0x12345678); 3805 CU_ASSERT_EQUAL(g_unregister_rc, 0); 3806 3807 free_bdev(bdev); 3808 } 3809 3810 static void 3811 bdev_open_ext(void) 3812 { 3813 struct spdk_bdev *bdev; 3814 struct spdk_bdev_desc *desc1 = NULL; 3815 struct spdk_bdev_desc *desc2 = NULL; 3816 int rc = 0; 3817 3818 bdev = allocate_bdev("bdev"); 3819 3820 rc = spdk_bdev_open_ext("bdev", true, NULL, NULL, &desc1); 3821 CU_ASSERT_EQUAL(rc, -EINVAL); 3822 3823 rc = spdk_bdev_open_ext("bdev", true, bdev_open_cb1, &desc1, &desc1); 3824 CU_ASSERT_EQUAL(rc, 0); 3825 3826 rc = spdk_bdev_open_ext("bdev", true, bdev_open_cb2, &desc2, &desc2); 3827 CU_ASSERT_EQUAL(rc, 0); 3828 3829 g_event_type1 = 0xFF; 3830 g_event_type2 = 0xFF; 3831 3832 /* Simulate hot-unplug by unregistering bdev */ 3833 spdk_bdev_unregister(bdev, NULL, NULL); 3834 poll_threads(); 3835 3836 /* Check if correct events have been triggered in event callback fn */ 3837 CU_ASSERT_EQUAL(g_event_type1, SPDK_BDEV_EVENT_REMOVE); 3838 CU_ASSERT_EQUAL(g_event_type2, SPDK_BDEV_EVENT_REMOVE); 3839 3840 free_bdev(bdev); 3841 poll_threads(); 3842 } 3843 3844 static void 3845 bdev_open_ext_unregister(void) 3846 { 3847 struct spdk_bdev *bdev; 3848 struct spdk_bdev_desc *desc1 = NULL; 3849 struct spdk_bdev_desc *desc2 = NULL; 3850 struct spdk_bdev_desc *desc3 = NULL; 3851 struct spdk_bdev_desc *desc4 = NULL; 3852 int rc = 0; 3853 3854 bdev = allocate_bdev("bdev"); 3855 3856 rc = 
spdk_bdev_open_ext("bdev", true, NULL, NULL, &desc1); 3857 CU_ASSERT_EQUAL(rc, -EINVAL); 3858 3859 rc = spdk_bdev_open_ext("bdev", true, bdev_open_cb1, &desc1, &desc1); 3860 CU_ASSERT_EQUAL(rc, 0); 3861 3862 rc = spdk_bdev_open_ext("bdev", true, bdev_open_cb2, &desc2, &desc2); 3863 CU_ASSERT_EQUAL(rc, 0); 3864 3865 rc = spdk_bdev_open_ext("bdev", true, bdev_open_cb3, &desc3, &desc3); 3866 CU_ASSERT_EQUAL(rc, 0); 3867 3868 rc = spdk_bdev_open_ext("bdev", true, bdev_open_cb4, &desc4, &desc4); 3869 CU_ASSERT_EQUAL(rc, 0); 3870 3871 g_event_type1 = 0xFF; 3872 g_event_type2 = 0xFF; 3873 g_event_type3 = 0xFF; 3874 g_event_type4 = 0xFF; 3875 3876 g_unregister_arg = NULL; 3877 g_unregister_rc = -1; 3878 3879 /* Simulate hot-unplug by unregistering bdev */ 3880 spdk_bdev_unregister(bdev, bdev_unregister_cb, (void *)0x12345678); 3881 3882 /* 3883 * Unregister is handled asynchronously and event callback 3884 * (i.e., above bdev_open_cbN) will be called. 3885 * For bdev_open_cb3 and bdev_open_cb4, it is intended to not 3886 * close the desc3 and desc4 so that the bdev is not closed. 3887 */ 3888 poll_threads(); 3889 3890 /* Check if correct events have been triggered in event callback fn */ 3891 CU_ASSERT_EQUAL(g_event_type1, SPDK_BDEV_EVENT_REMOVE); 3892 CU_ASSERT_EQUAL(g_event_type2, SPDK_BDEV_EVENT_REMOVE); 3893 CU_ASSERT_EQUAL(g_event_type3, SPDK_BDEV_EVENT_REMOVE); 3894 CU_ASSERT_EQUAL(g_event_type4, SPDK_BDEV_EVENT_REMOVE); 3895 3896 /* Check that unregister callback is delayed */ 3897 CU_ASSERT(g_unregister_arg == NULL); 3898 CU_ASSERT(g_unregister_rc == -1); 3899 3900 /* 3901 * Explicitly close desc3. As desc4 is still opened there, the 3902 * unergister callback is still delayed to execute. 3903 */ 3904 spdk_bdev_close(desc3); 3905 CU_ASSERT(g_unregister_arg == NULL); 3906 CU_ASSERT(g_unregister_rc == -1); 3907 3908 /* 3909 * Explicitly close desc4 to trigger the ongoing bdev unregister 3910 * operation after last desc is closed. 
3911 */ 3912 spdk_bdev_close(desc4); 3913 3914 /* Poll the thread for the async unregister operation */ 3915 poll_threads(); 3916 3917 /* Check that unregister callback is executed */ 3918 CU_ASSERT(g_unregister_arg == (void *)0x12345678); 3919 CU_ASSERT(g_unregister_rc == 0); 3920 3921 free_bdev(bdev); 3922 poll_threads(); 3923 } 3924 3925 struct timeout_io_cb_arg { 3926 struct iovec iov; 3927 uint8_t type; 3928 }; 3929 3930 static int 3931 bdev_channel_count_submitted_io(struct spdk_bdev_channel *ch) 3932 { 3933 struct spdk_bdev_io *bdev_io; 3934 int n = 0; 3935 3936 if (!ch) { 3937 return -1; 3938 } 3939 3940 TAILQ_FOREACH(bdev_io, &ch->io_submitted, internal.ch_link) { 3941 n++; 3942 } 3943 3944 return n; 3945 } 3946 3947 static void 3948 bdev_channel_io_timeout_cb(void *cb_arg, struct spdk_bdev_io *bdev_io) 3949 { 3950 struct timeout_io_cb_arg *ctx = cb_arg; 3951 3952 ctx->type = bdev_io->type; 3953 ctx->iov.iov_base = bdev_io->iov.iov_base; 3954 ctx->iov.iov_len = bdev_io->iov.iov_len; 3955 } 3956 3957 static void 3958 bdev_set_io_timeout(void) 3959 { 3960 struct spdk_bdev *bdev; 3961 struct spdk_bdev_desc *desc = NULL; 3962 struct spdk_io_channel *io_ch = NULL; 3963 struct spdk_bdev_channel *bdev_ch = NULL; 3964 struct timeout_io_cb_arg cb_arg; 3965 3966 spdk_bdev_initialize(bdev_init_cb, NULL); 3967 3968 bdev = allocate_bdev("bdev"); 3969 3970 CU_ASSERT(spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc) == 0); 3971 SPDK_CU_ASSERT_FATAL(desc != NULL); 3972 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 3973 3974 io_ch = spdk_bdev_get_io_channel(desc); 3975 CU_ASSERT(io_ch != NULL); 3976 3977 bdev_ch = spdk_io_channel_get_ctx(io_ch); 3978 CU_ASSERT(TAILQ_EMPTY(&bdev_ch->io_submitted)); 3979 3980 /* This is part 1. 3981 * We will check the bdev_ch->io_submitted list 3982 * to make sure that it links only the user-submitted I/Os. 3983 */ 3984 CU_ASSERT(spdk_bdev_read(desc, io_ch, (void *)0x1000, 0, 4096, io_done, NULL) == 0); 3985 CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 1); 3986 CU_ASSERT(spdk_bdev_write(desc, io_ch, (void *)0x2000, 0, 4096, io_done, NULL) == 0); 3987 CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 2); 3988 stub_complete_io(1); 3989 CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 1); 3990 stub_complete_io(1); 3991 CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 0); 3992 3993 /* Split IO */ 3994 bdev->optimal_io_boundary = 16; 3995 bdev->split_on_optimal_io_boundary = true; 3996 3997 /* Now test that a single-vector command is split correctly. 3998 * Offset 14, length 8, payload 0xF000 3999 * Child - Offset 14, length 2, payload 0xF000 4000 * Child - Offset 16, length 6, payload 0xF000 + 2 * 512 4001 * 4002 * Set up the expected values before calling spdk_bdev_read_blocks 4003 */ 4004 CU_ASSERT(spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 8, io_done, NULL) == 0); 4005 /* We count all submitted IOs, including those generated by splitting. 
*/ 4006 CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 3); 4007 stub_complete_io(1); 4008 CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 2); 4009 stub_complete_io(1); 4010 CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 0); 4011 4012 /* Also include the reset IO */ 4013 CU_ASSERT(spdk_bdev_reset(desc, io_ch, io_done, NULL) == 0); 4014 CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 1); 4015 poll_threads(); 4016 stub_complete_io(1); 4017 poll_threads(); 4018 CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 0); 4019 4020 /* This is part 2. 4021 * Test registration of the desc timeout poller. 4022 */ 4023 4024 /* Successfully set the timeout */ 4025 CU_ASSERT(spdk_bdev_set_timeout(desc, 30, bdev_channel_io_timeout_cb, &cb_arg) == 0); 4026 CU_ASSERT(desc->io_timeout_poller != NULL); 4027 CU_ASSERT(desc->timeout_in_sec == 30); 4028 CU_ASSERT(desc->cb_fn == bdev_channel_io_timeout_cb); 4029 CU_ASSERT(desc->cb_arg == &cb_arg); 4030 4031 /* Change the timeout limit */ 4032 CU_ASSERT(spdk_bdev_set_timeout(desc, 20, bdev_channel_io_timeout_cb, &cb_arg) == 0); 4033 CU_ASSERT(desc->io_timeout_poller != NULL); 4034 CU_ASSERT(desc->timeout_in_sec == 20); 4035 CU_ASSERT(desc->cb_fn == bdev_channel_io_timeout_cb); 4036 CU_ASSERT(desc->cb_arg == &cb_arg); 4037 4038 /* Disable the timeout */ 4039 CU_ASSERT(spdk_bdev_set_timeout(desc, 0, NULL, NULL) == 0); 4040 CU_ASSERT(desc->io_timeout_poller == NULL); 4041 4042 /* This is part 3. 4043 * We will catch a timed-out IO and check whether it is 4044 * the submitted one. 4045 */ 4046 memset(&cb_arg, 0, sizeof(cb_arg)); 4047 CU_ASSERT(spdk_bdev_set_timeout(desc, 30, bdev_channel_io_timeout_cb, &cb_arg) == 0); 4048 CU_ASSERT(spdk_bdev_write_blocks(desc, io_ch, (void *)0x1000, 0, 1, io_done, NULL) == 0); 4049 4050 /* Don't reach the limit */ 4051 spdk_delay_us(15 * spdk_get_ticks_hz()); 4052 poll_threads(); 4053 CU_ASSERT(cb_arg.type == 0); 4054 CU_ASSERT(cb_arg.iov.iov_base == (void *)0x0); 4055 CU_ASSERT(cb_arg.iov.iov_len == 0); 4056 4057 /* 15 + 15 = 30, reaches the limit */ 4058 spdk_delay_us(15 * spdk_get_ticks_hz()); 4059 poll_threads(); 4060 CU_ASSERT(cb_arg.type == SPDK_BDEV_IO_TYPE_WRITE); 4061 CU_ASSERT(cb_arg.iov.iov_base == (void *)0x1000); 4062 CU_ASSERT(cb_arg.iov.iov_len == 1 * bdev->blocklen); 4063 stub_complete_io(1); 4064 4065 /* Use the same split IO above and check the IO */ 4066 memset(&cb_arg, 0, sizeof(cb_arg)); 4067 CU_ASSERT(spdk_bdev_write_blocks(desc, io_ch, (void *)0xF000, 14, 8, io_done, NULL) == 0); 4068 4069 /* The first child completes in time */ 4070 spdk_delay_us(15 * spdk_get_ticks_hz()); 4071 poll_threads(); 4072 stub_complete_io(1); 4073 CU_ASSERT(cb_arg.type == 0); 4074 CU_ASSERT(cb_arg.iov.iov_base == (void *)0x0); 4075 CU_ASSERT(cb_arg.iov.iov_len == 0); 4076 4077 /* The second child reaches the limit */ 4078 spdk_delay_us(15 * spdk_get_ticks_hz()); 4079 poll_threads(); 4080 CU_ASSERT(cb_arg.type == SPDK_BDEV_IO_TYPE_WRITE); 4081 CU_ASSERT(cb_arg.iov.iov_base == (void *)0xF000); 4082 CU_ASSERT(cb_arg.iov.iov_len == 8 * bdev->blocklen); 4083 stub_complete_io(1); 4084 4085 /* Also include the reset IO */ 4086 memset(&cb_arg, 0, sizeof(cb_arg)); 4087 CU_ASSERT(spdk_bdev_reset(desc, io_ch, io_done, NULL) == 0); 4088 spdk_delay_us(30 * spdk_get_ticks_hz()); 4089 poll_threads(); 4090 CU_ASSERT(cb_arg.type == SPDK_BDEV_IO_TYPE_RESET); 4091 stub_complete_io(1); 4092 poll_threads(); 4093 4094 spdk_put_io_channel(io_ch); 4095 spdk_bdev_close(desc); 4096 free_bdev(bdev); 4097 spdk_bdev_finish(bdev_fini_cb, NULL); 
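/*
 * Recap of the timeout semantics exercised above (a descriptive note,
 * not additional assertions): spdk_bdev_set_timeout() arms a
 * per-descriptor poller that walks the channel's io_submitted list and
 * invokes the registered callback for any I/O submitted longer than
 * timeout_in_sec ago; passing 0 with a NULL callback disarms it:
 *
 *   spdk_bdev_set_timeout(desc, 30, bdev_channel_io_timeout_cb, &cb_arg);
 *   spdk_bdev_set_timeout(desc, 0, NULL, NULL);
 *
 * The callback only reports the slow I/O; as seen above, the I/O still
 * has to be completed (or aborted/reset) by other means.
 */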
4098 poll_threads(); 4099 } 4100 4101 static void 4102 bdev_set_qd_sampling(void) 4103 { 4104 struct spdk_bdev *bdev; 4105 struct spdk_bdev_desc *desc = NULL; 4106 struct spdk_io_channel *io_ch = NULL; 4107 struct spdk_bdev_channel *bdev_ch = NULL; 4108 struct timeout_io_cb_arg cb_arg; 4109 4110 spdk_bdev_initialize(bdev_init_cb, NULL); 4111 4112 bdev = allocate_bdev("bdev"); 4113 4114 CU_ASSERT(spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc) == 0); 4115 SPDK_CU_ASSERT_FATAL(desc != NULL); 4116 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 4117 4118 io_ch = spdk_bdev_get_io_channel(desc); 4119 CU_ASSERT(io_ch != NULL); 4120 4121 bdev_ch = spdk_io_channel_get_ctx(io_ch); 4122 CU_ASSERT(TAILQ_EMPTY(&bdev_ch->io_submitted)); 4123 4124 /* This is part 1. 4125 * We will check the bdev_ch->io_submitted list 4126 * to make sure that it links only the user-submitted I/Os. 4127 */ 4128 CU_ASSERT(spdk_bdev_read(desc, io_ch, (void *)0x1000, 0, 4096, io_done, NULL) == 0); 4129 CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 1); 4130 CU_ASSERT(spdk_bdev_write(desc, io_ch, (void *)0x2000, 0, 4096, io_done, NULL) == 0); 4131 CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 2); 4132 stub_complete_io(1); 4133 CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 1); 4134 stub_complete_io(1); 4135 CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 0); 4136 4137 /* This is part 2. 4138 * Test registration of the bdev's QD sampling poller. 4139 */ 4140 /* 1st: successfully set the qd sampling period */ 4141 spdk_bdev_set_qd_sampling_period(bdev, 10); 4142 CU_ASSERT(bdev->internal.new_period == 10); 4143 CU_ASSERT(bdev->internal.period == 10); 4144 CU_ASSERT(bdev->internal.qd_desc != NULL); 4145 poll_threads(); 4146 CU_ASSERT(bdev->internal.qd_poller != NULL); 4147 4148 /* 2nd: change the qd sampling period */ 4149 spdk_bdev_set_qd_sampling_period(bdev, 20); 4150 CU_ASSERT(bdev->internal.new_period == 20); 4151 CU_ASSERT(bdev->internal.period == 10); 4152 CU_ASSERT(bdev->internal.qd_desc != NULL); 4153 poll_threads(); 4154 CU_ASSERT(bdev->internal.qd_poller != NULL); 4155 CU_ASSERT(bdev->internal.period == bdev->internal.new_period); 4156 4157 /* 3rd: change the qd sampling period and verify qd_poll_in_progress */ 4158 spdk_delay_us(20); 4159 poll_thread_times(0, 1); 4160 CU_ASSERT(bdev->internal.qd_poll_in_progress == true); 4161 spdk_bdev_set_qd_sampling_period(bdev, 30); 4162 CU_ASSERT(bdev->internal.new_period == 30); 4163 CU_ASSERT(bdev->internal.period == 20); 4164 poll_threads(); 4165 CU_ASSERT(bdev->internal.qd_poll_in_progress == false); 4166 CU_ASSERT(bdev->internal.period == bdev->internal.new_period); 4167 4168 /* 4th: disable the qd sampling period */ 4169 spdk_bdev_set_qd_sampling_period(bdev, 0); 4170 CU_ASSERT(bdev->internal.new_period == 0); 4171 CU_ASSERT(bdev->internal.period == 30); 4172 poll_threads(); 4173 CU_ASSERT(bdev->internal.qd_poller == NULL); 4174 CU_ASSERT(bdev->internal.period == bdev->internal.new_period); 4175 CU_ASSERT(bdev->internal.qd_desc == NULL); 4176 4177 /* This is part 3. 4178 * We will verify that submitted IO and reset work 4179 * properly with QD sampling. 
4180 */ 4181 memset(&cb_arg, 0, sizeof(cb_arg)); 4182 spdk_bdev_set_qd_sampling_period(bdev, 1); 4183 poll_threads(); 4184 4185 CU_ASSERT(spdk_bdev_write(desc, io_ch, (void *)0x2000, 0, 4096, io_done, NULL) == 0); 4186 CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 1); 4187 4188 /* Also include the reset IO */ 4189 memset(&cb_arg, 0, sizeof(cb_arg)); 4190 CU_ASSERT(spdk_bdev_reset(desc, io_ch, io_done, NULL) == 0); 4191 poll_threads(); 4192 4193 /* Close the desc */ 4194 spdk_put_io_channel(io_ch); 4195 spdk_bdev_close(desc); 4196 4197 /* Complete the submitted IO and reset */ 4198 stub_complete_io(2); 4199 poll_threads(); 4200 4201 free_bdev(bdev); 4202 spdk_bdev_finish(bdev_fini_cb, NULL); 4203 poll_threads(); 4204 } 4205 4206 static void 4207 lba_range_overlap(void) 4208 { 4209 struct lba_range r1, r2; 4210 4211 r1.offset = 100; 4212 r1.length = 50; 4213 4214 r2.offset = 0; 4215 r2.length = 1; 4216 CU_ASSERT(!bdev_lba_range_overlapped(&r1, &r2)); 4217 4218 r2.offset = 0; 4219 r2.length = 100; 4220 CU_ASSERT(!bdev_lba_range_overlapped(&r1, &r2)); 4221 4222 r2.offset = 0; 4223 r2.length = 110; 4224 CU_ASSERT(bdev_lba_range_overlapped(&r1, &r2)); 4225 4226 r2.offset = 100; 4227 r2.length = 10; 4228 CU_ASSERT(bdev_lba_range_overlapped(&r1, &r2)); 4229 4230 r2.offset = 110; 4231 r2.length = 20; 4232 CU_ASSERT(bdev_lba_range_overlapped(&r1, &r2)); 4233 4234 r2.offset = 140; 4235 r2.length = 150; 4236 CU_ASSERT(bdev_lba_range_overlapped(&r1, &r2)); 4237 4238 r2.offset = 130; 4239 r2.length = 200; 4240 CU_ASSERT(bdev_lba_range_overlapped(&r1, &r2)); 4241 4242 r2.offset = 150; 4243 r2.length = 100; 4244 CU_ASSERT(!bdev_lba_range_overlapped(&r1, &r2)); 4245 4246 r2.offset = 110; 4247 r2.length = 0; 4248 CU_ASSERT(!bdev_lba_range_overlapped(&r1, &r2)); 4249 } 4250 4251 static bool g_lock_lba_range_done; 4252 static bool g_unlock_lba_range_done; 4253 4254 static void 4255 lock_lba_range_done(void *ctx, int status) 4256 { 4257 g_lock_lba_range_done = true; 4258 } 4259 4260 static void 4261 unlock_lba_range_done(void *ctx, int status) 4262 { 4263 g_unlock_lba_range_done = true; 4264 } 4265 4266 static void 4267 lock_lba_range_check_ranges(void) 4268 { 4269 struct spdk_bdev *bdev; 4270 struct spdk_bdev_desc *desc = NULL; 4271 struct spdk_io_channel *io_ch; 4272 struct spdk_bdev_channel *channel; 4273 struct lba_range *range; 4274 int ctx1; 4275 int rc; 4276 4277 spdk_bdev_initialize(bdev_init_cb, NULL); 4278 4279 bdev = allocate_bdev("bdev0"); 4280 4281 rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc); 4282 CU_ASSERT(rc == 0); 4283 CU_ASSERT(desc != NULL); 4284 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 4285 io_ch = spdk_bdev_get_io_channel(desc); 4286 CU_ASSERT(io_ch != NULL); 4287 channel = spdk_io_channel_get_ctx(io_ch); 4288 4289 g_lock_lba_range_done = false; 4290 rc = bdev_lock_lba_range(desc, io_ch, 20, 10, lock_lba_range_done, &ctx1); 4291 CU_ASSERT(rc == 0); 4292 poll_threads(); 4293 4294 CU_ASSERT(g_lock_lba_range_done == true); 4295 range = TAILQ_FIRST(&channel->locked_ranges); 4296 SPDK_CU_ASSERT_FATAL(range != NULL); 4297 CU_ASSERT(range->offset == 20); 4298 CU_ASSERT(range->length == 10); 4299 CU_ASSERT(range->owner_ch == channel); 4300 4301 /* Unlocks must exactly match a lock. 
*/ 4302 g_unlock_lba_range_done = false; 4303 rc = bdev_unlock_lba_range(desc, io_ch, 20, 1, unlock_lba_range_done, &ctx1); 4304 CU_ASSERT(rc == -EINVAL); 4305 CU_ASSERT(g_unlock_lba_range_done == false); 4306 4307 rc = bdev_unlock_lba_range(desc, io_ch, 20, 10, unlock_lba_range_done, &ctx1); 4308 CU_ASSERT(rc == 0); 4309 spdk_delay_us(100); 4310 poll_threads(); 4311 4312 CU_ASSERT(g_unlock_lba_range_done == true); 4313 CU_ASSERT(TAILQ_EMPTY(&channel->locked_ranges)); 4314 4315 spdk_put_io_channel(io_ch); 4316 spdk_bdev_close(desc); 4317 free_bdev(bdev); 4318 spdk_bdev_finish(bdev_fini_cb, NULL); 4319 poll_threads(); 4320 } 4321 4322 static void 4323 lock_lba_range_with_io_outstanding(void) 4324 { 4325 struct spdk_bdev *bdev; 4326 struct spdk_bdev_desc *desc = NULL; 4327 struct spdk_io_channel *io_ch; 4328 struct spdk_bdev_channel *channel; 4329 struct lba_range *range; 4330 char buf[4096]; 4331 int ctx1; 4332 int rc; 4333 4334 spdk_bdev_initialize(bdev_init_cb, NULL); 4335 4336 bdev = allocate_bdev("bdev0"); 4337 4338 rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc); 4339 CU_ASSERT(rc == 0); 4340 CU_ASSERT(desc != NULL); 4341 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 4342 io_ch = spdk_bdev_get_io_channel(desc); 4343 CU_ASSERT(io_ch != NULL); 4344 channel = spdk_io_channel_get_ctx(io_ch); 4345 4346 g_io_done = false; 4347 rc = spdk_bdev_read_blocks(desc, io_ch, buf, 20, 1, io_done, &ctx1); 4348 CU_ASSERT(rc == 0); 4349 4350 g_lock_lba_range_done = false; 4351 rc = bdev_lock_lba_range(desc, io_ch, 20, 10, lock_lba_range_done, &ctx1); 4352 CU_ASSERT(rc == 0); 4353 poll_threads(); 4354 4355 /* The lock should immediately become valid, since there are no outstanding 4356 * write I/O. 4357 */ 4358 CU_ASSERT(g_io_done == false); 4359 CU_ASSERT(g_lock_lba_range_done == true); 4360 range = TAILQ_FIRST(&channel->locked_ranges); 4361 SPDK_CU_ASSERT_FATAL(range != NULL); 4362 CU_ASSERT(range->offset == 20); 4363 CU_ASSERT(range->length == 10); 4364 CU_ASSERT(range->owner_ch == channel); 4365 CU_ASSERT(range->locked_ctx == &ctx1); 4366 4367 rc = bdev_unlock_lba_range(desc, io_ch, 20, 10, lock_lba_range_done, &ctx1); 4368 CU_ASSERT(rc == 0); 4369 stub_complete_io(1); 4370 spdk_delay_us(100); 4371 poll_threads(); 4372 4373 CU_ASSERT(TAILQ_EMPTY(&channel->locked_ranges)); 4374 4375 /* Now try again, but with a write I/O. */ 4376 g_io_done = false; 4377 rc = spdk_bdev_write_blocks(desc, io_ch, buf, 20, 1, io_done, &ctx1); 4378 CU_ASSERT(rc == 0); 4379 4380 g_lock_lba_range_done = false; 4381 rc = bdev_lock_lba_range(desc, io_ch, 20, 10, lock_lba_range_done, &ctx1); 4382 CU_ASSERT(rc == 0); 4383 poll_threads(); 4384 4385 /* The lock should not be fully valid yet, since a write I/O is outstanding. 4386 * But note that the range should be on the channel's locked_list, to make sure no 4387 * new write I/O are started. 4388 */ 4389 CU_ASSERT(g_io_done == false); 4390 CU_ASSERT(g_lock_lba_range_done == false); 4391 range = TAILQ_FIRST(&channel->locked_ranges); 4392 SPDK_CU_ASSERT_FATAL(range != NULL); 4393 CU_ASSERT(range->offset == 20); 4394 CU_ASSERT(range->length == 10); 4395 4396 /* Complete the write I/O. This should make the lock valid (checked by confirming 4397 * our callback was invoked). 
4398 */ 4399 stub_complete_io(1); 4400 spdk_delay_us(100); 4401 poll_threads(); 4402 CU_ASSERT(g_io_done == true); 4403 CU_ASSERT(g_lock_lba_range_done == true); 4404 4405 rc = bdev_unlock_lba_range(desc, io_ch, 20, 10, unlock_lba_range_done, &ctx1); 4406 CU_ASSERT(rc == 0); 4407 poll_threads(); 4408 4409 CU_ASSERT(TAILQ_EMPTY(&channel->locked_ranges)); 4410 4411 spdk_put_io_channel(io_ch); 4412 spdk_bdev_close(desc); 4413 free_bdev(bdev); 4414 spdk_bdev_finish(bdev_fini_cb, NULL); 4415 poll_threads(); 4416 } 4417 4418 static void 4419 lock_lba_range_overlapped(void) 4420 { 4421 struct spdk_bdev *bdev; 4422 struct spdk_bdev_desc *desc = NULL; 4423 struct spdk_io_channel *io_ch; 4424 struct spdk_bdev_channel *channel; 4425 struct lba_range *range; 4426 int ctx1; 4427 int rc; 4428 4429 spdk_bdev_initialize(bdev_init_cb, NULL); 4430 4431 bdev = allocate_bdev("bdev0"); 4432 4433 rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc); 4434 CU_ASSERT(rc == 0); 4435 CU_ASSERT(desc != NULL); 4436 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 4437 io_ch = spdk_bdev_get_io_channel(desc); 4438 CU_ASSERT(io_ch != NULL); 4439 channel = spdk_io_channel_get_ctx(io_ch); 4440 4441 /* Lock range 20-29. */ 4442 g_lock_lba_range_done = false; 4443 rc = bdev_lock_lba_range(desc, io_ch, 20, 10, lock_lba_range_done, &ctx1); 4444 CU_ASSERT(rc == 0); 4445 poll_threads(); 4446 4447 CU_ASSERT(g_lock_lba_range_done == true); 4448 range = TAILQ_FIRST(&channel->locked_ranges); 4449 SPDK_CU_ASSERT_FATAL(range != NULL); 4450 CU_ASSERT(range->offset == 20); 4451 CU_ASSERT(range->length == 10); 4452 4453 /* Try to lock range 25-39. It should not lock immediately, since it overlaps with 4454 * 20-29. 4455 */ 4456 g_lock_lba_range_done = false; 4457 rc = bdev_lock_lba_range(desc, io_ch, 25, 15, lock_lba_range_done, &ctx1); 4458 CU_ASSERT(rc == 0); 4459 poll_threads(); 4460 4461 CU_ASSERT(g_lock_lba_range_done == false); 4462 range = TAILQ_FIRST(&bdev->internal.pending_locked_ranges); 4463 SPDK_CU_ASSERT_FATAL(range != NULL); 4464 CU_ASSERT(range->offset == 25); 4465 CU_ASSERT(range->length == 15); 4466 4467 /* Unlock 20-29. This should result in range 25-39 now getting locked since it 4468 * no longer overlaps with an active lock. 4469 */ 4470 g_unlock_lba_range_done = false; 4471 rc = bdev_unlock_lba_range(desc, io_ch, 20, 10, unlock_lba_range_done, &ctx1); 4472 CU_ASSERT(rc == 0); 4473 poll_threads(); 4474 4475 CU_ASSERT(g_unlock_lba_range_done == true); 4476 CU_ASSERT(TAILQ_EMPTY(&bdev->internal.pending_locked_ranges)); 4477 range = TAILQ_FIRST(&channel->locked_ranges); 4478 SPDK_CU_ASSERT_FATAL(range != NULL); 4479 CU_ASSERT(range->offset == 25); 4480 CU_ASSERT(range->length == 15); 4481 4482 /* Lock 40-59. This should immediately lock since it does not overlap with the 4483 * currently active 25-39 lock. 4484 */ 4485 g_lock_lba_range_done = false; 4486 rc = bdev_lock_lba_range(desc, io_ch, 40, 20, lock_lba_range_done, &ctx1); 4487 CU_ASSERT(rc == 0); 4488 poll_threads(); 4489 4490 CU_ASSERT(g_lock_lba_range_done == true); 4491 range = TAILQ_FIRST(&bdev->internal.locked_ranges); 4492 SPDK_CU_ASSERT_FATAL(range != NULL); 4493 range = TAILQ_NEXT(range, tailq); 4494 SPDK_CU_ASSERT_FATAL(range != NULL); 4495 CU_ASSERT(range->offset == 40); 4496 CU_ASSERT(range->length == 20); 4497 4498 /* Try to lock 35-44. Note that this overlaps with both 25-39 and 40-59. 
*/ 4499 g_lock_lba_range_done = false; 4500 rc = bdev_lock_lba_range(desc, io_ch, 35, 10, lock_lba_range_done, &ctx1); 4501 CU_ASSERT(rc == 0); 4502 poll_threads(); 4503 4504 CU_ASSERT(g_lock_lba_range_done == false); 4505 range = TAILQ_FIRST(&bdev->internal.pending_locked_ranges); 4506 SPDK_CU_ASSERT_FATAL(range != NULL); 4507 CU_ASSERT(range->offset == 35); 4508 CU_ASSERT(range->length == 10); 4509 4510 /* Unlock 25-39. Make sure that 35-44 is still in the pending list, since 4511 * the 40-59 lock is still active. 4512 */ 4513 g_unlock_lba_range_done = false; 4514 rc = bdev_unlock_lba_range(desc, io_ch, 25, 15, unlock_lba_range_done, &ctx1); 4515 CU_ASSERT(rc == 0); 4516 poll_threads(); 4517 4518 CU_ASSERT(g_unlock_lba_range_done == true); 4519 CU_ASSERT(g_lock_lba_range_done == false); 4520 range = TAILQ_FIRST(&bdev->internal.pending_locked_ranges); 4521 SPDK_CU_ASSERT_FATAL(range != NULL); 4522 CU_ASSERT(range->offset == 35); 4523 CU_ASSERT(range->length == 10); 4524 4525 /* Unlock 40-59. This should result in 35-44 now getting locked, since there are 4526 * no longer any active overlapping locks. 4527 */ 4528 g_unlock_lba_range_done = false; 4529 rc = bdev_unlock_lba_range(desc, io_ch, 40, 20, unlock_lba_range_done, &ctx1); 4530 CU_ASSERT(rc == 0); 4531 poll_threads(); 4532 4533 CU_ASSERT(g_unlock_lba_range_done == true); 4534 CU_ASSERT(g_lock_lba_range_done == true); 4535 CU_ASSERT(TAILQ_EMPTY(&bdev->internal.pending_locked_ranges)); 4536 range = TAILQ_FIRST(&bdev->internal.locked_ranges); 4537 SPDK_CU_ASSERT_FATAL(range != NULL); 4538 CU_ASSERT(range->offset == 35); 4539 CU_ASSERT(range->length == 10); 4540 4541 /* Finally, unlock 35-44. */ 4542 g_unlock_lba_range_done = false; 4543 rc = bdev_unlock_lba_range(desc, io_ch, 35, 10, unlock_lba_range_done, &ctx1); 4544 CU_ASSERT(rc == 0); 4545 poll_threads(); 4546 4547 CU_ASSERT(g_unlock_lba_range_done == true); 4548 CU_ASSERT(TAILQ_EMPTY(&bdev->internal.locked_ranges)); 4549 4550 spdk_put_io_channel(io_ch); 4551 spdk_bdev_close(desc); 4552 free_bdev(bdev); 4553 spdk_bdev_finish(bdev_fini_cb, NULL); 4554 poll_threads(); 4555 } 4556 4557 static void 4558 abort_done(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg) 4559 { 4560 g_abort_done = true; 4561 g_abort_status = bdev_io->internal.status; 4562 spdk_bdev_free_io(bdev_io); 4563 } 4564 4565 static void 4566 bdev_io_abort(void) 4567 { 4568 struct spdk_bdev *bdev; 4569 struct spdk_bdev_desc *desc = NULL; 4570 struct spdk_io_channel *io_ch; 4571 struct spdk_bdev_channel *channel; 4572 struct spdk_bdev_mgmt_channel *mgmt_ch; 4573 struct spdk_bdev_opts bdev_opts = {}; 4574 struct iovec iov[BDEV_IO_NUM_CHILD_IOV * 2]; 4575 uint64_t io_ctx1 = 0, io_ctx2 = 0, i; 4576 int rc; 4577 4578 spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts)); 4579 bdev_opts.bdev_io_pool_size = 7; 4580 bdev_opts.bdev_io_cache_size = 2; 4581 4582 rc = spdk_bdev_set_opts(&bdev_opts); 4583 CU_ASSERT(rc == 0); 4584 spdk_bdev_initialize(bdev_init_cb, NULL); 4585 4586 bdev = allocate_bdev("bdev0"); 4587 4588 rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc); 4589 CU_ASSERT(rc == 0); 4590 CU_ASSERT(desc != NULL); 4591 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 4592 io_ch = spdk_bdev_get_io_channel(desc); 4593 CU_ASSERT(io_ch != NULL); 4594 channel = spdk_io_channel_get_ctx(io_ch); 4595 mgmt_ch = channel->shared_resource->mgmt_ch; 4596 4597 g_abort_done = false; 4598 4599 ut_enable_io_type(SPDK_BDEV_IO_TYPE_ABORT, false); 4600 4601 rc = spdk_bdev_abort(desc, io_ch, &io_ctx1, abort_done, 
NULL); 4602 CU_ASSERT(rc == -ENOTSUP); 4603 4604 ut_enable_io_type(SPDK_BDEV_IO_TYPE_ABORT, true); 4605 4606 rc = spdk_bdev_abort(desc, io_ch, &io_ctx2, abort_done, NULL); 4607 CU_ASSERT(rc == 0); 4608 CU_ASSERT(g_abort_done == true); 4609 CU_ASSERT(g_abort_status == SPDK_BDEV_IO_STATUS_FAILED); 4610 4611 /* Test the case that the target I/O was successfully aborted. */ 4612 g_io_done = false; 4613 4614 rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, &io_ctx1); 4615 CU_ASSERT(rc == 0); 4616 CU_ASSERT(g_io_done == false); 4617 4618 g_abort_done = false; 4619 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 4620 4621 rc = spdk_bdev_abort(desc, io_ch, &io_ctx1, abort_done, NULL); 4622 CU_ASSERT(rc == 0); 4623 CU_ASSERT(g_io_done == true); 4624 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED); 4625 stub_complete_io(1); 4626 CU_ASSERT(g_abort_done == true); 4627 CU_ASSERT(g_abort_status == SPDK_BDEV_IO_STATUS_SUCCESS); 4628 4629 /* Test the case that the target I/O was not aborted because it completed 4630 * while the abort was being executed. 4631 */ 4632 g_io_done = false; 4633 4634 rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, &io_ctx1); 4635 CU_ASSERT(rc == 0); 4636 CU_ASSERT(g_io_done == false); 4637 4638 g_abort_done = false; 4639 g_io_exp_status = SPDK_BDEV_IO_STATUS_FAILED; 4640 4641 rc = spdk_bdev_abort(desc, io_ch, &io_ctx1, abort_done, NULL); 4642 CU_ASSERT(rc == 0); 4643 CU_ASSERT(g_io_done == false); 4644 4645 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 4646 stub_complete_io(1); 4647 CU_ASSERT(g_io_done == true); 4648 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS); 4649 4650 g_io_exp_status = SPDK_BDEV_IO_STATUS_FAILED; 4651 stub_complete_io(1); 4652 CU_ASSERT(g_abort_done == true); 4653 CU_ASSERT(g_abort_status == SPDK_BDEV_IO_STATUS_SUCCESS); 4654 4655 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 4656 4657 bdev->optimal_io_boundary = 16; 4658 bdev->split_on_optimal_io_boundary = true; 4659 4660 /* Test that a single-vector command which is split is aborted correctly. 4661 * Offset 14, length 8, payload 0xF000 4662 * Child - Offset 14, length 2, payload 0xF000 4663 * Child - Offset 16, length 6, payload 0xF000 + 2 * 512 4664 */ 4665 g_io_done = false; 4666 4667 rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 8, io_done, &io_ctx1); 4668 CU_ASSERT(rc == 0); 4669 CU_ASSERT(g_io_done == false); 4670 4671 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 4672 4673 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 4674 4675 rc = spdk_bdev_abort(desc, io_ch, &io_ctx1, abort_done, NULL); 4676 CU_ASSERT(rc == 0); 4677 CU_ASSERT(g_io_done == true); 4678 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED); 4679 stub_complete_io(2); 4680 CU_ASSERT(g_abort_done == true); 4681 CU_ASSERT(g_abort_status == SPDK_BDEV_IO_STATUS_SUCCESS); 4682 4683 /* Test that a multi-vector command that needs to be split by the optimal I/O 4684 * boundary and then split again by the child iov capacity is aborted correctly. 4685 * Abort is requested before the second child I/O is submitted. The parent I/O 4686 * should complete with failure without submitting the second child I/O. 
4687 */ 4688 for (i = 0; i < BDEV_IO_NUM_CHILD_IOV * 2; i++) { 4689 iov[i].iov_base = (void *)((i + 1) * 0x10000); 4690 iov[i].iov_len = 512; 4691 } 4692 4693 bdev->optimal_io_boundary = BDEV_IO_NUM_CHILD_IOV; 4694 g_io_done = false; 4695 rc = spdk_bdev_readv_blocks(desc, io_ch, iov, BDEV_IO_NUM_CHILD_IOV * 2, 0, 4696 BDEV_IO_NUM_CHILD_IOV * 2, io_done, &io_ctx1); 4697 CU_ASSERT(rc == 0); 4698 CU_ASSERT(g_io_done == false); 4699 4700 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 4701 4702 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 4703 4704 rc = spdk_bdev_abort(desc, io_ch, &io_ctx1, abort_done, NULL); 4705 CU_ASSERT(rc == 0); 4706 CU_ASSERT(g_io_done == true); 4707 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED); 4708 stub_complete_io(1); 4709 CU_ASSERT(g_abort_done == true); 4710 CU_ASSERT(g_abort_status == SPDK_BDEV_IO_STATUS_SUCCESS); 4711 4712 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 4713 4714 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 4715 4716 bdev->optimal_io_boundary = 16; 4717 g_io_done = false; 4718 4719 /* Test that a single-vector command which is split is aborted correctly. 4720 * Unlike the cases above, the child abort requests will be submitted 4721 * sequentially due to the limited capacity of the spdk_bdev_io pool. 4722 */ 4723 rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 50, io_done, &io_ctx1); 4724 CU_ASSERT(rc == 0); 4725 CU_ASSERT(g_io_done == false); 4726 4727 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 4); 4728 4729 g_abort_done = false; 4730 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 4731 4732 rc = spdk_bdev_abort(desc, io_ch, &io_ctx1, abort_done, NULL); 4733 CU_ASSERT(rc == 0); 4734 CU_ASSERT(!TAILQ_EMPTY(&mgmt_ch->io_wait_queue)); 4735 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 4); 4736 4737 stub_complete_io(1); 4738 CU_ASSERT(g_io_done == true); 4739 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED); 4740 stub_complete_io(3); 4741 CU_ASSERT(g_abort_done == true); 4742 CU_ASSERT(g_abort_status == SPDK_BDEV_IO_STATUS_SUCCESS); 4743 4744 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 4745 4746 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 4747 4748 spdk_put_io_channel(io_ch); 4749 spdk_bdev_close(desc); 4750 free_bdev(bdev); 4751 spdk_bdev_finish(bdev_fini_cb, NULL); 4752 poll_threads(); 4753 } 4754 4755 static void 4756 bdev_unmap(void) 4757 { 4758 struct spdk_bdev *bdev; 4759 struct spdk_bdev_desc *desc = NULL; 4760 struct spdk_io_channel *ioch; 4761 struct spdk_bdev_channel *bdev_ch; 4762 struct ut_expected_io *expected_io; 4763 struct spdk_bdev_opts bdev_opts = {}; 4764 uint32_t i, num_outstanding; 4765 uint64_t offset, num_blocks, max_unmap_blocks, num_children; 4766 int rc; 4767 4768 spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts)); 4769 bdev_opts.bdev_io_pool_size = 512; 4770 bdev_opts.bdev_io_cache_size = 64; 4771 rc = spdk_bdev_set_opts(&bdev_opts); 4772 CU_ASSERT(rc == 0); 4773 4774 spdk_bdev_initialize(bdev_init_cb, NULL); 4775 bdev = allocate_bdev("bdev"); 4776 4777 rc = spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc); 4778 CU_ASSERT_EQUAL(rc, 0); 4779 SPDK_CU_ASSERT_FATAL(desc != NULL); 4780 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 4781 ioch = spdk_bdev_get_io_channel(desc); 4782 SPDK_CU_ASSERT_FATAL(ioch != NULL); 4783 bdev_ch = spdk_io_channel_get_ctx(ioch); 4784 CU_ASSERT(TAILQ_EMPTY(&bdev_ch->io_submitted)); 4785 4786 fn_table.submit_request = stub_submit_request; 4787 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 4788 4789 /* Case 1: First test 
	/* Case 1: First test that the request won't be split */
	num_blocks = 32;

	g_io_done = false;
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_UNMAP, 0, num_blocks, 0);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
	rc = spdk_bdev_unmap_blocks(desc, ioch, 0, num_blocks, io_done, NULL);
	CU_ASSERT_EQUAL(rc, 0);
	CU_ASSERT(g_io_done == false);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);

	/* Case 2: Test the split with 2 children requests */
	bdev->max_unmap = 8;
	bdev->max_unmap_segments = 2;
	max_unmap_blocks = bdev->max_unmap * bdev->max_unmap_segments;
	num_blocks = max_unmap_blocks * 2;
	offset = 0;

	g_io_done = false;
	for (i = 0; i < 2; i++) {
		expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_UNMAP, offset, max_unmap_blocks, 0);
		TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
		offset += max_unmap_blocks;
	}

	rc = spdk_bdev_unmap_blocks(desc, ioch, 0, num_blocks, io_done, NULL);
	CU_ASSERT_EQUAL(rc, 0);
	CU_ASSERT(g_io_done == false);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2);
	stub_complete_io(2);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);

	/* Case 3: Test the split with 15 children requests; the first 8 children
	 * must complete before the remaining 7 are submitted.
	 */
	num_children = 15;
	num_blocks = max_unmap_blocks * num_children;
	g_io_done = false;
	offset = 0;
	for (i = 0; i < num_children; i++) {
		expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_UNMAP, offset, max_unmap_blocks, 0);
		TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
		offset += max_unmap_blocks;
	}

	rc = spdk_bdev_unmap_blocks(desc, ioch, 0, num_blocks, io_done, NULL);
	CU_ASSERT_EQUAL(rc, 0);
	CU_ASSERT(g_io_done == false);

	while (num_children > 0) {
		num_outstanding = spdk_min(num_children, SPDK_BDEV_MAX_CHILDREN_UNMAP_WRITE_ZEROES_REQS);
		CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == num_outstanding);
		stub_complete_io(num_outstanding);
		num_children -= num_outstanding;
	}
	CU_ASSERT(g_io_done == true);

	spdk_put_io_channel(ioch);
	spdk_bdev_close(desc);
	free_bdev(bdev);
	spdk_bdev_finish(bdev_fini_cb, NULL);
	poll_threads();
}
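/*
 * Both the unmap and write-zeroes split paths cap the number of child requests
 * outstanding at once at SPDK_BDEV_MAX_CHILDREN_UNMAP_WRITE_ZEROES_REQS (8 at
 * the time of writing, per the Case 3 comments). With 15 children that means
 * two batches: 8 children first, then the remaining 7 once the first batch
 * completes, which is exactly what the while loops in these two tests walk
 * through.
 */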
static void
bdev_write_zeroes_split_test(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc = NULL;
	struct spdk_io_channel *ioch;
	struct spdk_bdev_channel *bdev_ch;
	struct ut_expected_io *expected_io;
	struct spdk_bdev_opts bdev_opts = {};
	uint32_t i, num_outstanding;
	uint64_t offset, num_blocks, max_write_zeroes_blocks, num_children;
	int rc;

	spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts));
	bdev_opts.bdev_io_pool_size = 512;
	bdev_opts.bdev_io_cache_size = 64;
	rc = spdk_bdev_set_opts(&bdev_opts);
	CU_ASSERT(rc == 0);

	spdk_bdev_initialize(bdev_init_cb, NULL);
	bdev = allocate_bdev("bdev");

	rc = spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc);
	CU_ASSERT_EQUAL(rc, 0);
	SPDK_CU_ASSERT_FATAL(desc != NULL);
	CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc));
	ioch = spdk_bdev_get_io_channel(desc);
	SPDK_CU_ASSERT_FATAL(ioch != NULL);
	bdev_ch = spdk_io_channel_get_ctx(ioch);
	CU_ASSERT(TAILQ_EMPTY(&bdev_ch->io_submitted));

	fn_table.submit_request = stub_submit_request;
	g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS;

	/* Case 1: First test that the request won't be split */
	num_blocks = 32;

	g_io_done = false;
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, 0, num_blocks, 0);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
	rc = spdk_bdev_write_zeroes_blocks(desc, ioch, 0, num_blocks, io_done, NULL);
	CU_ASSERT_EQUAL(rc, 0);
	CU_ASSERT(g_io_done == false);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);

	/* Case 2: Test the split with 2 children requests */
	max_write_zeroes_blocks = 8;
	bdev->max_write_zeroes = max_write_zeroes_blocks;
	num_blocks = max_write_zeroes_blocks * 2;
	offset = 0;

	g_io_done = false;
	for (i = 0; i < 2; i++) {
		expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, offset,
						   max_write_zeroes_blocks, 0);
		TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
		offset += max_write_zeroes_blocks;
	}

	rc = spdk_bdev_write_zeroes_blocks(desc, ioch, 0, num_blocks, io_done, NULL);
	CU_ASSERT_EQUAL(rc, 0);
	CU_ASSERT(g_io_done == false);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2);
	stub_complete_io(2);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);

	/* Case 3: Test the split with 15 children requests; the first 8 children
	 * must complete before the remaining 7 are submitted.
	 */
	num_children = 15;
	num_blocks = max_write_zeroes_blocks * num_children;
	g_io_done = false;
	offset = 0;
	for (i = 0; i < num_children; i++) {
		expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, offset,
						   max_write_zeroes_blocks, 0);
		TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
		offset += max_write_zeroes_blocks;
	}

	rc = spdk_bdev_write_zeroes_blocks(desc, ioch, 0, num_blocks, io_done, NULL);
	CU_ASSERT_EQUAL(rc, 0);
	CU_ASSERT(g_io_done == false);

	while (num_children > 0) {
		num_outstanding = spdk_min(num_children, SPDK_BDEV_MAX_CHILDREN_UNMAP_WRITE_ZEROES_REQS);
		CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == num_outstanding);
		stub_complete_io(num_outstanding);
		num_children -= num_outstanding;
	}
	CU_ASSERT(g_io_done == true);

	spdk_put_io_channel(ioch);
	spdk_bdev_close(desc);
	free_bdev(bdev);
	spdk_bdev_finish(bdev_fini_cb, NULL);
	poll_threads();
}
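/*
 * spdk_bdev_set_opts() validates its argument by size and against the built-in
 * buffer pool minimums (BUF_SMALL_POOL_SIZE/BUF_LARGE_POOL_SIZE), as the cases
 * below exercise. Case 1 fails precisely because opts_size was never filled
 * in; callers avoid that by fetching the current options first. A minimal
 * sketch of correct usage, mirroring the other tests in this file:
 *
 *	struct spdk_bdev_opts opts = {};
 *	int rc;
 *
 *	spdk_bdev_get_opts(&opts, sizeof(opts));
 *	opts.bdev_io_pool_size = 512;
 *	opts.bdev_io_cache_size = 64;
 *	rc = spdk_bdev_set_opts(&opts);
 *	CU_ASSERT(rc == 0);
 */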
static void
bdev_set_options_test(void)
{
	struct spdk_bdev_opts bdev_opts = {};
	int rc;

	/* Case 1: Do not set opts_size */
	rc = spdk_bdev_set_opts(&bdev_opts);
	CU_ASSERT(rc == -1);

	spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts));
	bdev_opts.bdev_io_pool_size = 4;
	bdev_opts.bdev_io_cache_size = 2;
	bdev_opts.small_buf_pool_size = 4;

	/* Case 2: Do not set valid small_buf_pool_size and large_buf_pool_size */
	rc = spdk_bdev_set_opts(&bdev_opts);
	CU_ASSERT(rc == -1);

	/* Case 3: Do not set valid large_buf_pool_size */
	bdev_opts.small_buf_pool_size = BUF_SMALL_POOL_SIZE;
	bdev_opts.large_buf_pool_size = BUF_LARGE_POOL_SIZE - 1;
	rc = spdk_bdev_set_opts(&bdev_opts);
	CU_ASSERT(rc == -1);

	/* Case 4: Set valid large_buf_pool_size */
	bdev_opts.large_buf_pool_size = BUF_LARGE_POOL_SIZE;
	rc = spdk_bdev_set_opts(&bdev_opts);
	CU_ASSERT(rc == 0);

	/* Case 5: Set different valid values for the small and large buf pools */
	bdev_opts.small_buf_pool_size = BUF_SMALL_POOL_SIZE + 3;
	bdev_opts.large_buf_pool_size = BUF_LARGE_POOL_SIZE + 3;
	rc = spdk_bdev_set_opts(&bdev_opts);
	CU_ASSERT(rc == 0);
}

static uint64_t
get_ns_time(void)
{
	int rc;
	struct timespec ts;

	rc = clock_gettime(CLOCK_MONOTONIC, &ts);
	CU_ASSERT(rc == 0);
	return ts.tv_sec * 1000 * 1000 * 1000 + ts.tv_nsec;
}

static int
rb_tree_get_height(struct spdk_bdev_name *bdev_name)
{
	int h1, h2;

	if (bdev_name == NULL) {
		return -1;
	} else {
		h1 = rb_tree_get_height(RB_LEFT(bdev_name, node));
		h2 = rb_tree_get_height(RB_RIGHT(bdev_name, node));

		return spdk_max(h1, h2) + 1;
	}
}

static void
bdev_multi_allocation(void)
{
	const int max_bdev_num = 1024 * 16;
	char name[max_bdev_num][16];
	char noexist_name[] = "invalid_bdev";
	struct spdk_bdev *bdev[max_bdev_num];
	int i, j;
	uint64_t last_time;
	int bdev_num;
	int height;

	for (j = 0; j < max_bdev_num; j++) {
		snprintf(name[j], sizeof(name[j]), "bdev%d", j);
	}

	for (i = 0; i < 16; i++) {
		last_time = get_ns_time();
		bdev_num = 1024 * (i + 1);
		for (j = 0; j < bdev_num; j++) {
			bdev[j] = allocate_bdev(name[j]);
			height = rb_tree_get_height(&bdev[j]->internal.bdev_name);
			CU_ASSERT(height <= (int)(spdk_u32log2(2 * j + 2)));
		}
		SPDK_NOTICELOG("alloc bdev num %d takes %" PRIu64 " ms\n", bdev_num,
			       (get_ns_time() - last_time) / 1000 / 1000);
		for (j = 0; j < bdev_num; j++) {
			CU_ASSERT(spdk_bdev_get_by_name(name[j]) != NULL);
		}
		CU_ASSERT(spdk_bdev_get_by_name(noexist_name) == NULL);

		for (j = 0; j < bdev_num; j++) {
			free_bdev(bdev[j]);
		}
		for (j = 0; j < bdev_num; j++) {
			CU_ASSERT(spdk_bdev_get_by_name(name[j]) == NULL);
		}
	}
}
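/*
 * Context for the height assertion above: bdev names are kept in a red-black
 * tree (RB_LEFT/RB_RIGHT are the BSD RB macros). rb_tree_get_height() measures
 * the height recursively, counting an empty subtree as -1, and
 * bdev_multi_allocation() asserts that the height after the j-th insertion
 * never exceeds log2(2 * j + 2), i.e. that the name tree stays balanced (and
 * lookups logarithmic) even with thousands of bdevs registered.
 */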
static struct spdk_memory_domain *g_bdev_memory_domain = (struct spdk_memory_domain *)0xf00df00d;

static int
test_bdev_get_supported_dma_device_types_op(void *ctx, struct spdk_memory_domain **domains,
		int array_size)
{
	if (array_size > 0 && domains) {
		domains[0] = g_bdev_memory_domain;
	}

	return 1;
}

static void
bdev_get_memory_domains(void)
{
	struct spdk_bdev_fn_table fn_table = {
		.get_memory_domains = test_bdev_get_supported_dma_device_types_op
	};
	struct spdk_bdev bdev = { .fn_table = &fn_table };
	struct spdk_memory_domain *domains[2] = {};
	int rc;

	/* bdev is NULL */
	rc = spdk_bdev_get_memory_domains(NULL, domains, 2);
	CU_ASSERT(rc == -EINVAL);

	/* domains is NULL */
	rc = spdk_bdev_get_memory_domains(&bdev, NULL, 2);
	CU_ASSERT(rc == 1);

	/* array size is 0 */
	rc = spdk_bdev_get_memory_domains(&bdev, domains, 0);
	CU_ASSERT(rc == 1);

	/* get_memory_domains op is set */
	rc = spdk_bdev_get_memory_domains(&bdev, domains, 2);
	CU_ASSERT(rc == 1);
	CU_ASSERT(domains[0] == g_bdev_memory_domain);

	/* get_memory_domains op is not set */
	fn_table.get_memory_domains = NULL;
	rc = spdk_bdev_get_memory_domains(&bdev, domains, 2);
	CU_ASSERT(rc == 0);
}

static void
bdev_writev_readv_ext(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc = NULL;
	struct spdk_io_channel *io_ch;
	char io_buf[512];
	struct iovec iov = { .iov_base = io_buf, .iov_len = 512 };
	struct ut_expected_io *expected_io;
	struct spdk_bdev_ext_io_opts ext_io_opts = {
		.metadata = (void *)0xFF000000,
		.size = sizeof(ext_io_opts)
	};
	int rc;

	spdk_bdev_initialize(bdev_init_cb, NULL);

	bdev = allocate_bdev("bdev0");
	bdev->md_interleave = false;
	bdev->md_len = 8;

	rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(desc != NULL);
	CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc));
	io_ch = spdk_bdev_get_io_channel(desc);
	CU_ASSERT(io_ch != NULL);

	/* Test 1: simple read and write with ext_opts */
	g_io_done = false;
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 32, 14, 1);
	expected_io->md_buf = ext_io_opts.metadata;
	expected_io->ext_io_opts = &ext_io_opts;
	ut_expected_io_set_iov(expected_io, 0, iov.iov_base, iov.iov_len);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	rc = spdk_bdev_readv_blocks_ext(desc, io_ch, &iov, 1, 32, 14, io_done, NULL, &ext_io_opts);

	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == true);

	g_io_done = false;
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 32, 14, 1);
	expected_io->md_buf = ext_io_opts.metadata;
	expected_io->ext_io_opts = &ext_io_opts;
	ut_expected_io_set_iov(expected_io, 0, iov.iov_base, iov.iov_len);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	rc = spdk_bdev_writev_blocks_ext(desc, io_ch, &iov, 1, 32, 14, io_done, NULL, &ext_io_opts);

	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == true);

	/* Test 2: invalid ext_opts size */
	ext_io_opts.size = 0;
	rc = spdk_bdev_readv_blocks_ext(desc, io_ch, &iov, 1, 32, 14, io_done, NULL, &ext_io_opts);
	CU_ASSERT(rc != 0);
	rc = spdk_bdev_writev_blocks_ext(desc, io_ch, &iov, 1, 32, 14, io_done, NULL, &ext_io_opts);
	CU_ASSERT(rc != 0);

	ext_io_opts.size = sizeof(ext_io_opts) * 2;
	rc = spdk_bdev_readv_blocks_ext(desc, io_ch, &iov, 1, 32, 14, io_done, NULL, &ext_io_opts);
	CU_ASSERT(rc != 0);
	rc = spdk_bdev_writev_blocks_ext(desc, io_ch, &iov, 1, 32, 14, io_done, NULL, &ext_io_opts);
	CU_ASSERT(rc != 0);

	ext_io_opts.size = offsetof(struct spdk_bdev_ext_io_opts, metadata) +
			   sizeof(ext_io_opts.metadata) - 1;
	rc = spdk_bdev_readv_blocks_ext(desc, io_ch, &iov, 1, 32, 14, io_done, NULL, &ext_io_opts);
	CU_ASSERT(rc != 0);
	rc = spdk_bdev_writev_blocks_ext(desc, io_ch, &iov, 1, 32, 14, io_done, NULL, &ext_io_opts);
	CU_ASSERT(rc != 0);
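	/* The rejections above mirror the size validation: opts->size must be
	 * non-zero, must not claim more bytes than the library's own struct,
	 * and must cover every member the call dereferences (at least through
	 * metadata). A sketch of a correctly initialized opts as used in this
	 * test, with md_buf standing in for any caller-provided metadata
	 * buffer:
	 *
	 *	struct spdk_bdev_ext_io_opts opts = {};
	 *
	 *	opts.size = sizeof(opts);
	 *	opts.metadata = md_buf;
	 *	rc = spdk_bdev_readv_blocks_ext(desc, io_ch, &iov, 1, offset,
	 *					num_blocks, io_done, NULL, &opts);
	 */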
	/* Test 3: check that an IO request with ext_opts and metadata is split correctly
	 * Offset 14, length 8, payload 0xF000
	 * Child - Offset 14, length 2, payload 0xF000
	 * Child - Offset 16, length 6, payload 0xF000 + 2 * 512
	 */
	bdev->optimal_io_boundary = 16;
	bdev->split_on_optimal_io_boundary = true;
	bdev->md_interleave = false;
	bdev->md_len = 8;

	iov.iov_base = (void *)0xF000;
	iov.iov_len = 4096;
	memset(&ext_io_opts, 0, sizeof(ext_io_opts));
	ext_io_opts.metadata = (void *)0xFF000000;
	ext_io_opts.size = sizeof(ext_io_opts);
	g_io_done = false;

	/* read */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 14, 2, 1);
	expected_io->md_buf = ext_io_opts.metadata;
	ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 2 * 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 16, 6, 1);
	expected_io->md_buf = ext_io_opts.metadata + 2 * 8;
	ut_expected_io_set_iov(expected_io, 0, (void *)(0xF000 + 2 * 512), 6 * 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	rc = spdk_bdev_readv_blocks_ext(desc, io_ch, &iov, 1, 14, 8, io_done, NULL, &ext_io_opts);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2);
	stub_complete_io(2);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);

	/* write */
	g_io_done = false;
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 14, 2, 1);
	expected_io->md_buf = ext_io_opts.metadata;
	ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 2 * 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 16, 6, 1);
	expected_io->md_buf = ext_io_opts.metadata + 2 * 8;
	ut_expected_io_set_iov(expected_io, 0, (void *)(0xF000 + 2 * 512), 6 * 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	rc = spdk_bdev_writev_blocks_ext(desc, io_ch, &iov, 1, 14, 8, io_done, NULL, &ext_io_opts);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2);
	stub_complete_io(2);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
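	/*
	 * Worked example for the md_buf offsets above: with separate metadata
	 * (md_interleave == false) and md_len == 8 bytes per block, the second
	 * child starts 2 blocks into the parent IO, so its metadata pointer is
	 * advanced by 2 * 8 = 16 bytes while its payload pointer is advanced
	 * by 2 * 512 bytes.
	 */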
	/* Test 4: verify data pull/push
	 * The bdev doesn't support memory domains, so buffers from the bdev
	 * memory pool will be used.
	 */
	ext_io_opts.memory_domain = (struct spdk_memory_domain *)0xdeadbeef;

	g_io_done = false;
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 32, 14, 1);
	ut_expected_io_set_iov(expected_io, 0, iov.iov_base, iov.iov_len);
	expected_io->ext_io_opts = &ext_io_opts;
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	rc = spdk_bdev_readv_blocks_ext(desc, io_ch, &iov, 1, 32, 14, io_done, NULL, &ext_io_opts);

	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_memory_domain_push_data_called == true);
	CU_ASSERT(g_io_done == true);

	g_io_done = false;
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 32, 14, 1);
	ut_expected_io_set_iov(expected_io, 0, iov.iov_base, iov.iov_len);
	expected_io->ext_io_opts = &ext_io_opts;
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	rc = spdk_bdev_writev_blocks_ext(desc, io_ch, &iov, 1, 32, 14, io_done, NULL, &ext_io_opts);

	CU_ASSERT(rc == 0);
	CU_ASSERT(g_memory_domain_pull_data_called == true);
	CU_ASSERT(g_io_done == false);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == true);

	spdk_put_io_channel(io_ch);
	spdk_bdev_close(desc);
	free_bdev(bdev);
	spdk_bdev_finish(bdev_fini_cb, NULL);
	poll_threads();
}

static void
bdev_register_uuid_alias(void)
{
	struct spdk_bdev *bdev, *second;
	char uuid[SPDK_UUID_STRING_LEN];
	int rc;

	spdk_bdev_initialize(bdev_init_cb, NULL);
	bdev = allocate_bdev("bdev0");

	/* Make sure a UUID was generated */
	CU_ASSERT_FALSE(spdk_mem_all_zero(&bdev->uuid, sizeof(bdev->uuid)));

	/* Check that a UUID alias was registered */
	spdk_uuid_fmt_lower(uuid, sizeof(uuid), &bdev->uuid);
	CU_ASSERT_EQUAL(spdk_bdev_get_by_name(uuid), bdev);

	/* Unregister the bdev */
	spdk_bdev_unregister(bdev, NULL, NULL);
	poll_threads();
	CU_ASSERT_PTR_NULL(spdk_bdev_get_by_name(uuid));

	/* Check the same, but this time register the bdev with a non-zero UUID */
	rc = spdk_bdev_register(bdev);
	CU_ASSERT_EQUAL(rc, 0);
	CU_ASSERT_EQUAL(spdk_bdev_get_by_name(uuid), bdev);

	/* Unregister the bdev */
	spdk_bdev_unregister(bdev, NULL, NULL);
	poll_threads();
	CU_ASSERT_PTR_NULL(spdk_bdev_get_by_name(uuid));

	/* Register the bdev using the UUID as the name */
	bdev->name = uuid;
	rc = spdk_bdev_register(bdev);
	CU_ASSERT_EQUAL(rc, 0);
	CU_ASSERT_EQUAL(spdk_bdev_get_by_name(uuid), bdev);

	/* Unregister the bdev */
	spdk_bdev_unregister(bdev, NULL, NULL);
	poll_threads();
	CU_ASSERT_PTR_NULL(spdk_bdev_get_by_name(uuid));

	/* Check that it's not possible to register two bdevs with the same UUIDs */
	bdev->name = "bdev0";
	second = allocate_bdev("bdev1");
	spdk_uuid_copy(&bdev->uuid, &second->uuid);
	rc = spdk_bdev_register(bdev);
	CU_ASSERT_EQUAL(rc, -EEXIST);

	/* Regenerate the UUID and re-check */
	spdk_uuid_generate(&bdev->uuid);
	rc = spdk_bdev_register(bdev);
	CU_ASSERT_EQUAL(rc, 0);

	/* And check that both bdevs can be retrieved through their UUIDs */
	spdk_uuid_fmt_lower(uuid, sizeof(uuid), &bdev->uuid);
	CU_ASSERT_EQUAL(spdk_bdev_get_by_name(uuid), bdev);
	spdk_uuid_fmt_lower(uuid, sizeof(uuid), &second->uuid);
	CU_ASSERT_EQUAL(spdk_bdev_get_by_name(uuid), second);

	free_bdev(second);
	free_bdev(bdev);
	spdk_bdev_finish(bdev_fini_cb, NULL);
	poll_threads();
}
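/*
 * Unregistration is asynchronous: spdk_bdev_unregister() and
 * spdk_bdev_unregister_by_name() only schedule the teardown, and the caller's
 * callback fires later from the reactor/thread loop. The test below relies on
 * that ordering by checking the callback state both before and after
 * poll_threads().
 */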
static void
bdev_unregister_by_name(void)
{
	struct spdk_bdev *bdev;
	int rc;

	bdev = allocate_bdev("bdev");

	g_event_type1 = 0xFF;
	g_unregister_arg = NULL;
	g_unregister_rc = -1;

	rc = spdk_bdev_unregister_by_name("bdev1", &bdev_ut_if, bdev_unregister_cb, (void *)0x12345678);
	CU_ASSERT(rc == -ENODEV);

	rc = spdk_bdev_unregister_by_name("bdev", &vbdev_ut_if, bdev_unregister_cb, (void *)0x12345678);
	CU_ASSERT(rc == -ENODEV);

	rc = spdk_bdev_unregister_by_name("bdev", &bdev_ut_if, bdev_unregister_cb, (void *)0x12345678);
	CU_ASSERT(rc == 0);

	/* Check that the unregister callback is delayed */
	CU_ASSERT(g_unregister_arg == NULL);
	CU_ASSERT(g_unregister_rc == -1);

	poll_threads();

	/* The event callback shall not be issued because the device was closed */
	CU_ASSERT(g_event_type1 == 0xFF);
	/* The unregister callback is issued */
	CU_ASSERT(g_unregister_arg == (void *)0x12345678);
	CU_ASSERT(g_unregister_rc == 0);

	free_bdev(bdev);
}

static int
count_bdevs(void *ctx, struct spdk_bdev *bdev)
{
	int *count = ctx;

	(*count)++;

	return 0;
}
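/*
 * spdk_for_each_bdev() visits every registered bdev, while
 * spdk_for_each_bdev_leaf() only visits bdevs that have not been claimed by a
 * module. The test below claims 3 of its 8 bdevs, so the two iterators are
 * expected to count 8 and 5 respectively.
 */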
static void
for_each_bdev_test(void)
{
	struct spdk_bdev *bdev[8];
	int rc, count;

	bdev[0] = allocate_bdev("bdev0");

	bdev[1] = allocate_bdev("bdev1");
	rc = spdk_bdev_module_claim_bdev(bdev[1], NULL, &bdev_ut_if);
	CU_ASSERT(rc == 0);

	bdev[2] = allocate_bdev("bdev2");

	bdev[3] = allocate_bdev("bdev3");
	rc = spdk_bdev_module_claim_bdev(bdev[3], NULL, &bdev_ut_if);
	CU_ASSERT(rc == 0);

	bdev[4] = allocate_bdev("bdev4");

	bdev[5] = allocate_bdev("bdev5");
	rc = spdk_bdev_module_claim_bdev(bdev[5], NULL, &bdev_ut_if);
	CU_ASSERT(rc == 0);

	bdev[6] = allocate_bdev("bdev6");

	bdev[7] = allocate_bdev("bdev7");

	count = 0;
	rc = spdk_for_each_bdev(&count, count_bdevs);
	CU_ASSERT(rc == 0);
	CU_ASSERT(count == 8);

	count = 0;
	rc = spdk_for_each_bdev_leaf(&count, count_bdevs);
	CU_ASSERT(rc == 0);
	CU_ASSERT(count == 5);

	free_bdev(bdev[0]);
	free_bdev(bdev[1]);
	free_bdev(bdev[2]);
	free_bdev(bdev[3]);
	free_bdev(bdev[4]);
	free_bdev(bdev[5]);
	free_bdev(bdev[6]);
	free_bdev(bdev[7]);
}

int
main(int argc, char **argv)
{
	CU_pSuite suite = NULL;
	unsigned int num_failures;

	CU_set_error_action(CUEA_ABORT);
	CU_initialize_registry();

	suite = CU_add_suite("bdev", null_init, null_clean);

	CU_ADD_TEST(suite, bytes_to_blocks_test);
	CU_ADD_TEST(suite, num_blocks_test);
	CU_ADD_TEST(suite, io_valid_test);
	CU_ADD_TEST(suite, open_write_test);
	CU_ADD_TEST(suite, claim_test);
	CU_ADD_TEST(suite, alias_add_del_test);
	CU_ADD_TEST(suite, get_device_stat_test);
	CU_ADD_TEST(suite, bdev_io_types_test);
	CU_ADD_TEST(suite, bdev_io_wait_test);
	CU_ADD_TEST(suite, bdev_io_spans_split_test);
	CU_ADD_TEST(suite, bdev_io_boundary_split_test);
	CU_ADD_TEST(suite, bdev_io_max_size_and_segment_split_test);
	CU_ADD_TEST(suite, bdev_io_mix_split_test);
	CU_ADD_TEST(suite, bdev_io_split_with_io_wait);
	CU_ADD_TEST(suite, bdev_io_alignment_with_boundary);
	CU_ADD_TEST(suite, bdev_io_alignment);
	CU_ADD_TEST(suite, bdev_histograms);
	CU_ADD_TEST(suite, bdev_write_zeroes);
	CU_ADD_TEST(suite, bdev_compare_and_write);
	CU_ADD_TEST(suite, bdev_compare);
	CU_ADD_TEST(suite, bdev_zcopy_write);
	CU_ADD_TEST(suite, bdev_zcopy_read);
	CU_ADD_TEST(suite, bdev_open_while_hotremove);
	CU_ADD_TEST(suite, bdev_close_while_hotremove);
	CU_ADD_TEST(suite, bdev_open_ext);
	CU_ADD_TEST(suite, bdev_open_ext_unregister);
	CU_ADD_TEST(suite, bdev_set_io_timeout);
	CU_ADD_TEST(suite, bdev_set_qd_sampling);
	CU_ADD_TEST(suite, lba_range_overlap);
	CU_ADD_TEST(suite, lock_lba_range_check_ranges);
	CU_ADD_TEST(suite, lock_lba_range_with_io_outstanding);
	CU_ADD_TEST(suite, lock_lba_range_overlapped);
	CU_ADD_TEST(suite, bdev_io_abort);
	CU_ADD_TEST(suite, bdev_unmap);
	CU_ADD_TEST(suite, bdev_write_zeroes_split_test);
	CU_ADD_TEST(suite, bdev_set_options_test);
	CU_ADD_TEST(suite, bdev_multi_allocation);
	CU_ADD_TEST(suite, bdev_get_memory_domains);
	CU_ADD_TEST(suite, bdev_writev_readv_ext);
	CU_ADD_TEST(suite, bdev_register_uuid_alias);
	CU_ADD_TEST(suite, bdev_unregister_by_name);
	CU_ADD_TEST(suite, for_each_bdev_test);

	allocate_cores(1);
	allocate_threads(1);
	set_thread(0);

	CU_basic_set_mode(CU_BRM_VERBOSE);
	CU_basic_run_tests();
	num_failures = CU_get_number_of_failures();
	CU_cleanup_registry();

	free_threads();
	free_cores();

	return num_failures;
}