1 /* SPDX-License-Identifier: BSD-3-Clause 2 * Copyright (c) Intel Corporation. All rights reserved. 3 * Copyright (c) 2019 Mellanox Technologies LTD. All rights reserved. 4 * Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. 5 */ 6 7 #include "spdk_cunit.h" 8 9 #include "common/lib/ut_multithread.c" 10 #include "unit/lib/json_mock.c" 11 12 #include "spdk/config.h" 13 /* HACK: disable VTune integration so the unit test doesn't need VTune headers and libs to build */ 14 #undef SPDK_CONFIG_VTUNE 15 16 #include "bdev/bdev.c" 17 18 DEFINE_STUB(spdk_notify_send, uint64_t, (const char *type, const char *ctx), 0); 19 DEFINE_STUB(spdk_notify_type_register, struct spdk_notify_type *, (const char *type), NULL); 20 DEFINE_STUB(spdk_memory_domain_get_dma_device_id, const char *, (struct spdk_memory_domain *domain), 21 "test_domain"); 22 DEFINE_STUB(spdk_memory_domain_get_dma_device_type, enum spdk_dma_device_type, 23 (struct spdk_memory_domain *domain), 0); 24 25 static bool g_memory_domain_pull_data_called; 26 static bool g_memory_domain_push_data_called; 27 28 DEFINE_RETURN_MOCK(spdk_memory_domain_pull_data, int); 29 int 30 spdk_memory_domain_pull_data(struct spdk_memory_domain *src_domain, void *src_domain_ctx, 31 struct iovec *src_iov, uint32_t src_iov_cnt, struct iovec *dst_iov, uint32_t dst_iov_cnt, 32 spdk_memory_domain_data_cpl_cb cpl_cb, void *cpl_cb_arg) 33 { 34 g_memory_domain_pull_data_called = true; 35 HANDLE_RETURN_MOCK(spdk_memory_domain_pull_data); 36 cpl_cb(cpl_cb_arg, 0); 37 return 0; 38 } 39 40 DEFINE_RETURN_MOCK(spdk_memory_domain_push_data, int); 41 int 42 spdk_memory_domain_push_data(struct spdk_memory_domain *dst_domain, void *dst_domain_ctx, 43 struct iovec *dst_iov, uint32_t dst_iovcnt, struct iovec *src_iov, uint32_t src_iovcnt, 44 spdk_memory_domain_data_cpl_cb cpl_cb, void *cpl_cb_arg) 45 { 46 g_memory_domain_push_data_called = true; 47 HANDLE_RETURN_MOCK(spdk_memory_domain_push_data); 48 cpl_cb(cpl_cb_arg, 0); 49 return 0; 50 } 51 52 int g_status; 53 int g_count; 54 enum spdk_bdev_event_type g_event_type1; 55 enum spdk_bdev_event_type g_event_type2; 56 enum spdk_bdev_event_type g_event_type3; 57 enum spdk_bdev_event_type g_event_type4; 58 struct spdk_histogram_data *g_histogram; 59 void *g_unregister_arg; 60 int g_unregister_rc; 61 62 void 63 spdk_scsi_nvme_translate(const struct spdk_bdev_io *bdev_io, 64 int *sc, int *sk, int *asc, int *ascq) 65 { 66 } 67 68 static int 69 null_init(void) 70 { 71 return 0; 72 } 73 74 static int 75 null_clean(void) 76 { 77 return 0; 78 } 79 80 static int 81 stub_destruct(void *ctx) 82 { 83 return 0; 84 } 85 86 struct ut_expected_io { 87 uint8_t type; 88 uint64_t offset; 89 uint64_t length; 90 int iovcnt; 91 struct iovec iov[BDEV_IO_NUM_CHILD_IOV]; 92 void *md_buf; 93 struct spdk_bdev_ext_io_opts *ext_io_opts; 94 TAILQ_ENTRY(ut_expected_io) link; 95 }; 96 97 struct bdev_ut_channel { 98 TAILQ_HEAD(, spdk_bdev_io) outstanding_io; 99 uint32_t outstanding_io_count; 100 TAILQ_HEAD(, ut_expected_io) expected_io; 101 }; 102 103 static bool g_io_done; 104 static struct spdk_bdev_io *g_bdev_io; 105 static enum spdk_bdev_io_status g_io_status; 106 static enum spdk_bdev_io_status g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 107 static uint32_t g_bdev_ut_io_device; 108 static struct bdev_ut_channel *g_bdev_ut_channel; 109 static void *g_compare_read_buf; 110 static uint32_t g_compare_read_buf_len; 111 static void *g_compare_write_buf; 112 static uint32_t g_compare_write_buf_len; 113 static bool g_abort_done; 114 static enum spdk_bdev_io_status g_abort_status; 115 static void *g_zcopy_read_buf; 116 static uint32_t g_zcopy_read_buf_len; 117 static void *g_zcopy_write_buf; 118 static uint32_t g_zcopy_write_buf_len; 119 static struct spdk_bdev_io *g_zcopy_bdev_io; 120 121 static struct ut_expected_io * 122 ut_alloc_expected_io(uint8_t type, uint64_t offset, uint64_t length, int iovcnt) 123 { 124 struct ut_expected_io *expected_io; 125 126 expected_io = calloc(1, sizeof(*expected_io)); 127 SPDK_CU_ASSERT_FATAL(expected_io != NULL); 128 129 expected_io->type = type; 130 expected_io->offset = offset; 131 expected_io->length = length; 132 expected_io->iovcnt = iovcnt; 133 134 return expected_io; 135 } 136 137 static void 138 ut_expected_io_set_iov(struct ut_expected_io *expected_io, int pos, void *base, size_t len) 139 { 140 expected_io->iov[pos].iov_base = base; 141 expected_io->iov[pos].iov_len = len; 142 } 143 144 static void 145 stub_submit_request(struct spdk_io_channel *_ch, struct spdk_bdev_io *bdev_io) 146 { 147 struct bdev_ut_channel *ch = spdk_io_channel_get_ctx(_ch); 148 struct ut_expected_io *expected_io; 149 struct iovec *iov, *expected_iov; 150 struct spdk_bdev_io *bio_to_abort; 151 int i; 152 153 g_bdev_io = bdev_io; 154 155 if (g_compare_read_buf && bdev_io->type == SPDK_BDEV_IO_TYPE_READ) { 156 uint32_t len = bdev_io->u.bdev.iovs[0].iov_len; 157 158 CU_ASSERT(bdev_io->u.bdev.iovcnt == 1); 159 CU_ASSERT(g_compare_read_buf_len == len); 160 memcpy(bdev_io->u.bdev.iovs[0].iov_base, g_compare_read_buf, len); 161 } 162 163 if (g_compare_write_buf && bdev_io->type == SPDK_BDEV_IO_TYPE_WRITE) { 164 uint32_t len = bdev_io->u.bdev.iovs[0].iov_len; 165 166 CU_ASSERT(bdev_io->u.bdev.iovcnt == 1); 167 CU_ASSERT(g_compare_write_buf_len == len); 168 memcpy(g_compare_write_buf, bdev_io->u.bdev.iovs[0].iov_base, len); 169 } 170 171 if (g_compare_read_buf && bdev_io->type == SPDK_BDEV_IO_TYPE_COMPARE) { 172 uint32_t len = bdev_io->u.bdev.iovs[0].iov_len; 173 174 CU_ASSERT(bdev_io->u.bdev.iovcnt == 1); 175 CU_ASSERT(g_compare_read_buf_len == len); 176 if (memcmp(bdev_io->u.bdev.iovs[0].iov_base, g_compare_read_buf, len)) { 177 g_io_exp_status = SPDK_BDEV_IO_STATUS_MISCOMPARE; 178 } 179 } 180 181 if (bdev_io->type == SPDK_BDEV_IO_TYPE_ABORT) { 182 if (g_io_exp_status == SPDK_BDEV_IO_STATUS_SUCCESS) { 183 TAILQ_FOREACH(bio_to_abort, &ch->outstanding_io, module_link) { 184 if (bio_to_abort == bdev_io->u.abort.bio_to_abort) { 185 TAILQ_REMOVE(&ch->outstanding_io, bio_to_abort, module_link); 186 ch->outstanding_io_count--; 187 spdk_bdev_io_complete(bio_to_abort, SPDK_BDEV_IO_STATUS_FAILED); 188 break; 189 } 190 } 191 } 192 } 193 194 if (bdev_io->type == SPDK_BDEV_IO_TYPE_ZCOPY) { 195 if (bdev_io->u.bdev.zcopy.start) { 196 g_zcopy_bdev_io = bdev_io; 197 if (bdev_io->u.bdev.zcopy.populate) { 198 /* Start of a read */ 199 CU_ASSERT(g_zcopy_read_buf != NULL); 200 CU_ASSERT(g_zcopy_read_buf_len > 0); 201 bdev_io->u.bdev.iovs[0].iov_base = g_zcopy_read_buf; 202 bdev_io->u.bdev.iovs[0].iov_len = g_zcopy_read_buf_len; 203 bdev_io->u.bdev.iovcnt = 1; 204 } else { 205 /* Start of a write */ 206 CU_ASSERT(g_zcopy_write_buf != NULL); 207 CU_ASSERT(g_zcopy_write_buf_len > 0); 208 bdev_io->u.bdev.iovs[0].iov_base = g_zcopy_write_buf; 209 bdev_io->u.bdev.iovs[0].iov_len = g_zcopy_write_buf_len; 210 bdev_io->u.bdev.iovcnt = 1; 211 } 212 } else { 213 if (bdev_io->u.bdev.zcopy.commit) { 214 /* End of write */ 215 CU_ASSERT(bdev_io->u.bdev.iovs[0].iov_base == g_zcopy_write_buf); 216 CU_ASSERT(bdev_io->u.bdev.iovs[0].iov_len == g_zcopy_write_buf_len); 217 CU_ASSERT(bdev_io->u.bdev.iovcnt == 1); 218 g_zcopy_write_buf = NULL; 219 g_zcopy_write_buf_len = 0; 220 } else { 221 /* End of read */ 222 CU_ASSERT(bdev_io->u.bdev.iovs[0].iov_base == g_zcopy_read_buf); 223 CU_ASSERT(bdev_io->u.bdev.iovs[0].iov_len == g_zcopy_read_buf_len); 224 CU_ASSERT(bdev_io->u.bdev.iovcnt == 1); 225 g_zcopy_read_buf = NULL; 226 g_zcopy_read_buf_len = 0; 227 } 228 } 229 } 230 231 TAILQ_INSERT_TAIL(&ch->outstanding_io, bdev_io, module_link); 232 ch->outstanding_io_count++; 233 234 expected_io = TAILQ_FIRST(&ch->expected_io); 235 if (expected_io == NULL) { 236 return; 237 } 238 TAILQ_REMOVE(&ch->expected_io, expected_io, link); 239 240 if (expected_io->type != SPDK_BDEV_IO_TYPE_INVALID) { 241 CU_ASSERT(bdev_io->type == expected_io->type); 242 } 243 244 if (expected_io->md_buf != NULL) { 245 CU_ASSERT(expected_io->md_buf == bdev_io->u.bdev.md_buf); 246 if (bdev_io->u.bdev.ext_opts) { 247 CU_ASSERT(expected_io->md_buf == bdev_io->u.bdev.ext_opts->metadata); 248 } 249 } 250 251 if (expected_io->length == 0) { 252 free(expected_io); 253 return; 254 } 255 256 CU_ASSERT(expected_io->offset == bdev_io->u.bdev.offset_blocks); 257 CU_ASSERT(expected_io->length = bdev_io->u.bdev.num_blocks); 258 259 if (expected_io->iovcnt == 0) { 260 free(expected_io); 261 /* UNMAP, WRITE_ZEROES and FLUSH don't have iovs, so we can just return now. */ 262 return; 263 } 264 265 CU_ASSERT(expected_io->iovcnt == bdev_io->u.bdev.iovcnt); 266 for (i = 0; i < expected_io->iovcnt; i++) { 267 expected_iov = &expected_io->iov[i]; 268 if (bdev_io->internal.orig_iovcnt == 0) { 269 iov = &bdev_io->u.bdev.iovs[i]; 270 } else { 271 iov = bdev_io->internal.orig_iovs; 272 } 273 CU_ASSERT(iov->iov_len == expected_iov->iov_len); 274 CU_ASSERT(iov->iov_base == expected_iov->iov_base); 275 } 276 277 if (expected_io->ext_io_opts) { 278 CU_ASSERT(expected_io->ext_io_opts == bdev_io->internal.ext_opts) 279 } 280 281 free(expected_io); 282 } 283 284 static void 285 stub_submit_request_get_buf_cb(struct spdk_io_channel *_ch, 286 struct spdk_bdev_io *bdev_io, bool success) 287 { 288 CU_ASSERT(success == true); 289 290 stub_submit_request(_ch, bdev_io); 291 } 292 293 static void 294 stub_submit_request_get_buf(struct spdk_io_channel *_ch, struct spdk_bdev_io *bdev_io) 295 { 296 spdk_bdev_io_get_buf(bdev_io, stub_submit_request_get_buf_cb, 297 bdev_io->u.bdev.num_blocks * bdev_io->bdev->blocklen); 298 } 299 300 static uint32_t 301 stub_complete_io(uint32_t num_to_complete) 302 { 303 struct bdev_ut_channel *ch = g_bdev_ut_channel; 304 struct spdk_bdev_io *bdev_io; 305 static enum spdk_bdev_io_status io_status; 306 uint32_t num_completed = 0; 307 308 while (num_completed < num_to_complete) { 309 if (TAILQ_EMPTY(&ch->outstanding_io)) { 310 break; 311 } 312 bdev_io = TAILQ_FIRST(&ch->outstanding_io); 313 TAILQ_REMOVE(&ch->outstanding_io, bdev_io, module_link); 314 ch->outstanding_io_count--; 315 io_status = g_io_exp_status == SPDK_BDEV_IO_STATUS_SUCCESS ? SPDK_BDEV_IO_STATUS_SUCCESS : 316 g_io_exp_status; 317 spdk_bdev_io_complete(bdev_io, io_status); 318 num_completed++; 319 } 320 321 return num_completed; 322 } 323 324 static struct spdk_io_channel * 325 bdev_ut_get_io_channel(void *ctx) 326 { 327 return spdk_get_io_channel(&g_bdev_ut_io_device); 328 } 329 330 static bool g_io_types_supported[SPDK_BDEV_NUM_IO_TYPES] = { 331 [SPDK_BDEV_IO_TYPE_READ] = true, 332 [SPDK_BDEV_IO_TYPE_WRITE] = true, 333 [SPDK_BDEV_IO_TYPE_COMPARE] = true, 334 [SPDK_BDEV_IO_TYPE_UNMAP] = true, 335 [SPDK_BDEV_IO_TYPE_FLUSH] = true, 336 [SPDK_BDEV_IO_TYPE_RESET] = true, 337 [SPDK_BDEV_IO_TYPE_NVME_ADMIN] = true, 338 [SPDK_BDEV_IO_TYPE_NVME_IO] = true, 339 [SPDK_BDEV_IO_TYPE_NVME_IO_MD] = true, 340 [SPDK_BDEV_IO_TYPE_WRITE_ZEROES] = true, 341 [SPDK_BDEV_IO_TYPE_ZCOPY] = true, 342 [SPDK_BDEV_IO_TYPE_ABORT] = true, 343 }; 344 345 static void 346 ut_enable_io_type(enum spdk_bdev_io_type io_type, bool enable) 347 { 348 g_io_types_supported[io_type] = enable; 349 } 350 351 static bool 352 stub_io_type_supported(void *_bdev, enum spdk_bdev_io_type io_type) 353 { 354 return g_io_types_supported[io_type]; 355 } 356 357 static struct spdk_bdev_fn_table fn_table = { 358 .destruct = stub_destruct, 359 .submit_request = stub_submit_request, 360 .get_io_channel = bdev_ut_get_io_channel, 361 .io_type_supported = stub_io_type_supported, 362 }; 363 364 static int 365 bdev_ut_create_ch(void *io_device, void *ctx_buf) 366 { 367 struct bdev_ut_channel *ch = ctx_buf; 368 369 CU_ASSERT(g_bdev_ut_channel == NULL); 370 g_bdev_ut_channel = ch; 371 372 TAILQ_INIT(&ch->outstanding_io); 373 ch->outstanding_io_count = 0; 374 TAILQ_INIT(&ch->expected_io); 375 return 0; 376 } 377 378 static void 379 bdev_ut_destroy_ch(void *io_device, void *ctx_buf) 380 { 381 CU_ASSERT(g_bdev_ut_channel != NULL); 382 g_bdev_ut_channel = NULL; 383 } 384 385 struct spdk_bdev_module bdev_ut_if; 386 387 static int 388 bdev_ut_module_init(void) 389 { 390 spdk_io_device_register(&g_bdev_ut_io_device, bdev_ut_create_ch, bdev_ut_destroy_ch, 391 sizeof(struct bdev_ut_channel), NULL); 392 spdk_bdev_module_init_done(&bdev_ut_if); 393 return 0; 394 } 395 396 static void 397 bdev_ut_module_fini(void) 398 { 399 spdk_io_device_unregister(&g_bdev_ut_io_device, NULL); 400 } 401 402 struct spdk_bdev_module bdev_ut_if = { 403 .name = "bdev_ut", 404 .module_init = bdev_ut_module_init, 405 .module_fini = bdev_ut_module_fini, 406 .async_init = true, 407 }; 408 409 static void vbdev_ut_examine(struct spdk_bdev *bdev); 410 411 static int 412 vbdev_ut_module_init(void) 413 { 414 return 0; 415 } 416 417 static void 418 vbdev_ut_module_fini(void) 419 { 420 } 421 422 struct spdk_bdev_module vbdev_ut_if = { 423 .name = "vbdev_ut", 424 .module_init = vbdev_ut_module_init, 425 .module_fini = vbdev_ut_module_fini, 426 .examine_config = vbdev_ut_examine, 427 }; 428 429 SPDK_BDEV_MODULE_REGISTER(bdev_ut, &bdev_ut_if) 430 SPDK_BDEV_MODULE_REGISTER(vbdev_ut, &vbdev_ut_if) 431 432 static void 433 vbdev_ut_examine(struct spdk_bdev *bdev) 434 { 435 spdk_bdev_module_examine_done(&vbdev_ut_if); 436 } 437 438 static struct spdk_bdev * 439 allocate_bdev(char *name) 440 { 441 struct spdk_bdev *bdev; 442 int rc; 443 444 bdev = calloc(1, sizeof(*bdev)); 445 SPDK_CU_ASSERT_FATAL(bdev != NULL); 446 447 bdev->name = name; 448 bdev->fn_table = &fn_table; 449 bdev->module = &bdev_ut_if; 450 bdev->blockcnt = 1024; 451 bdev->blocklen = 512; 452 453 rc = spdk_bdev_register(bdev); 454 CU_ASSERT(rc == 0); 455 456 return bdev; 457 } 458 459 static struct spdk_bdev * 460 allocate_vbdev(char *name) 461 { 462 struct spdk_bdev *bdev; 463 int rc; 464 465 bdev = calloc(1, sizeof(*bdev)); 466 SPDK_CU_ASSERT_FATAL(bdev != NULL); 467 468 bdev->name = name; 469 bdev->fn_table = &fn_table; 470 bdev->module = &vbdev_ut_if; 471 472 rc = spdk_bdev_register(bdev); 473 CU_ASSERT(rc == 0); 474 475 return bdev; 476 } 477 478 static void 479 free_bdev(struct spdk_bdev *bdev) 480 { 481 spdk_bdev_unregister(bdev, NULL, NULL); 482 poll_threads(); 483 memset(bdev, 0xFF, sizeof(*bdev)); 484 free(bdev); 485 } 486 487 static void 488 free_vbdev(struct spdk_bdev *bdev) 489 { 490 spdk_bdev_unregister(bdev, NULL, NULL); 491 poll_threads(); 492 memset(bdev, 0xFF, sizeof(*bdev)); 493 free(bdev); 494 } 495 496 static void 497 get_device_stat_cb(struct spdk_bdev *bdev, struct spdk_bdev_io_stat *stat, void *cb_arg, int rc) 498 { 499 const char *bdev_name; 500 501 CU_ASSERT(bdev != NULL); 502 CU_ASSERT(rc == 0); 503 bdev_name = spdk_bdev_get_name(bdev); 504 CU_ASSERT_STRING_EQUAL(bdev_name, "bdev0"); 505 506 free(stat); 507 508 *(bool *)cb_arg = true; 509 } 510 511 static void 512 bdev_unregister_cb(void *cb_arg, int rc) 513 { 514 g_unregister_arg = cb_arg; 515 g_unregister_rc = rc; 516 } 517 518 static void 519 bdev_ut_event_cb(enum spdk_bdev_event_type type, struct spdk_bdev *bdev, void *event_ctx) 520 { 521 } 522 523 static void 524 bdev_open_cb1(enum spdk_bdev_event_type type, struct spdk_bdev *bdev, void *event_ctx) 525 { 526 struct spdk_bdev_desc *desc = *(struct spdk_bdev_desc **)event_ctx; 527 528 g_event_type1 = type; 529 if (SPDK_BDEV_EVENT_REMOVE == type) { 530 spdk_bdev_close(desc); 531 } 532 } 533 534 static void 535 bdev_open_cb2(enum spdk_bdev_event_type type, struct spdk_bdev *bdev, void *event_ctx) 536 { 537 struct spdk_bdev_desc *desc = *(struct spdk_bdev_desc **)event_ctx; 538 539 g_event_type2 = type; 540 if (SPDK_BDEV_EVENT_REMOVE == type) { 541 spdk_bdev_close(desc); 542 } 543 } 544 545 static void 546 bdev_open_cb3(enum spdk_bdev_event_type type, struct spdk_bdev *bdev, void *event_ctx) 547 { 548 g_event_type3 = type; 549 } 550 551 static void 552 bdev_open_cb4(enum spdk_bdev_event_type type, struct spdk_bdev *bdev, void *event_ctx) 553 { 554 g_event_type4 = type; 555 } 556 557 static void 558 get_device_stat_test(void) 559 { 560 struct spdk_bdev *bdev; 561 struct spdk_bdev_io_stat *stat; 562 bool done; 563 564 bdev = allocate_bdev("bdev0"); 565 stat = calloc(1, sizeof(struct spdk_bdev_io_stat)); 566 if (stat == NULL) { 567 free_bdev(bdev); 568 return; 569 } 570 571 done = false; 572 spdk_bdev_get_device_stat(bdev, stat, get_device_stat_cb, &done); 573 while (!done) { poll_threads(); } 574 575 free_bdev(bdev); 576 } 577 578 static void 579 open_write_test(void) 580 { 581 struct spdk_bdev *bdev[9]; 582 struct spdk_bdev_desc *desc[9] = {}; 583 int rc; 584 585 /* 586 * Create a tree of bdevs to test various open w/ write cases. 587 * 588 * bdev0 through bdev3 are physical block devices, such as NVMe 589 * namespaces or Ceph block devices. 590 * 591 * bdev4 is a virtual bdev with multiple base bdevs. This models 592 * caching or RAID use cases. 593 * 594 * bdev5 through bdev7 are all virtual bdevs with the same base 595 * bdev (except bdev7). This models partitioning or logical volume 596 * use cases. 597 * 598 * bdev7 is a virtual bdev with multiple base bdevs. One of base bdevs 599 * (bdev2) is shared with other virtual bdevs: bdev5 and bdev6. This 600 * models caching, RAID, partitioning or logical volumes use cases. 601 * 602 * bdev8 is a virtual bdev with multiple base bdevs, but these 603 * base bdevs are themselves virtual bdevs. 604 * 605 * bdev8 606 * | 607 * +----------+ 608 * | | 609 * bdev4 bdev5 bdev6 bdev7 610 * | | | | 611 * +---+---+ +---+ + +---+---+ 612 * | | \ | / \ 613 * bdev0 bdev1 bdev2 bdev3 614 */ 615 616 bdev[0] = allocate_bdev("bdev0"); 617 rc = spdk_bdev_module_claim_bdev(bdev[0], NULL, &bdev_ut_if); 618 CU_ASSERT(rc == 0); 619 620 bdev[1] = allocate_bdev("bdev1"); 621 rc = spdk_bdev_module_claim_bdev(bdev[1], NULL, &bdev_ut_if); 622 CU_ASSERT(rc == 0); 623 624 bdev[2] = allocate_bdev("bdev2"); 625 rc = spdk_bdev_module_claim_bdev(bdev[2], NULL, &bdev_ut_if); 626 CU_ASSERT(rc == 0); 627 628 bdev[3] = allocate_bdev("bdev3"); 629 rc = spdk_bdev_module_claim_bdev(bdev[3], NULL, &bdev_ut_if); 630 CU_ASSERT(rc == 0); 631 632 bdev[4] = allocate_vbdev("bdev4"); 633 rc = spdk_bdev_module_claim_bdev(bdev[4], NULL, &bdev_ut_if); 634 CU_ASSERT(rc == 0); 635 636 bdev[5] = allocate_vbdev("bdev5"); 637 rc = spdk_bdev_module_claim_bdev(bdev[5], NULL, &bdev_ut_if); 638 CU_ASSERT(rc == 0); 639 640 bdev[6] = allocate_vbdev("bdev6"); 641 642 bdev[7] = allocate_vbdev("bdev7"); 643 644 bdev[8] = allocate_vbdev("bdev8"); 645 646 /* Open bdev0 read-only. This should succeed. */ 647 rc = spdk_bdev_open_ext("bdev0", false, bdev_ut_event_cb, NULL, &desc[0]); 648 CU_ASSERT(rc == 0); 649 SPDK_CU_ASSERT_FATAL(desc[0] != NULL); 650 CU_ASSERT(bdev[0] == spdk_bdev_desc_get_bdev(desc[0])); 651 spdk_bdev_close(desc[0]); 652 653 /* 654 * Open bdev1 read/write. This should fail since bdev1 has been claimed 655 * by a vbdev module. 656 */ 657 rc = spdk_bdev_open_ext("bdev1", true, bdev_ut_event_cb, NULL, &desc[1]); 658 CU_ASSERT(rc == -EPERM); 659 660 /* 661 * Open bdev4 read/write. This should fail since bdev3 has been claimed 662 * by a vbdev module. 663 */ 664 rc = spdk_bdev_open_ext("bdev4", true, bdev_ut_event_cb, NULL, &desc[4]); 665 CU_ASSERT(rc == -EPERM); 666 667 /* Open bdev4 read-only. This should succeed. */ 668 rc = spdk_bdev_open_ext("bdev4", false, bdev_ut_event_cb, NULL, &desc[4]); 669 CU_ASSERT(rc == 0); 670 SPDK_CU_ASSERT_FATAL(desc[4] != NULL); 671 CU_ASSERT(bdev[4] == spdk_bdev_desc_get_bdev(desc[4])); 672 spdk_bdev_close(desc[4]); 673 674 /* 675 * Open bdev8 read/write. This should succeed since it is a leaf 676 * bdev. 677 */ 678 rc = spdk_bdev_open_ext("bdev8", true, bdev_ut_event_cb, NULL, &desc[8]); 679 CU_ASSERT(rc == 0); 680 SPDK_CU_ASSERT_FATAL(desc[8] != NULL); 681 CU_ASSERT(bdev[8] == spdk_bdev_desc_get_bdev(desc[8])); 682 spdk_bdev_close(desc[8]); 683 684 /* 685 * Open bdev5 read/write. This should fail since bdev4 has been claimed 686 * by a vbdev module. 687 */ 688 rc = spdk_bdev_open_ext("bdev5", true, bdev_ut_event_cb, NULL, &desc[5]); 689 CU_ASSERT(rc == -EPERM); 690 691 /* Open bdev4 read-only. This should succeed. */ 692 rc = spdk_bdev_open_ext("bdev5", false, bdev_ut_event_cb, NULL, &desc[5]); 693 CU_ASSERT(rc == 0); 694 SPDK_CU_ASSERT_FATAL(desc[5] != NULL); 695 CU_ASSERT(bdev[5] == spdk_bdev_desc_get_bdev(desc[5])); 696 spdk_bdev_close(desc[5]); 697 698 free_vbdev(bdev[8]); 699 700 free_vbdev(bdev[5]); 701 free_vbdev(bdev[6]); 702 free_vbdev(bdev[7]); 703 704 free_vbdev(bdev[4]); 705 706 free_bdev(bdev[0]); 707 free_bdev(bdev[1]); 708 free_bdev(bdev[2]); 709 free_bdev(bdev[3]); 710 } 711 712 static void 713 claim_test(void) 714 { 715 struct spdk_bdev *bdev; 716 struct spdk_bdev_desc *desc, *open_desc; 717 int rc; 718 uint32_t count; 719 720 /* 721 * A vbdev that uses a read-only bdev may need it to remain read-only. 722 * To do so, it opens the bdev read-only, then claims it without 723 * passing a spdk_bdev_desc. 724 */ 725 bdev = allocate_bdev("bdev0"); 726 rc = spdk_bdev_open_ext("bdev0", false, bdev_ut_event_cb, NULL, &desc); 727 CU_ASSERT(rc == 0); 728 CU_ASSERT(desc->write == false); 729 730 rc = spdk_bdev_module_claim_bdev(bdev, NULL, &bdev_ut_if); 731 CU_ASSERT(rc == 0); 732 CU_ASSERT(bdev->internal.claim_module == &bdev_ut_if); 733 734 /* There should be only one open descriptor and it should still be ro */ 735 count = 0; 736 TAILQ_FOREACH(open_desc, &bdev->internal.open_descs, link) { 737 CU_ASSERT(open_desc == desc); 738 CU_ASSERT(!open_desc->write); 739 count++; 740 } 741 CU_ASSERT(count == 1); 742 743 /* A read-only bdev is upgraded to read-write if desc is passed. */ 744 spdk_bdev_module_release_bdev(bdev); 745 rc = spdk_bdev_module_claim_bdev(bdev, desc, &bdev_ut_if); 746 CU_ASSERT(rc == 0); 747 CU_ASSERT(bdev->internal.claim_module == &bdev_ut_if); 748 749 /* There should be only one open descriptor and it should be rw */ 750 count = 0; 751 TAILQ_FOREACH(open_desc, &bdev->internal.open_descs, link) { 752 CU_ASSERT(open_desc == desc); 753 CU_ASSERT(open_desc->write); 754 count++; 755 } 756 CU_ASSERT(count == 1); 757 758 spdk_bdev_close(desc); 759 free_bdev(bdev); 760 } 761 762 static void 763 bytes_to_blocks_test(void) 764 { 765 struct spdk_bdev bdev; 766 uint64_t offset_blocks, num_blocks; 767 768 memset(&bdev, 0, sizeof(bdev)); 769 770 bdev.blocklen = 512; 771 772 /* All parameters valid */ 773 offset_blocks = 0; 774 num_blocks = 0; 775 CU_ASSERT(bdev_bytes_to_blocks(&bdev, 512, &offset_blocks, 1024, &num_blocks) == 0); 776 CU_ASSERT(offset_blocks == 1); 777 CU_ASSERT(num_blocks == 2); 778 779 /* Offset not a block multiple */ 780 CU_ASSERT(bdev_bytes_to_blocks(&bdev, 3, &offset_blocks, 512, &num_blocks) != 0); 781 782 /* Length not a block multiple */ 783 CU_ASSERT(bdev_bytes_to_blocks(&bdev, 512, &offset_blocks, 3, &num_blocks) != 0); 784 785 /* In case blocklen not the power of two */ 786 bdev.blocklen = 100; 787 CU_ASSERT(bdev_bytes_to_blocks(&bdev, 100, &offset_blocks, 200, &num_blocks) == 0); 788 CU_ASSERT(offset_blocks == 1); 789 CU_ASSERT(num_blocks == 2); 790 791 /* Offset not a block multiple */ 792 CU_ASSERT(bdev_bytes_to_blocks(&bdev, 3, &offset_blocks, 100, &num_blocks) != 0); 793 794 /* Length not a block multiple */ 795 CU_ASSERT(bdev_bytes_to_blocks(&bdev, 100, &offset_blocks, 3, &num_blocks) != 0); 796 } 797 798 static void 799 num_blocks_test(void) 800 { 801 struct spdk_bdev bdev; 802 struct spdk_bdev_desc *desc = NULL; 803 int rc; 804 805 memset(&bdev, 0, sizeof(bdev)); 806 bdev.name = "num_blocks"; 807 bdev.fn_table = &fn_table; 808 bdev.module = &bdev_ut_if; 809 spdk_bdev_register(&bdev); 810 spdk_bdev_notify_blockcnt_change(&bdev, 50); 811 812 /* Growing block number */ 813 CU_ASSERT(spdk_bdev_notify_blockcnt_change(&bdev, 70) == 0); 814 /* Shrinking block number */ 815 CU_ASSERT(spdk_bdev_notify_blockcnt_change(&bdev, 30) == 0); 816 817 rc = spdk_bdev_open_ext("num_blocks", false, bdev_open_cb1, &desc, &desc); 818 CU_ASSERT(rc == 0); 819 SPDK_CU_ASSERT_FATAL(desc != NULL); 820 CU_ASSERT(&bdev == spdk_bdev_desc_get_bdev(desc)); 821 822 /* Growing block number */ 823 CU_ASSERT(spdk_bdev_notify_blockcnt_change(&bdev, 80) == 0); 824 /* Shrinking block number */ 825 CU_ASSERT(spdk_bdev_notify_blockcnt_change(&bdev, 20) != 0); 826 827 g_event_type1 = 0xFF; 828 /* Growing block number */ 829 CU_ASSERT(spdk_bdev_notify_blockcnt_change(&bdev, 90) == 0); 830 831 poll_threads(); 832 CU_ASSERT_EQUAL(g_event_type1, SPDK_BDEV_EVENT_RESIZE); 833 834 g_event_type1 = 0xFF; 835 /* Growing block number and closing */ 836 CU_ASSERT(spdk_bdev_notify_blockcnt_change(&bdev, 100) == 0); 837 838 spdk_bdev_close(desc); 839 spdk_bdev_unregister(&bdev, NULL, NULL); 840 841 poll_threads(); 842 843 /* Callback is not called for closed device */ 844 CU_ASSERT_EQUAL(g_event_type1, 0xFF); 845 } 846 847 static void 848 io_valid_test(void) 849 { 850 struct spdk_bdev bdev; 851 852 memset(&bdev, 0, sizeof(bdev)); 853 854 bdev.blocklen = 512; 855 CU_ASSERT(pthread_mutex_init(&bdev.internal.mutex, NULL) == 0); 856 857 spdk_bdev_notify_blockcnt_change(&bdev, 100); 858 859 /* All parameters valid */ 860 CU_ASSERT(bdev_io_valid_blocks(&bdev, 1, 2) == true); 861 862 /* Last valid block */ 863 CU_ASSERT(bdev_io_valid_blocks(&bdev, 99, 1) == true); 864 865 /* Offset past end of bdev */ 866 CU_ASSERT(bdev_io_valid_blocks(&bdev, 100, 1) == false); 867 868 /* Offset + length past end of bdev */ 869 CU_ASSERT(bdev_io_valid_blocks(&bdev, 99, 2) == false); 870 871 /* Offset near end of uint64_t range (2^64 - 1) */ 872 CU_ASSERT(bdev_io_valid_blocks(&bdev, 18446744073709551615ULL, 1) == false); 873 874 CU_ASSERT(pthread_mutex_destroy(&bdev.internal.mutex) == 0); 875 } 876 877 static void 878 alias_add_del_test(void) 879 { 880 struct spdk_bdev *bdev[3]; 881 int rc; 882 883 /* Creating and registering bdevs */ 884 bdev[0] = allocate_bdev("bdev0"); 885 SPDK_CU_ASSERT_FATAL(bdev[0] != 0); 886 887 bdev[1] = allocate_bdev("bdev1"); 888 SPDK_CU_ASSERT_FATAL(bdev[1] != 0); 889 890 bdev[2] = allocate_bdev("bdev2"); 891 SPDK_CU_ASSERT_FATAL(bdev[2] != 0); 892 893 poll_threads(); 894 895 /* 896 * Trying adding an alias identical to name. 897 * Alias is identical to name, so it can not be added to aliases list 898 */ 899 rc = spdk_bdev_alias_add(bdev[0], bdev[0]->name); 900 CU_ASSERT(rc == -EEXIST); 901 902 /* 903 * Trying to add empty alias, 904 * this one should fail 905 */ 906 rc = spdk_bdev_alias_add(bdev[0], NULL); 907 CU_ASSERT(rc == -EINVAL); 908 909 /* Trying adding same alias to two different registered bdevs */ 910 911 /* Alias is used first time, so this one should pass */ 912 rc = spdk_bdev_alias_add(bdev[0], "proper alias 0"); 913 CU_ASSERT(rc == 0); 914 915 /* Alias was added to another bdev, so this one should fail */ 916 rc = spdk_bdev_alias_add(bdev[1], "proper alias 0"); 917 CU_ASSERT(rc == -EEXIST); 918 919 /* Alias is used first time, so this one should pass */ 920 rc = spdk_bdev_alias_add(bdev[1], "proper alias 1"); 921 CU_ASSERT(rc == 0); 922 923 /* Trying removing an alias from registered bdevs */ 924 925 /* Alias is not on a bdev aliases list, so this one should fail */ 926 rc = spdk_bdev_alias_del(bdev[0], "not existing"); 927 CU_ASSERT(rc == -ENOENT); 928 929 /* Alias is present on a bdev aliases list, so this one should pass */ 930 rc = spdk_bdev_alias_del(bdev[0], "proper alias 0"); 931 CU_ASSERT(rc == 0); 932 933 /* Alias is present on a bdev aliases list, so this one should pass */ 934 rc = spdk_bdev_alias_del(bdev[1], "proper alias 1"); 935 CU_ASSERT(rc == 0); 936 937 /* Trying to remove name instead of alias, so this one should fail, name cannot be changed or removed */ 938 rc = spdk_bdev_alias_del(bdev[0], bdev[0]->name); 939 CU_ASSERT(rc != 0); 940 941 /* Trying to del all alias from empty alias list */ 942 spdk_bdev_alias_del_all(bdev[2]); 943 SPDK_CU_ASSERT_FATAL(TAILQ_EMPTY(&bdev[2]->aliases)); 944 945 /* Trying to del all alias from non-empty alias list */ 946 rc = spdk_bdev_alias_add(bdev[2], "alias0"); 947 CU_ASSERT(rc == 0); 948 rc = spdk_bdev_alias_add(bdev[2], "alias1"); 949 CU_ASSERT(rc == 0); 950 spdk_bdev_alias_del_all(bdev[2]); 951 CU_ASSERT(TAILQ_EMPTY(&bdev[2]->aliases)); 952 953 /* Unregister and free bdevs */ 954 spdk_bdev_unregister(bdev[0], NULL, NULL); 955 spdk_bdev_unregister(bdev[1], NULL, NULL); 956 spdk_bdev_unregister(bdev[2], NULL, NULL); 957 958 poll_threads(); 959 960 free(bdev[0]); 961 free(bdev[1]); 962 free(bdev[2]); 963 } 964 965 static void 966 io_done(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg) 967 { 968 g_io_done = true; 969 g_io_status = bdev_io->internal.status; 970 if ((bdev_io->type == SPDK_BDEV_IO_TYPE_ZCOPY) && 971 (bdev_io->u.bdev.zcopy.start)) { 972 g_zcopy_bdev_io = bdev_io; 973 } else { 974 spdk_bdev_free_io(bdev_io); 975 g_zcopy_bdev_io = NULL; 976 } 977 } 978 979 static void 980 bdev_init_cb(void *arg, int rc) 981 { 982 CU_ASSERT(rc == 0); 983 } 984 985 static void 986 bdev_fini_cb(void *arg) 987 { 988 } 989 990 struct bdev_ut_io_wait_entry { 991 struct spdk_bdev_io_wait_entry entry; 992 struct spdk_io_channel *io_ch; 993 struct spdk_bdev_desc *desc; 994 bool submitted; 995 }; 996 997 static void 998 io_wait_cb(void *arg) 999 { 1000 struct bdev_ut_io_wait_entry *entry = arg; 1001 int rc; 1002 1003 rc = spdk_bdev_read_blocks(entry->desc, entry->io_ch, NULL, 0, 1, io_done, NULL); 1004 CU_ASSERT(rc == 0); 1005 entry->submitted = true; 1006 } 1007 1008 static void 1009 bdev_io_types_test(void) 1010 { 1011 struct spdk_bdev *bdev; 1012 struct spdk_bdev_desc *desc = NULL; 1013 struct spdk_io_channel *io_ch; 1014 struct spdk_bdev_opts bdev_opts = {}; 1015 int rc; 1016 1017 spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts)); 1018 bdev_opts.bdev_io_pool_size = 4; 1019 bdev_opts.bdev_io_cache_size = 2; 1020 1021 rc = spdk_bdev_set_opts(&bdev_opts); 1022 CU_ASSERT(rc == 0); 1023 spdk_bdev_initialize(bdev_init_cb, NULL); 1024 poll_threads(); 1025 1026 bdev = allocate_bdev("bdev0"); 1027 1028 rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc); 1029 CU_ASSERT(rc == 0); 1030 poll_threads(); 1031 SPDK_CU_ASSERT_FATAL(desc != NULL); 1032 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 1033 io_ch = spdk_bdev_get_io_channel(desc); 1034 CU_ASSERT(io_ch != NULL); 1035 1036 /* WRITE and WRITE ZEROES are not supported */ 1037 ut_enable_io_type(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, false); 1038 ut_enable_io_type(SPDK_BDEV_IO_TYPE_WRITE, false); 1039 rc = spdk_bdev_write_zeroes_blocks(desc, io_ch, 0, 128, io_done, NULL); 1040 CU_ASSERT(rc == -ENOTSUP); 1041 ut_enable_io_type(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, true); 1042 ut_enable_io_type(SPDK_BDEV_IO_TYPE_WRITE, true); 1043 1044 /* NVME_IO, NVME_IO_MD and NVME_ADMIN are not supported */ 1045 ut_enable_io_type(SPDK_BDEV_IO_TYPE_NVME_IO, false); 1046 ut_enable_io_type(SPDK_BDEV_IO_TYPE_NVME_IO_MD, false); 1047 ut_enable_io_type(SPDK_BDEV_IO_TYPE_NVME_ADMIN, false); 1048 rc = spdk_bdev_nvme_io_passthru(desc, io_ch, NULL, NULL, 0, NULL, NULL); 1049 CU_ASSERT(rc == -ENOTSUP); 1050 rc = spdk_bdev_nvme_io_passthru_md(desc, io_ch, NULL, NULL, 0, NULL, 0, NULL, NULL); 1051 CU_ASSERT(rc == -ENOTSUP); 1052 rc = spdk_bdev_nvme_admin_passthru(desc, io_ch, NULL, NULL, 0, NULL, NULL); 1053 CU_ASSERT(rc == -ENOTSUP); 1054 ut_enable_io_type(SPDK_BDEV_IO_TYPE_NVME_IO, true); 1055 ut_enable_io_type(SPDK_BDEV_IO_TYPE_NVME_IO_MD, true); 1056 ut_enable_io_type(SPDK_BDEV_IO_TYPE_NVME_ADMIN, true); 1057 1058 spdk_put_io_channel(io_ch); 1059 spdk_bdev_close(desc); 1060 free_bdev(bdev); 1061 spdk_bdev_finish(bdev_fini_cb, NULL); 1062 poll_threads(); 1063 } 1064 1065 static void 1066 bdev_io_wait_test(void) 1067 { 1068 struct spdk_bdev *bdev; 1069 struct spdk_bdev_desc *desc = NULL; 1070 struct spdk_io_channel *io_ch; 1071 struct spdk_bdev_opts bdev_opts = {}; 1072 struct bdev_ut_io_wait_entry io_wait_entry; 1073 struct bdev_ut_io_wait_entry io_wait_entry2; 1074 int rc; 1075 1076 spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts)); 1077 bdev_opts.bdev_io_pool_size = 4; 1078 bdev_opts.bdev_io_cache_size = 2; 1079 1080 rc = spdk_bdev_set_opts(&bdev_opts); 1081 CU_ASSERT(rc == 0); 1082 spdk_bdev_initialize(bdev_init_cb, NULL); 1083 poll_threads(); 1084 1085 bdev = allocate_bdev("bdev0"); 1086 1087 rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc); 1088 CU_ASSERT(rc == 0); 1089 poll_threads(); 1090 SPDK_CU_ASSERT_FATAL(desc != NULL); 1091 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 1092 io_ch = spdk_bdev_get_io_channel(desc); 1093 CU_ASSERT(io_ch != NULL); 1094 1095 rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, NULL); 1096 CU_ASSERT(rc == 0); 1097 rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, NULL); 1098 CU_ASSERT(rc == 0); 1099 rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, NULL); 1100 CU_ASSERT(rc == 0); 1101 rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, NULL); 1102 CU_ASSERT(rc == 0); 1103 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 4); 1104 1105 rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, NULL); 1106 CU_ASSERT(rc == -ENOMEM); 1107 1108 io_wait_entry.entry.bdev = bdev; 1109 io_wait_entry.entry.cb_fn = io_wait_cb; 1110 io_wait_entry.entry.cb_arg = &io_wait_entry; 1111 io_wait_entry.io_ch = io_ch; 1112 io_wait_entry.desc = desc; 1113 io_wait_entry.submitted = false; 1114 /* Cannot use the same io_wait_entry for two different calls. */ 1115 memcpy(&io_wait_entry2, &io_wait_entry, sizeof(io_wait_entry)); 1116 io_wait_entry2.entry.cb_arg = &io_wait_entry2; 1117 1118 /* Queue two I/O waits. */ 1119 rc = spdk_bdev_queue_io_wait(bdev, io_ch, &io_wait_entry.entry); 1120 CU_ASSERT(rc == 0); 1121 CU_ASSERT(io_wait_entry.submitted == false); 1122 rc = spdk_bdev_queue_io_wait(bdev, io_ch, &io_wait_entry2.entry); 1123 CU_ASSERT(rc == 0); 1124 CU_ASSERT(io_wait_entry2.submitted == false); 1125 1126 stub_complete_io(1); 1127 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 4); 1128 CU_ASSERT(io_wait_entry.submitted == true); 1129 CU_ASSERT(io_wait_entry2.submitted == false); 1130 1131 stub_complete_io(1); 1132 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 4); 1133 CU_ASSERT(io_wait_entry2.submitted == true); 1134 1135 stub_complete_io(4); 1136 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 1137 1138 spdk_put_io_channel(io_ch); 1139 spdk_bdev_close(desc); 1140 free_bdev(bdev); 1141 spdk_bdev_finish(bdev_fini_cb, NULL); 1142 poll_threads(); 1143 } 1144 1145 static void 1146 bdev_io_spans_split_test(void) 1147 { 1148 struct spdk_bdev bdev; 1149 struct spdk_bdev_io bdev_io; 1150 struct iovec iov[BDEV_IO_NUM_CHILD_IOV]; 1151 1152 memset(&bdev, 0, sizeof(bdev)); 1153 bdev_io.u.bdev.iovs = iov; 1154 1155 bdev_io.type = SPDK_BDEV_IO_TYPE_READ; 1156 bdev.optimal_io_boundary = 0; 1157 bdev.max_segment_size = 0; 1158 bdev.max_num_segments = 0; 1159 bdev_io.bdev = &bdev; 1160 1161 /* bdev has no optimal_io_boundary and max_size set - so this should return false. */ 1162 CU_ASSERT(bdev_io_should_split(&bdev_io) == false); 1163 1164 bdev.split_on_optimal_io_boundary = true; 1165 bdev.optimal_io_boundary = 32; 1166 bdev_io.type = SPDK_BDEV_IO_TYPE_RESET; 1167 1168 /* RESETs are not based on LBAs - so this should return false. */ 1169 CU_ASSERT(bdev_io_should_split(&bdev_io) == false); 1170 1171 bdev_io.type = SPDK_BDEV_IO_TYPE_READ; 1172 bdev_io.u.bdev.offset_blocks = 0; 1173 bdev_io.u.bdev.num_blocks = 32; 1174 1175 /* This I/O run right up to, but does not cross, the boundary - so this should return false. */ 1176 CU_ASSERT(bdev_io_should_split(&bdev_io) == false); 1177 1178 bdev_io.u.bdev.num_blocks = 33; 1179 1180 /* This I/O spans a boundary. */ 1181 CU_ASSERT(bdev_io_should_split(&bdev_io) == true); 1182 1183 bdev_io.u.bdev.num_blocks = 32; 1184 bdev.max_segment_size = 512 * 32; 1185 bdev.max_num_segments = 1; 1186 bdev_io.u.bdev.iovcnt = 1; 1187 iov[0].iov_len = 512; 1188 1189 /* Does not cross and exceed max_size or max_segs */ 1190 CU_ASSERT(bdev_io_should_split(&bdev_io) == false); 1191 1192 bdev.split_on_optimal_io_boundary = false; 1193 bdev.max_segment_size = 512; 1194 bdev.max_num_segments = 1; 1195 bdev_io.u.bdev.iovcnt = 2; 1196 1197 /* Exceed max_segs */ 1198 CU_ASSERT(bdev_io_should_split(&bdev_io) == true); 1199 1200 bdev.max_num_segments = 2; 1201 iov[0].iov_len = 513; 1202 iov[1].iov_len = 512; 1203 1204 /* Exceed max_sizes */ 1205 CU_ASSERT(bdev_io_should_split(&bdev_io) == true); 1206 } 1207 1208 static void 1209 bdev_io_boundary_split_test(void) 1210 { 1211 struct spdk_bdev *bdev; 1212 struct spdk_bdev_desc *desc = NULL; 1213 struct spdk_io_channel *io_ch; 1214 struct spdk_bdev_opts bdev_opts = {}; 1215 struct iovec iov[BDEV_IO_NUM_CHILD_IOV * 2]; 1216 struct ut_expected_io *expected_io; 1217 void *md_buf = (void *)0xFF000000; 1218 uint64_t i; 1219 int rc; 1220 1221 spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts)); 1222 bdev_opts.bdev_io_pool_size = 512; 1223 bdev_opts.bdev_io_cache_size = 64; 1224 1225 rc = spdk_bdev_set_opts(&bdev_opts); 1226 CU_ASSERT(rc == 0); 1227 spdk_bdev_initialize(bdev_init_cb, NULL); 1228 1229 bdev = allocate_bdev("bdev0"); 1230 1231 rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc); 1232 CU_ASSERT(rc == 0); 1233 SPDK_CU_ASSERT_FATAL(desc != NULL); 1234 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 1235 io_ch = spdk_bdev_get_io_channel(desc); 1236 CU_ASSERT(io_ch != NULL); 1237 1238 bdev->optimal_io_boundary = 16; 1239 bdev->split_on_optimal_io_boundary = false; 1240 1241 g_io_done = false; 1242 1243 /* First test that the I/O does not get split if split_on_optimal_io_boundary == false. */ 1244 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 14, 8, 1); 1245 ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 8 * 512); 1246 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1247 1248 rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 8, io_done, NULL); 1249 CU_ASSERT(rc == 0); 1250 CU_ASSERT(g_io_done == false); 1251 1252 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 1253 stub_complete_io(1); 1254 CU_ASSERT(g_io_done == true); 1255 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 1256 1257 bdev->split_on_optimal_io_boundary = true; 1258 bdev->md_interleave = false; 1259 bdev->md_len = 8; 1260 1261 /* Now test that a single-vector command is split correctly. 1262 * Offset 14, length 8, payload 0xF000 1263 * Child - Offset 14, length 2, payload 0xF000 1264 * Child - Offset 16, length 6, payload 0xF000 + 2 * 512 1265 * 1266 * Set up the expected values before calling spdk_bdev_read_blocks 1267 */ 1268 g_io_done = false; 1269 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 14, 2, 1); 1270 expected_io->md_buf = md_buf; 1271 ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 2 * 512); 1272 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1273 1274 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 16, 6, 1); 1275 expected_io->md_buf = md_buf + 2 * 8; 1276 ut_expected_io_set_iov(expected_io, 0, (void *)(0xF000 + 2 * 512), 6 * 512); 1277 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1278 1279 /* spdk_bdev_read_blocks will submit the first child immediately. */ 1280 rc = spdk_bdev_read_blocks_with_md(desc, io_ch, (void *)0xF000, md_buf, 1281 14, 8, io_done, NULL); 1282 CU_ASSERT(rc == 0); 1283 CU_ASSERT(g_io_done == false); 1284 1285 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 1286 stub_complete_io(2); 1287 CU_ASSERT(g_io_done == true); 1288 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 1289 1290 /* Now set up a more complex, multi-vector command that needs to be split, 1291 * including splitting iovecs. 1292 */ 1293 iov[0].iov_base = (void *)0x10000; 1294 iov[0].iov_len = 512; 1295 iov[1].iov_base = (void *)0x20000; 1296 iov[1].iov_len = 20 * 512; 1297 iov[2].iov_base = (void *)0x30000; 1298 iov[2].iov_len = 11 * 512; 1299 1300 g_io_done = false; 1301 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 14, 2, 2); 1302 expected_io->md_buf = md_buf; 1303 ut_expected_io_set_iov(expected_io, 0, (void *)0x10000, 512); 1304 ut_expected_io_set_iov(expected_io, 1, (void *)0x20000, 512); 1305 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1306 1307 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 16, 16, 1); 1308 expected_io->md_buf = md_buf + 2 * 8; 1309 ut_expected_io_set_iov(expected_io, 0, (void *)(0x20000 + 512), 16 * 512); 1310 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1311 1312 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 32, 14, 2); 1313 expected_io->md_buf = md_buf + 18 * 8; 1314 ut_expected_io_set_iov(expected_io, 0, (void *)(0x20000 + 17 * 512), 3 * 512); 1315 ut_expected_io_set_iov(expected_io, 1, (void *)0x30000, 11 * 512); 1316 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1317 1318 rc = spdk_bdev_writev_blocks_with_md(desc, io_ch, iov, 3, md_buf, 1319 14, 32, io_done, NULL); 1320 CU_ASSERT(rc == 0); 1321 CU_ASSERT(g_io_done == false); 1322 1323 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 3); 1324 stub_complete_io(3); 1325 CU_ASSERT(g_io_done == true); 1326 1327 /* Test multi vector command that needs to be split by strip and then needs to be 1328 * split further due to the capacity of child iovs. 1329 */ 1330 for (i = 0; i < BDEV_IO_NUM_CHILD_IOV * 2; i++) { 1331 iov[i].iov_base = (void *)((i + 1) * 0x10000); 1332 iov[i].iov_len = 512; 1333 } 1334 1335 bdev->optimal_io_boundary = BDEV_IO_NUM_CHILD_IOV; 1336 g_io_done = false; 1337 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0, BDEV_IO_NUM_CHILD_IOV, 1338 BDEV_IO_NUM_CHILD_IOV); 1339 expected_io->md_buf = md_buf; 1340 for (i = 0; i < BDEV_IO_NUM_CHILD_IOV; i++) { 1341 ut_expected_io_set_iov(expected_io, i, (void *)((i + 1) * 0x10000), 512); 1342 } 1343 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1344 1345 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, BDEV_IO_NUM_CHILD_IOV, 1346 BDEV_IO_NUM_CHILD_IOV, BDEV_IO_NUM_CHILD_IOV); 1347 expected_io->md_buf = md_buf + BDEV_IO_NUM_CHILD_IOV * 8; 1348 for (i = 0; i < BDEV_IO_NUM_CHILD_IOV; i++) { 1349 ut_expected_io_set_iov(expected_io, i, 1350 (void *)((i + 1 + BDEV_IO_NUM_CHILD_IOV) * 0x10000), 512); 1351 } 1352 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1353 1354 rc = spdk_bdev_readv_blocks_with_md(desc, io_ch, iov, BDEV_IO_NUM_CHILD_IOV * 2, md_buf, 1355 0, BDEV_IO_NUM_CHILD_IOV * 2, io_done, NULL); 1356 CU_ASSERT(rc == 0); 1357 CU_ASSERT(g_io_done == false); 1358 1359 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 1360 stub_complete_io(1); 1361 CU_ASSERT(g_io_done == false); 1362 1363 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 1364 stub_complete_io(1); 1365 CU_ASSERT(g_io_done == true); 1366 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 1367 1368 /* Test multi vector command that needs to be split by strip and then needs to be 1369 * split further due to the capacity of child iovs. In this case, the length of 1370 * the rest of iovec array with an I/O boundary is the multiple of block size. 1371 */ 1372 1373 /* Fill iovec array for exactly one boundary. The iovec cnt for this boundary 1374 * is BDEV_IO_NUM_CHILD_IOV + 1, which exceeds the capacity of child iovs. 1375 */ 1376 for (i = 0; i < BDEV_IO_NUM_CHILD_IOV - 2; i++) { 1377 iov[i].iov_base = (void *)((i + 1) * 0x10000); 1378 iov[i].iov_len = 512; 1379 } 1380 for (i = BDEV_IO_NUM_CHILD_IOV - 2; i < BDEV_IO_NUM_CHILD_IOV; i++) { 1381 iov[i].iov_base = (void *)((i + 1) * 0x10000); 1382 iov[i].iov_len = 256; 1383 } 1384 iov[BDEV_IO_NUM_CHILD_IOV].iov_base = (void *)((BDEV_IO_NUM_CHILD_IOV + 1) * 0x10000); 1385 iov[BDEV_IO_NUM_CHILD_IOV].iov_len = 512; 1386 1387 /* Add an extra iovec to trigger split */ 1388 iov[BDEV_IO_NUM_CHILD_IOV + 1].iov_base = (void *)((BDEV_IO_NUM_CHILD_IOV + 2) * 0x10000); 1389 iov[BDEV_IO_NUM_CHILD_IOV + 1].iov_len = 512; 1390 1391 bdev->optimal_io_boundary = BDEV_IO_NUM_CHILD_IOV; 1392 g_io_done = false; 1393 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0, 1394 BDEV_IO_NUM_CHILD_IOV - 1, BDEV_IO_NUM_CHILD_IOV); 1395 expected_io->md_buf = md_buf; 1396 for (i = 0; i < BDEV_IO_NUM_CHILD_IOV - 2; i++) { 1397 ut_expected_io_set_iov(expected_io, i, 1398 (void *)((i + 1) * 0x10000), 512); 1399 } 1400 for (i = BDEV_IO_NUM_CHILD_IOV - 2; i < BDEV_IO_NUM_CHILD_IOV; i++) { 1401 ut_expected_io_set_iov(expected_io, i, 1402 (void *)((i + 1) * 0x10000), 256); 1403 } 1404 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1405 1406 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, BDEV_IO_NUM_CHILD_IOV - 1, 1407 1, 1); 1408 expected_io->md_buf = md_buf + (BDEV_IO_NUM_CHILD_IOV - 1) * 8; 1409 ut_expected_io_set_iov(expected_io, 0, 1410 (void *)((BDEV_IO_NUM_CHILD_IOV + 1) * 0x10000), 512); 1411 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1412 1413 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, BDEV_IO_NUM_CHILD_IOV, 1414 1, 1); 1415 expected_io->md_buf = md_buf + BDEV_IO_NUM_CHILD_IOV * 8; 1416 ut_expected_io_set_iov(expected_io, 0, 1417 (void *)((BDEV_IO_NUM_CHILD_IOV + 2) * 0x10000), 512); 1418 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1419 1420 rc = spdk_bdev_readv_blocks_with_md(desc, io_ch, iov, BDEV_IO_NUM_CHILD_IOV + 2, md_buf, 1421 0, BDEV_IO_NUM_CHILD_IOV + 1, io_done, NULL); 1422 CU_ASSERT(rc == 0); 1423 CU_ASSERT(g_io_done == false); 1424 1425 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 1426 stub_complete_io(1); 1427 CU_ASSERT(g_io_done == false); 1428 1429 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 1430 stub_complete_io(2); 1431 CU_ASSERT(g_io_done == true); 1432 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 1433 1434 /* Test multi vector command that needs to be split by strip and then needs to be 1435 * split further due to the capacity of child iovs, the child request offset should 1436 * be rewind to last aligned offset and go success without error. 1437 */ 1438 for (i = 0; i < BDEV_IO_NUM_CHILD_IOV - 1; i++) { 1439 iov[i].iov_base = (void *)((i + 1) * 0x10000); 1440 iov[i].iov_len = 512; 1441 } 1442 iov[BDEV_IO_NUM_CHILD_IOV - 1].iov_base = (void *)(BDEV_IO_NUM_CHILD_IOV * 0x10000); 1443 iov[BDEV_IO_NUM_CHILD_IOV - 1].iov_len = 256; 1444 1445 iov[BDEV_IO_NUM_CHILD_IOV].iov_base = (void *)((BDEV_IO_NUM_CHILD_IOV + 1) * 0x10000); 1446 iov[BDEV_IO_NUM_CHILD_IOV].iov_len = 256; 1447 1448 iov[BDEV_IO_NUM_CHILD_IOV + 1].iov_base = (void *)((BDEV_IO_NUM_CHILD_IOV + 2) * 0x10000); 1449 iov[BDEV_IO_NUM_CHILD_IOV + 1].iov_len = 512; 1450 1451 bdev->optimal_io_boundary = BDEV_IO_NUM_CHILD_IOV; 1452 g_io_done = false; 1453 g_io_status = 0; 1454 /* The first expected io should be start from offset 0 to BDEV_IO_NUM_CHILD_IOV - 1 */ 1455 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0, 1456 BDEV_IO_NUM_CHILD_IOV - 1, BDEV_IO_NUM_CHILD_IOV - 1); 1457 expected_io->md_buf = md_buf; 1458 for (i = 0; i < BDEV_IO_NUM_CHILD_IOV - 1; i++) { 1459 ut_expected_io_set_iov(expected_io, i, 1460 (void *)((i + 1) * 0x10000), 512); 1461 } 1462 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1463 /* The second expected io should be start from offset BDEV_IO_NUM_CHILD_IOV - 1 to BDEV_IO_NUM_CHILD_IOV */ 1464 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, BDEV_IO_NUM_CHILD_IOV - 1, 1465 1, 2); 1466 expected_io->md_buf = md_buf + (BDEV_IO_NUM_CHILD_IOV - 1) * 8; 1467 ut_expected_io_set_iov(expected_io, 0, 1468 (void *)(BDEV_IO_NUM_CHILD_IOV * 0x10000), 256); 1469 ut_expected_io_set_iov(expected_io, 1, 1470 (void *)((BDEV_IO_NUM_CHILD_IOV + 1) * 0x10000), 256); 1471 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1472 /* The third expected io should be start from offset BDEV_IO_NUM_CHILD_IOV to BDEV_IO_NUM_CHILD_IOV + 1 */ 1473 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, BDEV_IO_NUM_CHILD_IOV, 1474 1, 1); 1475 expected_io->md_buf = md_buf + BDEV_IO_NUM_CHILD_IOV * 8; 1476 ut_expected_io_set_iov(expected_io, 0, 1477 (void *)((BDEV_IO_NUM_CHILD_IOV + 2) * 0x10000), 512); 1478 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1479 1480 rc = spdk_bdev_readv_blocks_with_md(desc, io_ch, iov, BDEV_IO_NUM_CHILD_IOV * 2, md_buf, 1481 0, BDEV_IO_NUM_CHILD_IOV + 1, io_done, NULL); 1482 CU_ASSERT(rc == 0); 1483 CU_ASSERT(g_io_done == false); 1484 1485 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 1486 stub_complete_io(1); 1487 CU_ASSERT(g_io_done == false); 1488 1489 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 1490 stub_complete_io(2); 1491 CU_ASSERT(g_io_done == true); 1492 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 1493 1494 /* Test multi vector command that needs to be split due to the IO boundary and 1495 * the capacity of child iovs. Especially test the case when the command is 1496 * split due to the capacity of child iovs, the tail address is not aligned with 1497 * block size and is rewinded to the aligned address. 1498 * 1499 * The iovecs used in read request is complex but is based on the data 1500 * collected in the real issue. We change the base addresses but keep the lengths 1501 * not to loose the credibility of the test. 1502 */ 1503 bdev->optimal_io_boundary = 128; 1504 g_io_done = false; 1505 g_io_status = 0; 1506 1507 for (i = 0; i < 31; i++) { 1508 iov[i].iov_base = (void *)(0xFEED0000000 + (i << 20)); 1509 iov[i].iov_len = 1024; 1510 } 1511 iov[31].iov_base = (void *)0xFEED1F00000; 1512 iov[31].iov_len = 32768; 1513 iov[32].iov_base = (void *)0xFEED2000000; 1514 iov[32].iov_len = 160; 1515 iov[33].iov_base = (void *)0xFEED2100000; 1516 iov[33].iov_len = 4096; 1517 iov[34].iov_base = (void *)0xFEED2200000; 1518 iov[34].iov_len = 4096; 1519 iov[35].iov_base = (void *)0xFEED2300000; 1520 iov[35].iov_len = 4096; 1521 iov[36].iov_base = (void *)0xFEED2400000; 1522 iov[36].iov_len = 4096; 1523 iov[37].iov_base = (void *)0xFEED2500000; 1524 iov[37].iov_len = 4096; 1525 iov[38].iov_base = (void *)0xFEED2600000; 1526 iov[38].iov_len = 4096; 1527 iov[39].iov_base = (void *)0xFEED2700000; 1528 iov[39].iov_len = 4096; 1529 iov[40].iov_base = (void *)0xFEED2800000; 1530 iov[40].iov_len = 4096; 1531 iov[41].iov_base = (void *)0xFEED2900000; 1532 iov[41].iov_len = 4096; 1533 iov[42].iov_base = (void *)0xFEED2A00000; 1534 iov[42].iov_len = 4096; 1535 iov[43].iov_base = (void *)0xFEED2B00000; 1536 iov[43].iov_len = 12288; 1537 iov[44].iov_base = (void *)0xFEED2C00000; 1538 iov[44].iov_len = 8192; 1539 iov[45].iov_base = (void *)0xFEED2F00000; 1540 iov[45].iov_len = 4096; 1541 iov[46].iov_base = (void *)0xFEED3000000; 1542 iov[46].iov_len = 4096; 1543 iov[47].iov_base = (void *)0xFEED3100000; 1544 iov[47].iov_len = 4096; 1545 iov[48].iov_base = (void *)0xFEED3200000; 1546 iov[48].iov_len = 24576; 1547 iov[49].iov_base = (void *)0xFEED3300000; 1548 iov[49].iov_len = 16384; 1549 iov[50].iov_base = (void *)0xFEED3400000; 1550 iov[50].iov_len = 12288; 1551 iov[51].iov_base = (void *)0xFEED3500000; 1552 iov[51].iov_len = 4096; 1553 iov[52].iov_base = (void *)0xFEED3600000; 1554 iov[52].iov_len = 4096; 1555 iov[53].iov_base = (void *)0xFEED3700000; 1556 iov[53].iov_len = 4096; 1557 iov[54].iov_base = (void *)0xFEED3800000; 1558 iov[54].iov_len = 28672; 1559 iov[55].iov_base = (void *)0xFEED3900000; 1560 iov[55].iov_len = 20480; 1561 iov[56].iov_base = (void *)0xFEED3A00000; 1562 iov[56].iov_len = 4096; 1563 iov[57].iov_base = (void *)0xFEED3B00000; 1564 iov[57].iov_len = 12288; 1565 iov[58].iov_base = (void *)0xFEED3C00000; 1566 iov[58].iov_len = 4096; 1567 iov[59].iov_base = (void *)0xFEED3D00000; 1568 iov[59].iov_len = 4096; 1569 iov[60].iov_base = (void *)0xFEED3E00000; 1570 iov[60].iov_len = 352; 1571 1572 /* The 1st child IO must be from iov[0] to iov[31] split by the capacity 1573 * of child iovs, 1574 */ 1575 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0, 126, 32); 1576 expected_io->md_buf = md_buf; 1577 for (i = 0; i < 32; i++) { 1578 ut_expected_io_set_iov(expected_io, i, iov[i].iov_base, iov[i].iov_len); 1579 } 1580 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1581 1582 /* The 2nd child IO must be from iov[32] to the first 864 bytes of iov[33] 1583 * split by the IO boundary requirement. 1584 */ 1585 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 126, 2, 2); 1586 expected_io->md_buf = md_buf + 126 * 8; 1587 ut_expected_io_set_iov(expected_io, 0, iov[32].iov_base, iov[32].iov_len); 1588 ut_expected_io_set_iov(expected_io, 1, iov[33].iov_base, 864); 1589 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1590 1591 /* The 3rd child IO must be from the remaining 3232 bytes of iov[33] to 1592 * the first 864 bytes of iov[46] split by the IO boundary requirement. 1593 */ 1594 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 128, 128, 14); 1595 expected_io->md_buf = md_buf + 128 * 8; 1596 ut_expected_io_set_iov(expected_io, 0, (void *)((uintptr_t)iov[33].iov_base + 864), 1597 iov[33].iov_len - 864); 1598 ut_expected_io_set_iov(expected_io, 1, iov[34].iov_base, iov[34].iov_len); 1599 ut_expected_io_set_iov(expected_io, 2, iov[35].iov_base, iov[35].iov_len); 1600 ut_expected_io_set_iov(expected_io, 3, iov[36].iov_base, iov[36].iov_len); 1601 ut_expected_io_set_iov(expected_io, 4, iov[37].iov_base, iov[37].iov_len); 1602 ut_expected_io_set_iov(expected_io, 5, iov[38].iov_base, iov[38].iov_len); 1603 ut_expected_io_set_iov(expected_io, 6, iov[39].iov_base, iov[39].iov_len); 1604 ut_expected_io_set_iov(expected_io, 7, iov[40].iov_base, iov[40].iov_len); 1605 ut_expected_io_set_iov(expected_io, 8, iov[41].iov_base, iov[41].iov_len); 1606 ut_expected_io_set_iov(expected_io, 9, iov[42].iov_base, iov[42].iov_len); 1607 ut_expected_io_set_iov(expected_io, 10, iov[43].iov_base, iov[43].iov_len); 1608 ut_expected_io_set_iov(expected_io, 11, iov[44].iov_base, iov[44].iov_len); 1609 ut_expected_io_set_iov(expected_io, 12, iov[45].iov_base, iov[45].iov_len); 1610 ut_expected_io_set_iov(expected_io, 13, iov[46].iov_base, 864); 1611 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1612 1613 /* The 4th child IO must be from the remaining 3232 bytes of iov[46] to the 1614 * first 864 bytes of iov[52] split by the IO boundary requirement. 1615 */ 1616 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 256, 128, 7); 1617 expected_io->md_buf = md_buf + 256 * 8; 1618 ut_expected_io_set_iov(expected_io, 0, (void *)((uintptr_t)iov[46].iov_base + 864), 1619 iov[46].iov_len - 864); 1620 ut_expected_io_set_iov(expected_io, 1, iov[47].iov_base, iov[47].iov_len); 1621 ut_expected_io_set_iov(expected_io, 2, iov[48].iov_base, iov[48].iov_len); 1622 ut_expected_io_set_iov(expected_io, 3, iov[49].iov_base, iov[49].iov_len); 1623 ut_expected_io_set_iov(expected_io, 4, iov[50].iov_base, iov[50].iov_len); 1624 ut_expected_io_set_iov(expected_io, 5, iov[51].iov_base, iov[51].iov_len); 1625 ut_expected_io_set_iov(expected_io, 6, iov[52].iov_base, 864); 1626 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1627 1628 /* The 5th child IO must be from the remaining 3232 bytes of iov[52] to 1629 * the first 4096 bytes of iov[57] split by the IO boundary requirement. 1630 */ 1631 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 384, 128, 6); 1632 expected_io->md_buf = md_buf + 384 * 8; 1633 ut_expected_io_set_iov(expected_io, 0, (void *)((uintptr_t)iov[52].iov_base + 864), 1634 iov[52].iov_len - 864); 1635 ut_expected_io_set_iov(expected_io, 1, iov[53].iov_base, iov[53].iov_len); 1636 ut_expected_io_set_iov(expected_io, 2, iov[54].iov_base, iov[54].iov_len); 1637 ut_expected_io_set_iov(expected_io, 3, iov[55].iov_base, iov[55].iov_len); 1638 ut_expected_io_set_iov(expected_io, 4, iov[56].iov_base, iov[56].iov_len); 1639 ut_expected_io_set_iov(expected_io, 5, iov[57].iov_base, 4960); 1640 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1641 1642 /* The 6th child IO must be from the remaining 7328 bytes of iov[57] 1643 * to the first 3936 bytes of iov[58] split by the capacity of child iovs. 1644 */ 1645 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 512, 30, 3); 1646 expected_io->md_buf = md_buf + 512 * 8; 1647 ut_expected_io_set_iov(expected_io, 0, (void *)((uintptr_t)iov[57].iov_base + 4960), 1648 iov[57].iov_len - 4960); 1649 ut_expected_io_set_iov(expected_io, 1, iov[58].iov_base, iov[58].iov_len); 1650 ut_expected_io_set_iov(expected_io, 2, iov[59].iov_base, 3936); 1651 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1652 1653 /* The 7th child IO is from the remaining 160 bytes of iov[59] and iov[60]. */ 1654 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 542, 1, 2); 1655 expected_io->md_buf = md_buf + 542 * 8; 1656 ut_expected_io_set_iov(expected_io, 0, (void *)((uintptr_t)iov[59].iov_base + 3936), 1657 iov[59].iov_len - 3936); 1658 ut_expected_io_set_iov(expected_io, 1, iov[60].iov_base, iov[60].iov_len); 1659 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1660 1661 rc = spdk_bdev_readv_blocks_with_md(desc, io_ch, iov, 61, md_buf, 1662 0, 543, io_done, NULL); 1663 CU_ASSERT(rc == 0); 1664 CU_ASSERT(g_io_done == false); 1665 1666 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 1667 stub_complete_io(1); 1668 CU_ASSERT(g_io_done == false); 1669 1670 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 5); 1671 stub_complete_io(5); 1672 CU_ASSERT(g_io_done == false); 1673 1674 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 1675 stub_complete_io(1); 1676 CU_ASSERT(g_io_done == true); 1677 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 1678 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS); 1679 1680 /* Test a WRITE_ZEROES that would span an I/O boundary. WRITE_ZEROES should not be 1681 * split, so test that. 1682 */ 1683 bdev->optimal_io_boundary = 15; 1684 g_io_done = false; 1685 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, 9, 36, 0); 1686 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1687 1688 rc = spdk_bdev_write_zeroes_blocks(desc, io_ch, 9, 36, io_done, NULL); 1689 CU_ASSERT(rc == 0); 1690 CU_ASSERT(g_io_done == false); 1691 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 1692 stub_complete_io(1); 1693 CU_ASSERT(g_io_done == true); 1694 1695 /* Test an UNMAP. This should also not be split. */ 1696 bdev->optimal_io_boundary = 16; 1697 g_io_done = false; 1698 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_UNMAP, 15, 2, 0); 1699 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1700 1701 rc = spdk_bdev_unmap_blocks(desc, io_ch, 15, 2, io_done, NULL); 1702 CU_ASSERT(rc == 0); 1703 CU_ASSERT(g_io_done == false); 1704 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 1705 stub_complete_io(1); 1706 CU_ASSERT(g_io_done == true); 1707 1708 /* Test a FLUSH. This should also not be split. */ 1709 bdev->optimal_io_boundary = 16; 1710 g_io_done = false; 1711 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_FLUSH, 15, 2, 0); 1712 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1713 1714 rc = spdk_bdev_flush_blocks(desc, io_ch, 15, 2, io_done, NULL); 1715 CU_ASSERT(rc == 0); 1716 CU_ASSERT(g_io_done == false); 1717 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 1718 stub_complete_io(1); 1719 CU_ASSERT(g_io_done == true); 1720 1721 CU_ASSERT(TAILQ_EMPTY(&g_bdev_ut_channel->expected_io)); 1722 1723 /* Children requests return an error status */ 1724 bdev->optimal_io_boundary = 16; 1725 iov[0].iov_base = (void *)0x10000; 1726 iov[0].iov_len = 512 * 64; 1727 g_io_exp_status = SPDK_BDEV_IO_STATUS_FAILED; 1728 g_io_done = false; 1729 g_io_status = SPDK_BDEV_IO_STATUS_SUCCESS; 1730 1731 rc = spdk_bdev_readv_blocks(desc, io_ch, iov, 1, 1, 64, io_done, NULL); 1732 CU_ASSERT(rc == 0); 1733 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 5); 1734 stub_complete_io(4); 1735 CU_ASSERT(g_io_done == false); 1736 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS); 1737 stub_complete_io(1); 1738 CU_ASSERT(g_io_done == true); 1739 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED); 1740 1741 /* Test if a multi vector command terminated with failure before continuing 1742 * splitting process when one of child I/O failed. 1743 * The multi vector command is as same as the above that needs to be split by strip 1744 * and then needs to be split further due to the capacity of child iovs. 1745 */ 1746 for (i = 0; i < BDEV_IO_NUM_CHILD_IOV - 1; i++) { 1747 iov[i].iov_base = (void *)((i + 1) * 0x10000); 1748 iov[i].iov_len = 512; 1749 } 1750 iov[BDEV_IO_NUM_CHILD_IOV - 1].iov_base = (void *)(BDEV_IO_NUM_CHILD_IOV * 0x10000); 1751 iov[BDEV_IO_NUM_CHILD_IOV - 1].iov_len = 256; 1752 1753 iov[BDEV_IO_NUM_CHILD_IOV].iov_base = (void *)((BDEV_IO_NUM_CHILD_IOV + 1) * 0x10000); 1754 iov[BDEV_IO_NUM_CHILD_IOV].iov_len = 256; 1755 1756 iov[BDEV_IO_NUM_CHILD_IOV + 1].iov_base = (void *)((BDEV_IO_NUM_CHILD_IOV + 2) * 0x10000); 1757 iov[BDEV_IO_NUM_CHILD_IOV + 1].iov_len = 512; 1758 1759 bdev->optimal_io_boundary = BDEV_IO_NUM_CHILD_IOV; 1760 1761 g_io_exp_status = SPDK_BDEV_IO_STATUS_FAILED; 1762 g_io_done = false; 1763 g_io_status = SPDK_BDEV_IO_STATUS_SUCCESS; 1764 1765 rc = spdk_bdev_readv_blocks(desc, io_ch, iov, BDEV_IO_NUM_CHILD_IOV * 2, 0, 1766 BDEV_IO_NUM_CHILD_IOV + 1, io_done, NULL); 1767 CU_ASSERT(rc == 0); 1768 CU_ASSERT(g_io_done == false); 1769 1770 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 1771 stub_complete_io(1); 1772 CU_ASSERT(g_io_done == true); 1773 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED); 1774 1775 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 1776 1777 /* for this test we will create the following conditions to hit the code path where 1778 * we are trying to send and IO following a split that has no iovs because we had to 1779 * trim them for alignment reasons. 1780 * 1781 * - 16K boundary, our IO will start at offset 0 with a length of 0x4200 1782 * - Our IOVs are 0x212 in size so that we run into the 16K boundary at child IOV 1783 * position 30 and overshoot by 0x2e. 1784 * - That means we'll send the IO and loop back to pick up the remaining bytes at 1785 * child IOV index 31. When we do, we find that we have to shorten index 31 by 0x2e 1786 * which eliniates that vector so we just send the first split IO with 30 vectors 1787 * and let the completion pick up the last 2 vectors. 1788 */ 1789 bdev->optimal_io_boundary = 32; 1790 bdev->split_on_optimal_io_boundary = true; 1791 g_io_done = false; 1792 1793 /* Init all parent IOVs to 0x212 */ 1794 for (i = 0; i < BDEV_IO_NUM_CHILD_IOV + 2; i++) { 1795 iov[i].iov_base = (void *)((i + 1) * 0x10000); 1796 iov[i].iov_len = 0x212; 1797 } 1798 1799 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0, BDEV_IO_NUM_CHILD_IOV, 1800 BDEV_IO_NUM_CHILD_IOV - 1); 1801 /* expect 0-29 to be 1:1 with the parent iov */ 1802 for (i = 0; i < BDEV_IO_NUM_CHILD_IOV - 2; i++) { 1803 ut_expected_io_set_iov(expected_io, i, iov[i].iov_base, iov[i].iov_len); 1804 } 1805 1806 /* expect index 30 to be shortened to 0x1e4 (0x212 - 0x1e) because of the alignment 1807 * where 0x1e is the amount we overshot the 16K boundary 1808 */ 1809 ut_expected_io_set_iov(expected_io, BDEV_IO_NUM_CHILD_IOV - 2, 1810 (void *)(iov[BDEV_IO_NUM_CHILD_IOV - 2].iov_base), 0x1e4); 1811 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1812 1813 /* 2nd child IO will have 2 remaining vectors, one to pick up from the one that was 1814 * shortened that take it to the next boundary and then a final one to get us to 1815 * 0x4200 bytes for the IO. 1816 */ 1817 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, BDEV_IO_NUM_CHILD_IOV, 1818 BDEV_IO_NUM_CHILD_IOV, 2); 1819 /* position 30 picked up the remaining bytes to the next boundary */ 1820 ut_expected_io_set_iov(expected_io, 0, 1821 (void *)(iov[BDEV_IO_NUM_CHILD_IOV - 2].iov_base + 0x1e4), 0x2e); 1822 1823 /* position 31 picked the the rest of the transfer to get us to 0x4200 */ 1824 ut_expected_io_set_iov(expected_io, 1, 1825 (void *)(iov[BDEV_IO_NUM_CHILD_IOV - 1].iov_base), 0x1d2); 1826 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1827 1828 rc = spdk_bdev_readv_blocks(desc, io_ch, iov, BDEV_IO_NUM_CHILD_IOV + 1, 0, 1829 BDEV_IO_NUM_CHILD_IOV + 1, io_done, NULL); 1830 CU_ASSERT(rc == 0); 1831 CU_ASSERT(g_io_done == false); 1832 1833 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 1834 stub_complete_io(1); 1835 CU_ASSERT(g_io_done == false); 1836 1837 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 1838 stub_complete_io(1); 1839 CU_ASSERT(g_io_done == true); 1840 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 1841 1842 spdk_put_io_channel(io_ch); 1843 spdk_bdev_close(desc); 1844 free_bdev(bdev); 1845 spdk_bdev_finish(bdev_fini_cb, NULL); 1846 poll_threads(); 1847 } 1848 1849 static void 1850 bdev_io_max_size_and_segment_split_test(void) 1851 { 1852 struct spdk_bdev *bdev; 1853 struct spdk_bdev_desc *desc = NULL; 1854 struct spdk_io_channel *io_ch; 1855 struct spdk_bdev_opts bdev_opts = {}; 1856 struct iovec iov[BDEV_IO_NUM_CHILD_IOV * 2]; 1857 struct ut_expected_io *expected_io; 1858 uint64_t i; 1859 int rc; 1860 1861 spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts)); 1862 bdev_opts.bdev_io_pool_size = 512; 1863 bdev_opts.bdev_io_cache_size = 64; 1864 1865 bdev_opts.opts_size = sizeof(bdev_opts); 1866 rc = spdk_bdev_set_opts(&bdev_opts); 1867 CU_ASSERT(rc == 0); 1868 spdk_bdev_initialize(bdev_init_cb, NULL); 1869 1870 bdev = allocate_bdev("bdev0"); 1871 1872 rc = spdk_bdev_open_ext(bdev->name, true, bdev_ut_event_cb, NULL, &desc); 1873 CU_ASSERT(rc == 0); 1874 SPDK_CU_ASSERT_FATAL(desc != NULL); 1875 io_ch = spdk_bdev_get_io_channel(desc); 1876 CU_ASSERT(io_ch != NULL); 1877 1878 bdev->split_on_optimal_io_boundary = false; 1879 bdev->optimal_io_boundary = 0; 1880 1881 /* Case 0 max_num_segments == 0. 1882 * but segment size 2 * 512 > 512 1883 */ 1884 bdev->max_segment_size = 512; 1885 bdev->max_num_segments = 0; 1886 g_io_done = false; 1887 1888 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 14, 2, 2); 1889 ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 512); 1890 ut_expected_io_set_iov(expected_io, 1, (void *)(0xF000 + 512), 512); 1891 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1892 1893 rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 2, io_done, NULL); 1894 CU_ASSERT(rc == 0); 1895 CU_ASSERT(g_io_done == false); 1896 1897 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 1898 stub_complete_io(1); 1899 CU_ASSERT(g_io_done == true); 1900 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 1901 1902 /* Case 1 max_segment_size == 0 1903 * but iov num 2 > 1. 1904 */ 1905 bdev->max_segment_size = 0; 1906 bdev->max_num_segments = 1; 1907 g_io_done = false; 1908 1909 iov[0].iov_base = (void *)0x10000; 1910 iov[0].iov_len = 512; 1911 iov[1].iov_base = (void *)0x20000; 1912 iov[1].iov_len = 8 * 512; 1913 1914 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 14, 1, 1); 1915 ut_expected_io_set_iov(expected_io, 0, iov[0].iov_base, iov[0].iov_len); 1916 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1917 1918 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 15, 8, 1); 1919 ut_expected_io_set_iov(expected_io, 0, iov[1].iov_base, iov[1].iov_len); 1920 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1921 1922 rc = spdk_bdev_readv_blocks(desc, io_ch, iov, 2, 14, 9, io_done, NULL); 1923 CU_ASSERT(rc == 0); 1924 CU_ASSERT(g_io_done == false); 1925 1926 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 1927 stub_complete_io(2); 1928 CU_ASSERT(g_io_done == true); 1929 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 1930 1931 /* Test that a non-vector command is split correctly. 1932 * Set up the expected values before calling spdk_bdev_read_blocks 1933 */ 1934 bdev->max_segment_size = 512; 1935 bdev->max_num_segments = 1; 1936 g_io_done = false; 1937 1938 /* Child IO 0 */ 1939 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 14, 1, 1); 1940 ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 512); 1941 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1942 1943 /* Child IO 1 */ 1944 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 15, 1, 1); 1945 ut_expected_io_set_iov(expected_io, 0, (void *)(0xF000 + 1 * 512), 512); 1946 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1947 1948 /* spdk_bdev_read_blocks will submit the first child immediately. */ 1949 rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 2, io_done, NULL); 1950 CU_ASSERT(rc == 0); 1951 CU_ASSERT(g_io_done == false); 1952 1953 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 1954 stub_complete_io(2); 1955 CU_ASSERT(g_io_done == true); 1956 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 1957 1958 /* Now set up a more complex, multi-vector command that needs to be split, 1959 * including splitting iovecs. 1960 */ 1961 bdev->max_segment_size = 2 * 512; 1962 bdev->max_num_segments = 1; 1963 g_io_done = false; 1964 1965 iov[0].iov_base = (void *)0x10000; 1966 iov[0].iov_len = 2 * 512; 1967 iov[1].iov_base = (void *)0x20000; 1968 iov[1].iov_len = 4 * 512; 1969 iov[2].iov_base = (void *)0x30000; 1970 iov[2].iov_len = 6 * 512; 1971 1972 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 14, 2, 1); 1973 ut_expected_io_set_iov(expected_io, 0, iov[0].iov_base, 512 * 2); 1974 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1975 1976 /* Split iov[1].size to 2 iov entries then split the segments */ 1977 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 16, 2, 1); 1978 ut_expected_io_set_iov(expected_io, 0, iov[1].iov_base, 512 * 2); 1979 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1980 1981 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 18, 2, 1); 1982 ut_expected_io_set_iov(expected_io, 0, iov[1].iov_base + 512 * 2, 512 * 2); 1983 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1984 1985 /* Split iov[2].size to 3 iov entries then split the segments */ 1986 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 20, 2, 1); 1987 ut_expected_io_set_iov(expected_io, 0, iov[2].iov_base, 512 * 2); 1988 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1989 1990 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 22, 2, 1); 1991 ut_expected_io_set_iov(expected_io, 0, iov[2].iov_base + 512 * 2, 512 * 2); 1992 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1993 1994 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 24, 2, 1); 1995 ut_expected_io_set_iov(expected_io, 0, iov[2].iov_base + 512 * 4, 512 * 2); 1996 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1997 1998 rc = spdk_bdev_writev_blocks(desc, io_ch, iov, 3, 14, 12, io_done, NULL); 1999 CU_ASSERT(rc == 0); 2000 CU_ASSERT(g_io_done == false); 2001 2002 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 6); 2003 stub_complete_io(6); 2004 CU_ASSERT(g_io_done == true); 2005 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 2006 2007 /* Test multi vector command that needs to be split by strip and then needs to be 2008 * split further due to the capacity of parent IO child iovs. 2009 */ 2010 bdev->max_segment_size = 512; 2011 bdev->max_num_segments = 1; 2012 g_io_done = false; 2013 2014 for (i = 0; i < BDEV_IO_NUM_CHILD_IOV; i++) { 2015 iov[i].iov_base = (void *)((i + 1) * 0x10000); 2016 iov[i].iov_len = 512 * 2; 2017 } 2018 2019 /* Each input iov.size is split into 2 iovs, 2020 * half of the input iov can fill all child iov entries of a single IO. 2021 */ 2022 for (i = 0; i < BDEV_IO_NUM_CHILD_IOV / 2; i++) { 2023 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 2 * i, 1, 1); 2024 ut_expected_io_set_iov(expected_io, 0, iov[i].iov_base, 512); 2025 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2026 2027 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 2 * i + 1, 1, 1); 2028 ut_expected_io_set_iov(expected_io, 0, iov[i].iov_base + 512, 512); 2029 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2030 } 2031 2032 /* The remaining iov is split in the second round */ 2033 for (i = BDEV_IO_NUM_CHILD_IOV / 2; i < BDEV_IO_NUM_CHILD_IOV; i++) { 2034 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, i * 2, 1, 1); 2035 ut_expected_io_set_iov(expected_io, 0, iov[i].iov_base, 512); 2036 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2037 2038 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, i * 2 + 1, 1, 1); 2039 ut_expected_io_set_iov(expected_io, 0, iov[i].iov_base + 512, 512); 2040 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2041 } 2042 2043 rc = spdk_bdev_readv_blocks(desc, io_ch, iov, BDEV_IO_NUM_CHILD_IOV, 0, 2044 BDEV_IO_NUM_CHILD_IOV * 2, io_done, NULL); 2045 CU_ASSERT(rc == 0); 2046 CU_ASSERT(g_io_done == false); 2047 2048 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == BDEV_IO_NUM_CHILD_IOV); 2049 stub_complete_io(BDEV_IO_NUM_CHILD_IOV); 2050 CU_ASSERT(g_io_done == false); 2051 2052 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == BDEV_IO_NUM_CHILD_IOV); 2053 stub_complete_io(BDEV_IO_NUM_CHILD_IOV); 2054 CU_ASSERT(g_io_done == true); 2055 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 2056 2057 /* A wrong case, a child IO that is divided does 2058 * not meet the principle of multiples of block size, 2059 * and exits with error 2060 */ 2061 bdev->max_segment_size = 512; 2062 bdev->max_num_segments = 1; 2063 g_io_done = false; 2064 2065 iov[0].iov_base = (void *)0x10000; 2066 iov[0].iov_len = 512 + 256; 2067 iov[1].iov_base = (void *)0x20000; 2068 iov[1].iov_len = 256; 2069 2070 /* iov[0] is split to 512 and 256. 2071 * 256 is less than a block size, and it is found 2072 * in the next round of split that it is the first child IO smaller than 2073 * the block size, so the error exit 2074 */ 2075 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0, 1, 1); 2076 ut_expected_io_set_iov(expected_io, 0, iov[0].iov_base, 512); 2077 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2078 2079 rc = spdk_bdev_readv_blocks(desc, io_ch, iov, 2, 0, 2, io_done, NULL); 2080 CU_ASSERT(rc == 0); 2081 CU_ASSERT(g_io_done == false); 2082 2083 /* First child IO is OK */ 2084 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 2085 stub_complete_io(1); 2086 CU_ASSERT(g_io_done == true); 2087 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 2088 2089 /* error exit */ 2090 stub_complete_io(1); 2091 CU_ASSERT(g_io_done == true); 2092 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED); 2093 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 2094 2095 /* Test multi vector command that needs to be split by strip and then needs to be 2096 * split further due to the capacity of child iovs. 2097 * 2098 * In this case, the last two iovs need to be split, but it will exceed the capacity 2099 * of child iovs, so it needs to wait until the first batch completed. 2100 */ 2101 bdev->max_segment_size = 512; 2102 bdev->max_num_segments = BDEV_IO_NUM_CHILD_IOV; 2103 g_io_done = false; 2104 2105 for (i = 0; i < BDEV_IO_NUM_CHILD_IOV - 2; i++) { 2106 iov[i].iov_base = (void *)((i + 1) * 0x10000); 2107 iov[i].iov_len = 512; 2108 } 2109 for (i = BDEV_IO_NUM_CHILD_IOV - 2; i < BDEV_IO_NUM_CHILD_IOV; i++) { 2110 iov[i].iov_base = (void *)((i + 1) * 0x10000); 2111 iov[i].iov_len = 512 * 2; 2112 } 2113 2114 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0, 2115 BDEV_IO_NUM_CHILD_IOV, BDEV_IO_NUM_CHILD_IOV); 2116 /* 0 ~ (BDEV_IO_NUM_CHILD_IOV - 2) Will not be split */ 2117 for (i = 0; i < BDEV_IO_NUM_CHILD_IOV - 2; i++) { 2118 ut_expected_io_set_iov(expected_io, i, iov[i].iov_base, iov[i].iov_len); 2119 } 2120 /* (BDEV_IO_NUM_CHILD_IOV - 2) is split */ 2121 ut_expected_io_set_iov(expected_io, i, iov[i].iov_base, 512); 2122 ut_expected_io_set_iov(expected_io, i + 1, iov[i].iov_base + 512, 512); 2123 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2124 2125 /* Child iov entries exceed the max num of parent IO so split it in next round */ 2126 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, BDEV_IO_NUM_CHILD_IOV, 2, 2); 2127 ut_expected_io_set_iov(expected_io, 0, iov[i + 1].iov_base, 512); 2128 ut_expected_io_set_iov(expected_io, 1, iov[i + 1].iov_base + 512, 512); 2129 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2130 2131 rc = spdk_bdev_readv_blocks(desc, io_ch, iov, BDEV_IO_NUM_CHILD_IOV, 0, 2132 BDEV_IO_NUM_CHILD_IOV + 2, io_done, NULL); 2133 CU_ASSERT(rc == 0); 2134 CU_ASSERT(g_io_done == false); 2135 2136 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 2137 stub_complete_io(1); 2138 CU_ASSERT(g_io_done == false); 2139 2140 /* Next round */ 2141 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 2142 stub_complete_io(1); 2143 CU_ASSERT(g_io_done == true); 2144 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 2145 2146 /* This case is similar to the previous one, but the io composed of 2147 * the last few entries of child iov is not enough for a blocklen, so they 2148 * cannot be put into this IO, but wait until the next time. 2149 */ 2150 bdev->max_segment_size = 512; 2151 bdev->max_num_segments = BDEV_IO_NUM_CHILD_IOV; 2152 g_io_done = false; 2153 2154 for (i = 0; i < BDEV_IO_NUM_CHILD_IOV - 2; i++) { 2155 iov[i].iov_base = (void *)((i + 1) * 0x10000); 2156 iov[i].iov_len = 512; 2157 } 2158 2159 for (i = BDEV_IO_NUM_CHILD_IOV - 2; i < BDEV_IO_NUM_CHILD_IOV + 2; i++) { 2160 iov[i].iov_base = (void *)((i + 1) * 0x10000); 2161 iov[i].iov_len = 128; 2162 } 2163 2164 /* First child iovcnt is't BDEV_IO_NUM_CHILD_IOV but BDEV_IO_NUM_CHILD_IOV - 2. 2165 * Because the left 2 iov is not enough for a blocklen. 2166 */ 2167 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0, 2168 BDEV_IO_NUM_CHILD_IOV - 2, BDEV_IO_NUM_CHILD_IOV - 2); 2169 for (i = 0; i < BDEV_IO_NUM_CHILD_IOV - 2; i++) { 2170 ut_expected_io_set_iov(expected_io, i, iov[i].iov_base, iov[i].iov_len); 2171 } 2172 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2173 2174 /* The second child io waits until the end of the first child io before executing. 2175 * Because the iovcnt of the two IOs exceeds the child iovcnt of the parent IO. 2176 * BDEV_IO_NUM_CHILD_IOV - 2 to BDEV_IO_NUM_CHILD_IOV + 2 2177 */ 2178 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, BDEV_IO_NUM_CHILD_IOV - 2, 2179 1, 4); 2180 ut_expected_io_set_iov(expected_io, 0, iov[i].iov_base, iov[i].iov_len); 2181 ut_expected_io_set_iov(expected_io, 1, iov[i + 1].iov_base, iov[i + 1].iov_len); 2182 ut_expected_io_set_iov(expected_io, 2, iov[i + 2].iov_base, iov[i + 2].iov_len); 2183 ut_expected_io_set_iov(expected_io, 3, iov[i + 3].iov_base, iov[i + 3].iov_len); 2184 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2185 2186 rc = spdk_bdev_readv_blocks(desc, io_ch, iov, BDEV_IO_NUM_CHILD_IOV + 2, 0, 2187 BDEV_IO_NUM_CHILD_IOV - 1, io_done, NULL); 2188 CU_ASSERT(rc == 0); 2189 CU_ASSERT(g_io_done == false); 2190 2191 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 2192 stub_complete_io(1); 2193 CU_ASSERT(g_io_done == false); 2194 2195 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 2196 stub_complete_io(1); 2197 CU_ASSERT(g_io_done == true); 2198 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 2199 2200 /* A very complicated case. Each sg entry exceeds max_segment_size and 2201 * needs to be split. At the same time, child io must be a multiple of blocklen. 2202 * At the same time, child iovcnt exceeds parent iovcnt. 2203 */ 2204 bdev->max_segment_size = 512 + 128; 2205 bdev->max_num_segments = 3; 2206 g_io_done = false; 2207 2208 for (i = 0; i < BDEV_IO_NUM_CHILD_IOV - 2; i++) { 2209 iov[i].iov_base = (void *)((i + 1) * 0x10000); 2210 iov[i].iov_len = 512 + 256; 2211 } 2212 2213 for (i = BDEV_IO_NUM_CHILD_IOV - 2; i < BDEV_IO_NUM_CHILD_IOV + 2; i++) { 2214 iov[i].iov_base = (void *)((i + 1) * 0x10000); 2215 iov[i].iov_len = 512 + 128; 2216 } 2217 2218 /* Child IOs use 9 entries per for() round and 3 * 9 = 27 child iov entries. 2219 * Consume 4 parent IO iov entries per for() round and 6 block size. 2220 * Generate 9 child IOs. 2221 */ 2222 for (i = 0; i < 3; i++) { 2223 uint32_t j = i * 4; 2224 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, i * 6, 2, 3); 2225 ut_expected_io_set_iov(expected_io, 0, iov[j].iov_base, 640); 2226 ut_expected_io_set_iov(expected_io, 1, iov[j].iov_base + 640, 128); 2227 ut_expected_io_set_iov(expected_io, 2, iov[j + 1].iov_base, 256); 2228 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2229 2230 /* Child io must be a multiple of blocklen 2231 * iov[j + 2] must be split. If the third entry is also added, 2232 * the multiple of blocklen cannot be guaranteed. But it still 2233 * occupies one iov entry of the parent child iov. 2234 */ 2235 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, i * 6 + 2, 2, 2); 2236 ut_expected_io_set_iov(expected_io, 0, iov[j + 1].iov_base + 256, 512); 2237 ut_expected_io_set_iov(expected_io, 1, iov[j + 2].iov_base, 512); 2238 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2239 2240 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, i * 6 + 4, 2, 3); 2241 ut_expected_io_set_iov(expected_io, 0, iov[j + 2].iov_base + 512, 256); 2242 ut_expected_io_set_iov(expected_io, 1, iov[j + 3].iov_base, 640); 2243 ut_expected_io_set_iov(expected_io, 2, iov[j + 3].iov_base + 640, 128); 2244 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2245 } 2246 2247 /* Child iov position at 27, the 10th child IO 2248 * iov entry index is 3 * 4 and offset is 3 * 6 2249 */ 2250 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 18, 2, 3); 2251 ut_expected_io_set_iov(expected_io, 0, iov[12].iov_base, 640); 2252 ut_expected_io_set_iov(expected_io, 1, iov[12].iov_base + 640, 128); 2253 ut_expected_io_set_iov(expected_io, 2, iov[13].iov_base, 256); 2254 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2255 2256 /* Child iov position at 30, the 11th child IO */ 2257 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 20, 2, 2); 2258 ut_expected_io_set_iov(expected_io, 0, iov[13].iov_base + 256, 512); 2259 ut_expected_io_set_iov(expected_io, 1, iov[14].iov_base, 512); 2260 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2261 2262 /* The 2nd split round and iovpos is 0, the 12th child IO */ 2263 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 22, 2, 3); 2264 ut_expected_io_set_iov(expected_io, 0, iov[14].iov_base + 512, 256); 2265 ut_expected_io_set_iov(expected_io, 1, iov[15].iov_base, 640); 2266 ut_expected_io_set_iov(expected_io, 2, iov[15].iov_base + 640, 128); 2267 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2268 2269 /* Consume 9 child IOs and 27 child iov entries. 2270 * Consume 4 parent IO iov entries per for() round and 6 block size. 2271 * Parent IO iov index start from 16 and block offset start from 24 2272 */ 2273 for (i = 0; i < 3; i++) { 2274 uint32_t j = i * 4 + 16; 2275 uint32_t offset = i * 6 + 24; 2276 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, offset, 2, 3); 2277 ut_expected_io_set_iov(expected_io, 0, iov[j].iov_base, 640); 2278 ut_expected_io_set_iov(expected_io, 1, iov[j].iov_base + 640, 128); 2279 ut_expected_io_set_iov(expected_io, 2, iov[j + 1].iov_base, 256); 2280 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2281 2282 /* Child io must be a multiple of blocklen 2283 * iov[j + 2] must be split. If the third entry is also added, 2284 * the multiple of blocklen cannot be guaranteed. But it still 2285 * occupies one iov entry of the parent child iov. 2286 */ 2287 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, offset + 2, 2, 2); 2288 ut_expected_io_set_iov(expected_io, 0, iov[j + 1].iov_base + 256, 512); 2289 ut_expected_io_set_iov(expected_io, 1, iov[j + 2].iov_base, 512); 2290 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2291 2292 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, offset + 4, 2, 3); 2293 ut_expected_io_set_iov(expected_io, 0, iov[j + 2].iov_base + 512, 256); 2294 ut_expected_io_set_iov(expected_io, 1, iov[j + 3].iov_base, 640); 2295 ut_expected_io_set_iov(expected_io, 2, iov[j + 3].iov_base + 640, 128); 2296 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2297 } 2298 2299 /* The 22th child IO, child iov position at 30 */ 2300 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 42, 1, 1); 2301 ut_expected_io_set_iov(expected_io, 0, iov[28].iov_base, 512); 2302 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2303 2304 /* The third round */ 2305 /* Here is the 23nd child IO and child iovpos is 0 */ 2306 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 43, 2, 3); 2307 ut_expected_io_set_iov(expected_io, 0, iov[28].iov_base + 512, 256); 2308 ut_expected_io_set_iov(expected_io, 1, iov[29].iov_base, 640); 2309 ut_expected_io_set_iov(expected_io, 2, iov[29].iov_base + 640, 128); 2310 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2311 2312 /* The 24th child IO */ 2313 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 45, 3, 3); 2314 ut_expected_io_set_iov(expected_io, 0, iov[30].iov_base, 640); 2315 ut_expected_io_set_iov(expected_io, 1, iov[31].iov_base, 640); 2316 ut_expected_io_set_iov(expected_io, 2, iov[32].iov_base, 256); 2317 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2318 2319 /* The 25th child IO */ 2320 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 48, 2, 2); 2321 ut_expected_io_set_iov(expected_io, 0, iov[32].iov_base + 256, 384); 2322 ut_expected_io_set_iov(expected_io, 1, iov[33].iov_base, 640); 2323 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2324 2325 rc = spdk_bdev_readv_blocks(desc, io_ch, iov, BDEV_IO_NUM_CHILD_IOV + 2, 0, 2326 50, io_done, NULL); 2327 CU_ASSERT(rc == 0); 2328 CU_ASSERT(g_io_done == false); 2329 2330 /* Parent IO supports up to 32 child iovs, so it is calculated that 2331 * a maximum of 11 IOs can be split at a time, and the 2332 * splitting will continue after the first batch is over. 2333 */ 2334 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 11); 2335 stub_complete_io(11); 2336 CU_ASSERT(g_io_done == false); 2337 2338 /* The 2nd round */ 2339 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 11); 2340 stub_complete_io(11); 2341 CU_ASSERT(g_io_done == false); 2342 2343 /* The last round */ 2344 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 3); 2345 stub_complete_io(3); 2346 CU_ASSERT(g_io_done == true); 2347 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 2348 2349 /* Test an WRITE_ZEROES. This should also not be split. */ 2350 bdev->max_segment_size = 512; 2351 bdev->max_num_segments = 1; 2352 g_io_done = false; 2353 2354 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, 9, 36, 0); 2355 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2356 2357 rc = spdk_bdev_write_zeroes_blocks(desc, io_ch, 9, 36, io_done, NULL); 2358 CU_ASSERT(rc == 0); 2359 CU_ASSERT(g_io_done == false); 2360 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 2361 stub_complete_io(1); 2362 CU_ASSERT(g_io_done == true); 2363 2364 /* Test an UNMAP. This should also not be split. */ 2365 g_io_done = false; 2366 2367 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_UNMAP, 15, 4, 0); 2368 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2369 2370 rc = spdk_bdev_unmap_blocks(desc, io_ch, 15, 4, io_done, NULL); 2371 CU_ASSERT(rc == 0); 2372 CU_ASSERT(g_io_done == false); 2373 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 2374 stub_complete_io(1); 2375 CU_ASSERT(g_io_done == true); 2376 2377 /* Test a FLUSH. This should also not be split. */ 2378 g_io_done = false; 2379 2380 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_FLUSH, 15, 4, 0); 2381 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2382 2383 rc = spdk_bdev_flush_blocks(desc, io_ch, 15, 2, io_done, NULL); 2384 CU_ASSERT(rc == 0); 2385 CU_ASSERT(g_io_done == false); 2386 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 2387 stub_complete_io(1); 2388 CU_ASSERT(g_io_done == true); 2389 2390 spdk_put_io_channel(io_ch); 2391 spdk_bdev_close(desc); 2392 free_bdev(bdev); 2393 spdk_bdev_finish(bdev_fini_cb, NULL); 2394 poll_threads(); 2395 } 2396 2397 static void 2398 bdev_io_mix_split_test(void) 2399 { 2400 struct spdk_bdev *bdev; 2401 struct spdk_bdev_desc *desc = NULL; 2402 struct spdk_io_channel *io_ch; 2403 struct spdk_bdev_opts bdev_opts = {}; 2404 struct iovec iov[BDEV_IO_NUM_CHILD_IOV * 2]; 2405 struct ut_expected_io *expected_io; 2406 uint64_t i; 2407 int rc; 2408 2409 spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts)); 2410 bdev_opts.bdev_io_pool_size = 512; 2411 bdev_opts.bdev_io_cache_size = 64; 2412 2413 rc = spdk_bdev_set_opts(&bdev_opts); 2414 CU_ASSERT(rc == 0); 2415 spdk_bdev_initialize(bdev_init_cb, NULL); 2416 2417 bdev = allocate_bdev("bdev0"); 2418 2419 rc = spdk_bdev_open_ext(bdev->name, true, bdev_ut_event_cb, NULL, &desc); 2420 CU_ASSERT(rc == 0); 2421 SPDK_CU_ASSERT_FATAL(desc != NULL); 2422 io_ch = spdk_bdev_get_io_channel(desc); 2423 CU_ASSERT(io_ch != NULL); 2424 2425 /* First case optimal_io_boundary == max_segment_size * max_num_segments */ 2426 bdev->split_on_optimal_io_boundary = true; 2427 bdev->optimal_io_boundary = 16; 2428 2429 bdev->max_segment_size = 512; 2430 bdev->max_num_segments = 16; 2431 g_io_done = false; 2432 2433 /* IO crossing the IO boundary requires split 2434 * Total 2 child IOs. 2435 */ 2436 2437 /* The 1st child IO split the segment_size to multiple segment entry */ 2438 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 14, 2, 2); 2439 ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 512); 2440 ut_expected_io_set_iov(expected_io, 1, (void *)(0xF000 + 512), 512); 2441 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2442 2443 /* The 2nd child IO split the segment_size to multiple segment entry */ 2444 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 16, 2, 2); 2445 ut_expected_io_set_iov(expected_io, 0, (void *)(0xF000 + 2 * 512), 512); 2446 ut_expected_io_set_iov(expected_io, 1, (void *)(0xF000 + 3 * 512), 512); 2447 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2448 2449 rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 4, io_done, NULL); 2450 CU_ASSERT(rc == 0); 2451 CU_ASSERT(g_io_done == false); 2452 2453 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 2454 stub_complete_io(2); 2455 CU_ASSERT(g_io_done == true); 2456 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 2457 2458 /* Second case optimal_io_boundary > max_segment_size * max_num_segments */ 2459 bdev->max_segment_size = 15 * 512; 2460 bdev->max_num_segments = 1; 2461 g_io_done = false; 2462 2463 /* IO crossing the IO boundary requires split. 2464 * The 1st child IO segment size exceeds the max_segment_size, 2465 * So 1st child IO will be splitted to multiple segment entry. 2466 * Then it split to 2 child IOs because of the max_num_segments. 2467 * Total 3 child IOs. 2468 */ 2469 2470 /* The first 2 IOs are in an IO boundary. 2471 * Because the optimal_io_boundary > max_segment_size * max_num_segments 2472 * So it split to the first 2 IOs. 2473 */ 2474 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0, 15, 1); 2475 ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 512 * 15); 2476 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2477 2478 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 15, 1, 1); 2479 ut_expected_io_set_iov(expected_io, 0, (void *)(0xF000 + 512 * 15), 512); 2480 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2481 2482 /* The 3rd Child IO is because of the io boundary */ 2483 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 16, 2, 1); 2484 ut_expected_io_set_iov(expected_io, 0, (void *)(0xF000 + 512 * 16), 512 * 2); 2485 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2486 2487 rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 0, 18, io_done, NULL); 2488 CU_ASSERT(rc == 0); 2489 CU_ASSERT(g_io_done == false); 2490 2491 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 3); 2492 stub_complete_io(3); 2493 CU_ASSERT(g_io_done == true); 2494 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 2495 2496 /* Third case optimal_io_boundary < max_segment_size * max_num_segments */ 2497 bdev->max_segment_size = 17 * 512; 2498 bdev->max_num_segments = 1; 2499 g_io_done = false; 2500 2501 /* IO crossing the IO boundary requires split. 2502 * Child IO does not split. 2503 * Total 2 child IOs. 2504 */ 2505 2506 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0, 16, 1); 2507 ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 512 * 16); 2508 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2509 2510 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 16, 2, 1); 2511 ut_expected_io_set_iov(expected_io, 0, (void *)(0xF000 + 512 * 16), 512 * 2); 2512 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2513 2514 rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 0, 18, io_done, NULL); 2515 CU_ASSERT(rc == 0); 2516 CU_ASSERT(g_io_done == false); 2517 2518 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 2519 stub_complete_io(2); 2520 CU_ASSERT(g_io_done == true); 2521 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 2522 2523 /* Now set up a more complex, multi-vector command that needs to be split, 2524 * including splitting iovecs. 2525 * optimal_io_boundary < max_segment_size * max_num_segments 2526 */ 2527 bdev->max_segment_size = 3 * 512; 2528 bdev->max_num_segments = 6; 2529 g_io_done = false; 2530 2531 iov[0].iov_base = (void *)0x10000; 2532 iov[0].iov_len = 4 * 512; 2533 iov[1].iov_base = (void *)0x20000; 2534 iov[1].iov_len = 4 * 512; 2535 iov[2].iov_base = (void *)0x30000; 2536 iov[2].iov_len = 10 * 512; 2537 2538 /* IO crossing the IO boundary requires split. 2539 * The 1st child IO segment size exceeds the max_segment_size and after 2540 * splitting segment_size, the num_segments exceeds max_num_segments. 2541 * So 1st child IO will be splitted to 2 child IOs. 2542 * Total 3 child IOs. 2543 */ 2544 2545 /* The first 2 IOs are in an IO boundary. 2546 * After splitting segment size the segment num exceeds. 2547 * So it splits to 2 child IOs. 2548 */ 2549 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 0, 14, 6); 2550 ut_expected_io_set_iov(expected_io, 0, iov[0].iov_base, 512 * 3); 2551 ut_expected_io_set_iov(expected_io, 1, iov[0].iov_base + 512 * 3, 512); 2552 ut_expected_io_set_iov(expected_io, 2, iov[1].iov_base, 512 * 3); 2553 ut_expected_io_set_iov(expected_io, 3, iov[1].iov_base + 512 * 3, 512); 2554 ut_expected_io_set_iov(expected_io, 4, iov[2].iov_base, 512 * 3); 2555 ut_expected_io_set_iov(expected_io, 5, iov[2].iov_base + 512 * 3, 512 * 3); 2556 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2557 2558 /* The 2nd child IO has the left segment entry */ 2559 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 14, 2, 1); 2560 ut_expected_io_set_iov(expected_io, 0, iov[2].iov_base + 512 * 6, 512 * 2); 2561 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2562 2563 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 16, 2, 1); 2564 ut_expected_io_set_iov(expected_io, 0, iov[2].iov_base + 512 * 8, 512 * 2); 2565 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2566 2567 rc = spdk_bdev_writev_blocks(desc, io_ch, iov, 3, 0, 18, io_done, NULL); 2568 CU_ASSERT(rc == 0); 2569 CU_ASSERT(g_io_done == false); 2570 2571 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 3); 2572 stub_complete_io(3); 2573 CU_ASSERT(g_io_done == true); 2574 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 2575 2576 /* A very complicated case. Each sg entry exceeds max_segment_size 2577 * and split on io boundary. 2578 * optimal_io_boundary < max_segment_size * max_num_segments 2579 */ 2580 bdev->max_segment_size = 3 * 512; 2581 bdev->max_num_segments = BDEV_IO_NUM_CHILD_IOV; 2582 g_io_done = false; 2583 2584 for (i = 0; i < 20; i++) { 2585 iov[i].iov_base = (void *)((i + 1) * 0x10000); 2586 iov[i].iov_len = 512 * 4; 2587 } 2588 2589 /* IO crossing the IO boundary requires split. 2590 * 80 block length can split 5 child IOs base on offset and IO boundary. 2591 * Each iov entry needs to be splitted to 2 entries because of max_segment_size 2592 * Total 5 child IOs. 2593 */ 2594 2595 /* 4 iov entries are in an IO boundary and each iov entry splits to 2. 2596 * So each child IO occupies 8 child iov entries. 2597 */ 2598 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 0, 16, 8); 2599 for (i = 0; i < 4; i++) { 2600 int iovcnt = i * 2; 2601 ut_expected_io_set_iov(expected_io, iovcnt, iov[i].iov_base, 512 * 3); 2602 ut_expected_io_set_iov(expected_io, iovcnt + 1, iov[i].iov_base + 512 * 3, 512); 2603 } 2604 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2605 2606 /* 2nd child IO and total 16 child iov entries of parent IO */ 2607 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 16, 16, 8); 2608 for (i = 4; i < 8; i++) { 2609 int iovcnt = (i - 4) * 2; 2610 ut_expected_io_set_iov(expected_io, iovcnt, iov[i].iov_base, 512 * 3); 2611 ut_expected_io_set_iov(expected_io, iovcnt + 1, iov[i].iov_base + 512 * 3, 512); 2612 } 2613 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2614 2615 /* 3rd child IO and total 24 child iov entries of parent IO */ 2616 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 32, 16, 8); 2617 for (i = 8; i < 12; i++) { 2618 int iovcnt = (i - 8) * 2; 2619 ut_expected_io_set_iov(expected_io, iovcnt, iov[i].iov_base, 512 * 3); 2620 ut_expected_io_set_iov(expected_io, iovcnt + 1, iov[i].iov_base + 512 * 3, 512); 2621 } 2622 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2623 2624 /* 4th child IO and total 32 child iov entries of parent IO */ 2625 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 48, 16, 8); 2626 for (i = 12; i < 16; i++) { 2627 int iovcnt = (i - 12) * 2; 2628 ut_expected_io_set_iov(expected_io, iovcnt, iov[i].iov_base, 512 * 3); 2629 ut_expected_io_set_iov(expected_io, iovcnt + 1, iov[i].iov_base + 512 * 3, 512); 2630 } 2631 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2632 2633 /* 5th child IO and because of the child iov entry it should be splitted 2634 * in next round. 2635 */ 2636 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 64, 16, 8); 2637 for (i = 16; i < 20; i++) { 2638 int iovcnt = (i - 16) * 2; 2639 ut_expected_io_set_iov(expected_io, iovcnt, iov[i].iov_base, 512 * 3); 2640 ut_expected_io_set_iov(expected_io, iovcnt + 1, iov[i].iov_base + 512 * 3, 512); 2641 } 2642 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2643 2644 rc = spdk_bdev_writev_blocks(desc, io_ch, iov, 20, 0, 80, io_done, NULL); 2645 CU_ASSERT(rc == 0); 2646 CU_ASSERT(g_io_done == false); 2647 2648 /* First split round */ 2649 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 4); 2650 stub_complete_io(4); 2651 CU_ASSERT(g_io_done == false); 2652 2653 /* Second split round */ 2654 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 2655 stub_complete_io(1); 2656 CU_ASSERT(g_io_done == true); 2657 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 2658 2659 spdk_put_io_channel(io_ch); 2660 spdk_bdev_close(desc); 2661 free_bdev(bdev); 2662 spdk_bdev_finish(bdev_fini_cb, NULL); 2663 poll_threads(); 2664 } 2665 2666 static void 2667 bdev_io_split_with_io_wait(void) 2668 { 2669 struct spdk_bdev *bdev; 2670 struct spdk_bdev_desc *desc = NULL; 2671 struct spdk_io_channel *io_ch; 2672 struct spdk_bdev_channel *channel; 2673 struct spdk_bdev_mgmt_channel *mgmt_ch; 2674 struct spdk_bdev_opts bdev_opts = {}; 2675 struct iovec iov[3]; 2676 struct ut_expected_io *expected_io; 2677 int rc; 2678 2679 spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts)); 2680 bdev_opts.bdev_io_pool_size = 2; 2681 bdev_opts.bdev_io_cache_size = 1; 2682 2683 rc = spdk_bdev_set_opts(&bdev_opts); 2684 CU_ASSERT(rc == 0); 2685 spdk_bdev_initialize(bdev_init_cb, NULL); 2686 2687 bdev = allocate_bdev("bdev0"); 2688 2689 rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc); 2690 CU_ASSERT(rc == 0); 2691 CU_ASSERT(desc != NULL); 2692 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 2693 io_ch = spdk_bdev_get_io_channel(desc); 2694 CU_ASSERT(io_ch != NULL); 2695 channel = spdk_io_channel_get_ctx(io_ch); 2696 mgmt_ch = channel->shared_resource->mgmt_ch; 2697 2698 bdev->optimal_io_boundary = 16; 2699 bdev->split_on_optimal_io_boundary = true; 2700 2701 rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, NULL); 2702 CU_ASSERT(rc == 0); 2703 2704 /* Now test that a single-vector command is split correctly. 2705 * Offset 14, length 8, payload 0xF000 2706 * Child - Offset 14, length 2, payload 0xF000 2707 * Child - Offset 16, length 6, payload 0xF000 + 2 * 512 2708 * 2709 * Set up the expected values before calling spdk_bdev_read_blocks 2710 */ 2711 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 14, 2, 1); 2712 ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 2 * 512); 2713 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2714 2715 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 16, 6, 1); 2716 ut_expected_io_set_iov(expected_io, 0, (void *)(0xF000 + 2 * 512), 6 * 512); 2717 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2718 2719 /* The following children will be submitted sequentially due to the capacity of 2720 * spdk_bdev_io. 2721 */ 2722 2723 /* The first child I/O will be queued to wait until an spdk_bdev_io becomes available */ 2724 rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 8, io_done, NULL); 2725 CU_ASSERT(rc == 0); 2726 CU_ASSERT(!TAILQ_EMPTY(&mgmt_ch->io_wait_queue)); 2727 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 2728 2729 /* Completing the first read I/O will submit the first child */ 2730 stub_complete_io(1); 2731 CU_ASSERT(TAILQ_EMPTY(&mgmt_ch->io_wait_queue)); 2732 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 2733 2734 /* Completing the first child will submit the second child */ 2735 stub_complete_io(1); 2736 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 2737 2738 /* Complete the second child I/O. This should result in our callback getting 2739 * invoked since the parent I/O is now complete. 2740 */ 2741 stub_complete_io(1); 2742 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 2743 2744 /* Now set up a more complex, multi-vector command that needs to be split, 2745 * including splitting iovecs. 2746 */ 2747 iov[0].iov_base = (void *)0x10000; 2748 iov[0].iov_len = 512; 2749 iov[1].iov_base = (void *)0x20000; 2750 iov[1].iov_len = 20 * 512; 2751 iov[2].iov_base = (void *)0x30000; 2752 iov[2].iov_len = 11 * 512; 2753 2754 g_io_done = false; 2755 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 14, 2, 2); 2756 ut_expected_io_set_iov(expected_io, 0, (void *)0x10000, 512); 2757 ut_expected_io_set_iov(expected_io, 1, (void *)0x20000, 512); 2758 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2759 2760 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 16, 16, 1); 2761 ut_expected_io_set_iov(expected_io, 0, (void *)(0x20000 + 512), 16 * 512); 2762 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2763 2764 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 32, 14, 2); 2765 ut_expected_io_set_iov(expected_io, 0, (void *)(0x20000 + 17 * 512), 3 * 512); 2766 ut_expected_io_set_iov(expected_io, 1, (void *)0x30000, 11 * 512); 2767 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2768 2769 rc = spdk_bdev_writev_blocks(desc, io_ch, iov, 3, 14, 32, io_done, NULL); 2770 CU_ASSERT(rc == 0); 2771 CU_ASSERT(g_io_done == false); 2772 2773 /* The following children will be submitted sequentially due to the capacity of 2774 * spdk_bdev_io. 2775 */ 2776 2777 /* Completing the first child will submit the second child */ 2778 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 2779 stub_complete_io(1); 2780 CU_ASSERT(g_io_done == false); 2781 2782 /* Completing the second child will submit the third child */ 2783 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 2784 stub_complete_io(1); 2785 CU_ASSERT(g_io_done == false); 2786 2787 /* Completing the third child will result in our callback getting invoked 2788 * since the parent I/O is now complete. 2789 */ 2790 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 2791 stub_complete_io(1); 2792 CU_ASSERT(g_io_done == true); 2793 2794 CU_ASSERT(TAILQ_EMPTY(&g_bdev_ut_channel->expected_io)); 2795 2796 spdk_put_io_channel(io_ch); 2797 spdk_bdev_close(desc); 2798 free_bdev(bdev); 2799 spdk_bdev_finish(bdev_fini_cb, NULL); 2800 poll_threads(); 2801 } 2802 2803 static void 2804 bdev_io_alignment(void) 2805 { 2806 struct spdk_bdev *bdev; 2807 struct spdk_bdev_desc *desc = NULL; 2808 struct spdk_io_channel *io_ch; 2809 struct spdk_bdev_opts bdev_opts = {}; 2810 int rc; 2811 void *buf = NULL; 2812 struct iovec iovs[2]; 2813 int iovcnt; 2814 uint64_t alignment; 2815 2816 spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts)); 2817 bdev_opts.bdev_io_pool_size = 20; 2818 bdev_opts.bdev_io_cache_size = 2; 2819 2820 rc = spdk_bdev_set_opts(&bdev_opts); 2821 CU_ASSERT(rc == 0); 2822 spdk_bdev_initialize(bdev_init_cb, NULL); 2823 2824 fn_table.submit_request = stub_submit_request_get_buf; 2825 bdev = allocate_bdev("bdev0"); 2826 2827 rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc); 2828 CU_ASSERT(rc == 0); 2829 CU_ASSERT(desc != NULL); 2830 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 2831 io_ch = spdk_bdev_get_io_channel(desc); 2832 CU_ASSERT(io_ch != NULL); 2833 2834 /* Create aligned buffer */ 2835 rc = posix_memalign(&buf, 4096, 8192); 2836 SPDK_CU_ASSERT_FATAL(rc == 0); 2837 2838 /* Pass aligned single buffer with no alignment required */ 2839 alignment = 1; 2840 bdev->required_alignment = spdk_u32log2(alignment); 2841 2842 rc = spdk_bdev_write_blocks(desc, io_ch, buf, 0, 1, io_done, NULL); 2843 CU_ASSERT(rc == 0); 2844 stub_complete_io(1); 2845 CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt, 2846 alignment)); 2847 2848 rc = spdk_bdev_read_blocks(desc, io_ch, buf, 0, 1, io_done, NULL); 2849 CU_ASSERT(rc == 0); 2850 stub_complete_io(1); 2851 CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt, 2852 alignment)); 2853 2854 /* Pass unaligned single buffer with no alignment required */ 2855 alignment = 1; 2856 bdev->required_alignment = spdk_u32log2(alignment); 2857 2858 rc = spdk_bdev_write_blocks(desc, io_ch, buf + 4, 0, 1, io_done, NULL); 2859 CU_ASSERT(rc == 0); 2860 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0); 2861 CU_ASSERT(g_bdev_io->u.bdev.iovs[0].iov_base == buf + 4); 2862 stub_complete_io(1); 2863 2864 rc = spdk_bdev_read_blocks(desc, io_ch, buf + 4, 0, 1, io_done, NULL); 2865 CU_ASSERT(rc == 0); 2866 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0); 2867 CU_ASSERT(g_bdev_io->u.bdev.iovs[0].iov_base == buf + 4); 2868 stub_complete_io(1); 2869 2870 /* Pass unaligned single buffer with 512 alignment required */ 2871 alignment = 512; 2872 bdev->required_alignment = spdk_u32log2(alignment); 2873 2874 rc = spdk_bdev_write_blocks(desc, io_ch, buf + 4, 0, 1, io_done, NULL); 2875 CU_ASSERT(rc == 0); 2876 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 1); 2877 CU_ASSERT(g_bdev_io->u.bdev.iovs == &g_bdev_io->internal.bounce_iov); 2878 CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt, 2879 alignment)); 2880 stub_complete_io(1); 2881 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0); 2882 2883 rc = spdk_bdev_read_blocks(desc, io_ch, buf + 4, 0, 1, io_done, NULL); 2884 CU_ASSERT(rc == 0); 2885 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 1); 2886 CU_ASSERT(g_bdev_io->u.bdev.iovs == &g_bdev_io->internal.bounce_iov); 2887 CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt, 2888 alignment)); 2889 stub_complete_io(1); 2890 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0); 2891 2892 /* Pass unaligned single buffer with 4096 alignment required */ 2893 alignment = 4096; 2894 bdev->required_alignment = spdk_u32log2(alignment); 2895 2896 rc = spdk_bdev_write_blocks(desc, io_ch, buf + 8, 0, 1, io_done, NULL); 2897 CU_ASSERT(rc == 0); 2898 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 1); 2899 CU_ASSERT(g_bdev_io->u.bdev.iovs == &g_bdev_io->internal.bounce_iov); 2900 CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt, 2901 alignment)); 2902 stub_complete_io(1); 2903 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0); 2904 2905 rc = spdk_bdev_read_blocks(desc, io_ch, buf + 8, 0, 1, io_done, NULL); 2906 CU_ASSERT(rc == 0); 2907 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 1); 2908 CU_ASSERT(g_bdev_io->u.bdev.iovs == &g_bdev_io->internal.bounce_iov); 2909 CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt, 2910 alignment)); 2911 stub_complete_io(1); 2912 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0); 2913 2914 /* Pass aligned iovs with no alignment required */ 2915 alignment = 1; 2916 bdev->required_alignment = spdk_u32log2(alignment); 2917 2918 iovcnt = 1; 2919 iovs[0].iov_base = buf; 2920 iovs[0].iov_len = 512; 2921 2922 rc = spdk_bdev_writev(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL); 2923 CU_ASSERT(rc == 0); 2924 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0); 2925 stub_complete_io(1); 2926 CU_ASSERT(g_bdev_io->u.bdev.iovs[0].iov_base == iovs[0].iov_base); 2927 2928 rc = spdk_bdev_readv(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL); 2929 CU_ASSERT(rc == 0); 2930 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0); 2931 stub_complete_io(1); 2932 CU_ASSERT(g_bdev_io->u.bdev.iovs[0].iov_base == iovs[0].iov_base); 2933 2934 /* Pass unaligned iovs with no alignment required */ 2935 alignment = 1; 2936 bdev->required_alignment = spdk_u32log2(alignment); 2937 2938 iovcnt = 2; 2939 iovs[0].iov_base = buf + 16; 2940 iovs[0].iov_len = 256; 2941 iovs[1].iov_base = buf + 16 + 256 + 32; 2942 iovs[1].iov_len = 256; 2943 2944 rc = spdk_bdev_writev(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL); 2945 CU_ASSERT(rc == 0); 2946 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0); 2947 stub_complete_io(1); 2948 CU_ASSERT(g_bdev_io->u.bdev.iovs[0].iov_base == iovs[0].iov_base); 2949 2950 rc = spdk_bdev_readv(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL); 2951 CU_ASSERT(rc == 0); 2952 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0); 2953 stub_complete_io(1); 2954 CU_ASSERT(g_bdev_io->u.bdev.iovs[0].iov_base == iovs[0].iov_base); 2955 2956 /* Pass unaligned iov with 2048 alignment required */ 2957 alignment = 2048; 2958 bdev->required_alignment = spdk_u32log2(alignment); 2959 2960 iovcnt = 2; 2961 iovs[0].iov_base = buf + 16; 2962 iovs[0].iov_len = 256; 2963 iovs[1].iov_base = buf + 16 + 256 + 32; 2964 iovs[1].iov_len = 256; 2965 2966 rc = spdk_bdev_writev(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL); 2967 CU_ASSERT(rc == 0); 2968 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == iovcnt); 2969 CU_ASSERT(g_bdev_io->u.bdev.iovs == &g_bdev_io->internal.bounce_iov); 2970 CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt, 2971 alignment)); 2972 stub_complete_io(1); 2973 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0); 2974 2975 rc = spdk_bdev_readv(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL); 2976 CU_ASSERT(rc == 0); 2977 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == iovcnt); 2978 CU_ASSERT(g_bdev_io->u.bdev.iovs == &g_bdev_io->internal.bounce_iov); 2979 CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt, 2980 alignment)); 2981 stub_complete_io(1); 2982 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0); 2983 2984 /* Pass iov without allocated buffer without alignment required */ 2985 alignment = 1; 2986 bdev->required_alignment = spdk_u32log2(alignment); 2987 2988 iovcnt = 1; 2989 iovs[0].iov_base = NULL; 2990 iovs[0].iov_len = 0; 2991 2992 rc = spdk_bdev_readv(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL); 2993 CU_ASSERT(rc == 0); 2994 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0); 2995 CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt, 2996 alignment)); 2997 stub_complete_io(1); 2998 2999 /* Pass iov without allocated buffer with 1024 alignment required */ 3000 alignment = 1024; 3001 bdev->required_alignment = spdk_u32log2(alignment); 3002 3003 iovcnt = 1; 3004 iovs[0].iov_base = NULL; 3005 iovs[0].iov_len = 0; 3006 3007 rc = spdk_bdev_readv(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL); 3008 CU_ASSERT(rc == 0); 3009 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0); 3010 CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt, 3011 alignment)); 3012 stub_complete_io(1); 3013 3014 spdk_put_io_channel(io_ch); 3015 spdk_bdev_close(desc); 3016 free_bdev(bdev); 3017 fn_table.submit_request = stub_submit_request; 3018 spdk_bdev_finish(bdev_fini_cb, NULL); 3019 poll_threads(); 3020 3021 free(buf); 3022 } 3023 3024 static void 3025 bdev_io_alignment_with_boundary(void) 3026 { 3027 struct spdk_bdev *bdev; 3028 struct spdk_bdev_desc *desc = NULL; 3029 struct spdk_io_channel *io_ch; 3030 struct spdk_bdev_opts bdev_opts = {}; 3031 int rc; 3032 void *buf = NULL; 3033 struct iovec iovs[2]; 3034 int iovcnt; 3035 uint64_t alignment; 3036 3037 spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts)); 3038 bdev_opts.bdev_io_pool_size = 20; 3039 bdev_opts.bdev_io_cache_size = 2; 3040 3041 bdev_opts.opts_size = sizeof(bdev_opts); 3042 rc = spdk_bdev_set_opts(&bdev_opts); 3043 CU_ASSERT(rc == 0); 3044 spdk_bdev_initialize(bdev_init_cb, NULL); 3045 3046 fn_table.submit_request = stub_submit_request_get_buf; 3047 bdev = allocate_bdev("bdev0"); 3048 3049 rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc); 3050 CU_ASSERT(rc == 0); 3051 CU_ASSERT(desc != NULL); 3052 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 3053 io_ch = spdk_bdev_get_io_channel(desc); 3054 CU_ASSERT(io_ch != NULL); 3055 3056 /* Create aligned buffer */ 3057 rc = posix_memalign(&buf, 4096, 131072); 3058 SPDK_CU_ASSERT_FATAL(rc == 0); 3059 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 3060 3061 /* 512 * 3 with 2 IO boundary, allocate small data buffer from bdev layer */ 3062 alignment = 512; 3063 bdev->required_alignment = spdk_u32log2(alignment); 3064 bdev->optimal_io_boundary = 2; 3065 bdev->split_on_optimal_io_boundary = true; 3066 3067 iovcnt = 1; 3068 iovs[0].iov_base = NULL; 3069 iovs[0].iov_len = 512 * 3; 3070 3071 rc = spdk_bdev_readv_blocks(desc, io_ch, iovs, iovcnt, 1, 3, io_done, NULL); 3072 CU_ASSERT(rc == 0); 3073 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 3074 stub_complete_io(2); 3075 3076 /* 8KiB with 16 IO boundary, allocate large data buffer from bdev layer */ 3077 alignment = 512; 3078 bdev->required_alignment = spdk_u32log2(alignment); 3079 bdev->optimal_io_boundary = 16; 3080 bdev->split_on_optimal_io_boundary = true; 3081 3082 iovcnt = 1; 3083 iovs[0].iov_base = NULL; 3084 iovs[0].iov_len = 512 * 16; 3085 3086 rc = spdk_bdev_readv_blocks(desc, io_ch, iovs, iovcnt, 1, 16, io_done, NULL); 3087 CU_ASSERT(rc == 0); 3088 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 3089 stub_complete_io(2); 3090 3091 /* 512 * 160 with 128 IO boundary, 63.5KiB + 16.5KiB for the two children requests */ 3092 alignment = 512; 3093 bdev->required_alignment = spdk_u32log2(alignment); 3094 bdev->optimal_io_boundary = 128; 3095 bdev->split_on_optimal_io_boundary = true; 3096 3097 iovcnt = 1; 3098 iovs[0].iov_base = buf + 16; 3099 iovs[0].iov_len = 512 * 160; 3100 rc = spdk_bdev_readv_blocks(desc, io_ch, iovs, iovcnt, 1, 160, io_done, NULL); 3101 CU_ASSERT(rc == 0); 3102 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 3103 stub_complete_io(2); 3104 3105 /* 512 * 3 with 2 IO boundary */ 3106 alignment = 512; 3107 bdev->required_alignment = spdk_u32log2(alignment); 3108 bdev->optimal_io_boundary = 2; 3109 bdev->split_on_optimal_io_boundary = true; 3110 3111 iovcnt = 2; 3112 iovs[0].iov_base = buf + 16; 3113 iovs[0].iov_len = 512; 3114 iovs[1].iov_base = buf + 16 + 512 + 32; 3115 iovs[1].iov_len = 1024; 3116 3117 rc = spdk_bdev_writev_blocks(desc, io_ch, iovs, iovcnt, 1, 3, io_done, NULL); 3118 CU_ASSERT(rc == 0); 3119 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 3120 stub_complete_io(2); 3121 3122 rc = spdk_bdev_readv_blocks(desc, io_ch, iovs, iovcnt, 1, 3, io_done, NULL); 3123 CU_ASSERT(rc == 0); 3124 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 3125 stub_complete_io(2); 3126 3127 /* 512 * 64 with 32 IO boundary */ 3128 bdev->optimal_io_boundary = 32; 3129 iovcnt = 2; 3130 iovs[0].iov_base = buf + 16; 3131 iovs[0].iov_len = 16384; 3132 iovs[1].iov_base = buf + 16 + 16384 + 32; 3133 iovs[1].iov_len = 16384; 3134 3135 rc = spdk_bdev_writev_blocks(desc, io_ch, iovs, iovcnt, 1, 64, io_done, NULL); 3136 CU_ASSERT(rc == 0); 3137 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 3); 3138 stub_complete_io(3); 3139 3140 rc = spdk_bdev_readv_blocks(desc, io_ch, iovs, iovcnt, 1, 64, io_done, NULL); 3141 CU_ASSERT(rc == 0); 3142 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 3); 3143 stub_complete_io(3); 3144 3145 /* 512 * 160 with 32 IO boundary */ 3146 iovcnt = 1; 3147 iovs[0].iov_base = buf + 16; 3148 iovs[0].iov_len = 16384 + 65536; 3149 3150 rc = spdk_bdev_writev_blocks(desc, io_ch, iovs, iovcnt, 1, 160, io_done, NULL); 3151 CU_ASSERT(rc == 0); 3152 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 6); 3153 stub_complete_io(6); 3154 3155 spdk_put_io_channel(io_ch); 3156 spdk_bdev_close(desc); 3157 free_bdev(bdev); 3158 fn_table.submit_request = stub_submit_request; 3159 spdk_bdev_finish(bdev_fini_cb, NULL); 3160 poll_threads(); 3161 3162 free(buf); 3163 } 3164 3165 static void 3166 histogram_status_cb(void *cb_arg, int status) 3167 { 3168 g_status = status; 3169 } 3170 3171 static void 3172 histogram_data_cb(void *cb_arg, int status, struct spdk_histogram_data *histogram) 3173 { 3174 g_status = status; 3175 g_histogram = histogram; 3176 } 3177 3178 static void 3179 histogram_io_count(void *ctx, uint64_t start, uint64_t end, uint64_t count, 3180 uint64_t total, uint64_t so_far) 3181 { 3182 g_count += count; 3183 } 3184 3185 static void 3186 bdev_histograms(void) 3187 { 3188 struct spdk_bdev *bdev; 3189 struct spdk_bdev_desc *desc = NULL; 3190 struct spdk_io_channel *ch; 3191 struct spdk_histogram_data *histogram; 3192 uint8_t buf[4096]; 3193 int rc; 3194 3195 spdk_bdev_initialize(bdev_init_cb, NULL); 3196 3197 bdev = allocate_bdev("bdev"); 3198 3199 rc = spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc); 3200 CU_ASSERT(rc == 0); 3201 CU_ASSERT(desc != NULL); 3202 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 3203 3204 ch = spdk_bdev_get_io_channel(desc); 3205 CU_ASSERT(ch != NULL); 3206 3207 /* Enable histogram */ 3208 g_status = -1; 3209 spdk_bdev_histogram_enable(bdev, histogram_status_cb, NULL, true); 3210 poll_threads(); 3211 CU_ASSERT(g_status == 0); 3212 CU_ASSERT(bdev->internal.histogram_enabled == true); 3213 3214 /* Allocate histogram */ 3215 histogram = spdk_histogram_data_alloc(); 3216 SPDK_CU_ASSERT_FATAL(histogram != NULL); 3217 3218 /* Check if histogram is zeroed */ 3219 spdk_bdev_histogram_get(bdev, histogram, histogram_data_cb, NULL); 3220 poll_threads(); 3221 CU_ASSERT(g_status == 0); 3222 SPDK_CU_ASSERT_FATAL(g_histogram != NULL); 3223 3224 g_count = 0; 3225 spdk_histogram_data_iterate(g_histogram, histogram_io_count, NULL); 3226 3227 CU_ASSERT(g_count == 0); 3228 3229 rc = spdk_bdev_write_blocks(desc, ch, buf, 0, 1, io_done, NULL); 3230 CU_ASSERT(rc == 0); 3231 3232 spdk_delay_us(10); 3233 stub_complete_io(1); 3234 poll_threads(); 3235 3236 rc = spdk_bdev_read_blocks(desc, ch, buf, 0, 1, io_done, NULL); 3237 CU_ASSERT(rc == 0); 3238 3239 spdk_delay_us(10); 3240 stub_complete_io(1); 3241 poll_threads(); 3242 3243 /* Check if histogram gathered data from all I/O channels */ 3244 g_histogram = NULL; 3245 spdk_bdev_histogram_get(bdev, histogram, histogram_data_cb, NULL); 3246 poll_threads(); 3247 CU_ASSERT(g_status == 0); 3248 CU_ASSERT(bdev->internal.histogram_enabled == true); 3249 SPDK_CU_ASSERT_FATAL(g_histogram != NULL); 3250 3251 g_count = 0; 3252 spdk_histogram_data_iterate(g_histogram, histogram_io_count, NULL); 3253 CU_ASSERT(g_count == 2); 3254 3255 /* Disable histogram */ 3256 spdk_bdev_histogram_enable(bdev, histogram_status_cb, NULL, false); 3257 poll_threads(); 3258 CU_ASSERT(g_status == 0); 3259 CU_ASSERT(bdev->internal.histogram_enabled == false); 3260 3261 /* Try to run histogram commands on disabled bdev */ 3262 spdk_bdev_histogram_get(bdev, histogram, histogram_data_cb, NULL); 3263 poll_threads(); 3264 CU_ASSERT(g_status == -EFAULT); 3265 3266 spdk_histogram_data_free(histogram); 3267 spdk_put_io_channel(ch); 3268 spdk_bdev_close(desc); 3269 free_bdev(bdev); 3270 spdk_bdev_finish(bdev_fini_cb, NULL); 3271 poll_threads(); 3272 } 3273 3274 static void 3275 _bdev_compare(bool emulated) 3276 { 3277 struct spdk_bdev *bdev; 3278 struct spdk_bdev_desc *desc = NULL; 3279 struct spdk_io_channel *ioch; 3280 struct ut_expected_io *expected_io; 3281 uint64_t offset, num_blocks; 3282 uint32_t num_completed; 3283 char aa_buf[512]; 3284 char bb_buf[512]; 3285 struct iovec compare_iov; 3286 uint8_t io_type; 3287 int rc; 3288 3289 if (emulated) { 3290 io_type = SPDK_BDEV_IO_TYPE_READ; 3291 } else { 3292 io_type = SPDK_BDEV_IO_TYPE_COMPARE; 3293 } 3294 3295 memset(aa_buf, 0xaa, sizeof(aa_buf)); 3296 memset(bb_buf, 0xbb, sizeof(bb_buf)); 3297 3298 g_io_types_supported[SPDK_BDEV_IO_TYPE_COMPARE] = !emulated; 3299 3300 spdk_bdev_initialize(bdev_init_cb, NULL); 3301 fn_table.submit_request = stub_submit_request_get_buf; 3302 bdev = allocate_bdev("bdev"); 3303 3304 rc = spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc); 3305 CU_ASSERT_EQUAL(rc, 0); 3306 SPDK_CU_ASSERT_FATAL(desc != NULL); 3307 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 3308 ioch = spdk_bdev_get_io_channel(desc); 3309 SPDK_CU_ASSERT_FATAL(ioch != NULL); 3310 3311 fn_table.submit_request = stub_submit_request_get_buf; 3312 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 3313 3314 offset = 50; 3315 num_blocks = 1; 3316 compare_iov.iov_base = aa_buf; 3317 compare_iov.iov_len = sizeof(aa_buf); 3318 3319 expected_io = ut_alloc_expected_io(io_type, offset, num_blocks, 0); 3320 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 3321 3322 g_io_done = false; 3323 g_compare_read_buf = aa_buf; 3324 g_compare_read_buf_len = sizeof(aa_buf); 3325 rc = spdk_bdev_comparev_blocks(desc, ioch, &compare_iov, 1, offset, num_blocks, io_done, NULL); 3326 CU_ASSERT_EQUAL(rc, 0); 3327 num_completed = stub_complete_io(1); 3328 CU_ASSERT_EQUAL(num_completed, 1); 3329 CU_ASSERT(g_io_done == true); 3330 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS); 3331 3332 expected_io = ut_alloc_expected_io(io_type, offset, num_blocks, 0); 3333 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 3334 3335 g_io_done = false; 3336 g_compare_read_buf = bb_buf; 3337 g_compare_read_buf_len = sizeof(bb_buf); 3338 rc = spdk_bdev_comparev_blocks(desc, ioch, &compare_iov, 1, offset, num_blocks, io_done, NULL); 3339 CU_ASSERT_EQUAL(rc, 0); 3340 num_completed = stub_complete_io(1); 3341 CU_ASSERT_EQUAL(num_completed, 1); 3342 CU_ASSERT(g_io_done == true); 3343 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_MISCOMPARE); 3344 3345 spdk_put_io_channel(ioch); 3346 spdk_bdev_close(desc); 3347 free_bdev(bdev); 3348 fn_table.submit_request = stub_submit_request; 3349 spdk_bdev_finish(bdev_fini_cb, NULL); 3350 poll_threads(); 3351 3352 g_io_types_supported[SPDK_BDEV_IO_TYPE_COMPARE] = true; 3353 3354 g_compare_read_buf = NULL; 3355 } 3356 3357 static void 3358 bdev_compare(void) 3359 { 3360 _bdev_compare(true); 3361 _bdev_compare(false); 3362 } 3363 3364 static void 3365 bdev_compare_and_write(void) 3366 { 3367 struct spdk_bdev *bdev; 3368 struct spdk_bdev_desc *desc = NULL; 3369 struct spdk_io_channel *ioch; 3370 struct ut_expected_io *expected_io; 3371 uint64_t offset, num_blocks; 3372 uint32_t num_completed; 3373 char aa_buf[512]; 3374 char bb_buf[512]; 3375 char cc_buf[512]; 3376 char write_buf[512]; 3377 struct iovec compare_iov; 3378 struct iovec write_iov; 3379 int rc; 3380 3381 memset(aa_buf, 0xaa, sizeof(aa_buf)); 3382 memset(bb_buf, 0xbb, sizeof(bb_buf)); 3383 memset(cc_buf, 0xcc, sizeof(cc_buf)); 3384 3385 g_io_types_supported[SPDK_BDEV_IO_TYPE_COMPARE] = false; 3386 3387 spdk_bdev_initialize(bdev_init_cb, NULL); 3388 fn_table.submit_request = stub_submit_request_get_buf; 3389 bdev = allocate_bdev("bdev"); 3390 3391 rc = spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc); 3392 CU_ASSERT_EQUAL(rc, 0); 3393 SPDK_CU_ASSERT_FATAL(desc != NULL); 3394 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 3395 ioch = spdk_bdev_get_io_channel(desc); 3396 SPDK_CU_ASSERT_FATAL(ioch != NULL); 3397 3398 fn_table.submit_request = stub_submit_request_get_buf; 3399 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 3400 3401 offset = 50; 3402 num_blocks = 1; 3403 compare_iov.iov_base = aa_buf; 3404 compare_iov.iov_len = sizeof(aa_buf); 3405 write_iov.iov_base = bb_buf; 3406 write_iov.iov_len = sizeof(bb_buf); 3407 3408 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, offset, num_blocks, 0); 3409 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 3410 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, offset, num_blocks, 0); 3411 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 3412 3413 g_io_done = false; 3414 g_compare_read_buf = aa_buf; 3415 g_compare_read_buf_len = sizeof(aa_buf); 3416 memset(write_buf, 0, sizeof(write_buf)); 3417 g_compare_write_buf = write_buf; 3418 g_compare_write_buf_len = sizeof(write_buf); 3419 rc = spdk_bdev_comparev_and_writev_blocks(desc, ioch, &compare_iov, 1, &write_iov, 1, 3420 offset, num_blocks, io_done, NULL); 3421 /* Trigger range locking */ 3422 poll_threads(); 3423 CU_ASSERT_EQUAL(rc, 0); 3424 num_completed = stub_complete_io(1); 3425 CU_ASSERT_EQUAL(num_completed, 1); 3426 CU_ASSERT(g_io_done == false); 3427 num_completed = stub_complete_io(1); 3428 /* Trigger range unlocking */ 3429 poll_threads(); 3430 CU_ASSERT_EQUAL(num_completed, 1); 3431 CU_ASSERT(g_io_done == true); 3432 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS); 3433 CU_ASSERT(memcmp(write_buf, bb_buf, sizeof(write_buf)) == 0); 3434 3435 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, offset, num_blocks, 0); 3436 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 3437 3438 g_io_done = false; 3439 g_compare_read_buf = cc_buf; 3440 g_compare_read_buf_len = sizeof(cc_buf); 3441 memset(write_buf, 0, sizeof(write_buf)); 3442 g_compare_write_buf = write_buf; 3443 g_compare_write_buf_len = sizeof(write_buf); 3444 rc = spdk_bdev_comparev_and_writev_blocks(desc, ioch, &compare_iov, 1, &write_iov, 1, 3445 offset, num_blocks, io_done, NULL); 3446 /* Trigger range locking */ 3447 poll_threads(); 3448 CU_ASSERT_EQUAL(rc, 0); 3449 num_completed = stub_complete_io(1); 3450 /* Trigger range unlocking earlier because we expect error here */ 3451 poll_threads(); 3452 CU_ASSERT_EQUAL(num_completed, 1); 3453 CU_ASSERT(g_io_done == true); 3454 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_MISCOMPARE); 3455 num_completed = stub_complete_io(1); 3456 CU_ASSERT_EQUAL(num_completed, 0); 3457 3458 spdk_put_io_channel(ioch); 3459 spdk_bdev_close(desc); 3460 free_bdev(bdev); 3461 fn_table.submit_request = stub_submit_request; 3462 spdk_bdev_finish(bdev_fini_cb, NULL); 3463 poll_threads(); 3464 3465 g_io_types_supported[SPDK_BDEV_IO_TYPE_COMPARE] = true; 3466 3467 g_compare_read_buf = NULL; 3468 g_compare_write_buf = NULL; 3469 } 3470 3471 static void 3472 bdev_write_zeroes(void) 3473 { 3474 struct spdk_bdev *bdev; 3475 struct spdk_bdev_desc *desc = NULL; 3476 struct spdk_io_channel *ioch; 3477 struct ut_expected_io *expected_io; 3478 uint64_t offset, num_io_blocks, num_blocks; 3479 uint32_t num_completed, num_requests; 3480 int rc; 3481 3482 spdk_bdev_initialize(bdev_init_cb, NULL); 3483 bdev = allocate_bdev("bdev"); 3484 3485 rc = spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc); 3486 CU_ASSERT_EQUAL(rc, 0); 3487 SPDK_CU_ASSERT_FATAL(desc != NULL); 3488 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 3489 ioch = spdk_bdev_get_io_channel(desc); 3490 SPDK_CU_ASSERT_FATAL(ioch != NULL); 3491 3492 fn_table.submit_request = stub_submit_request; 3493 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 3494 3495 /* First test that if the bdev supports write_zeroes, the request won't be split */ 3496 bdev->md_len = 0; 3497 bdev->blocklen = 4096; 3498 num_blocks = (ZERO_BUFFER_SIZE / bdev->blocklen) * 2; 3499 3500 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, 0, num_blocks, 0); 3501 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 3502 rc = spdk_bdev_write_zeroes_blocks(desc, ioch, 0, num_blocks, io_done, NULL); 3503 CU_ASSERT_EQUAL(rc, 0); 3504 num_completed = stub_complete_io(1); 3505 CU_ASSERT_EQUAL(num_completed, 1); 3506 3507 /* Check that if write zeroes is not supported it'll be replaced by regular writes */ 3508 ut_enable_io_type(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, false); 3509 num_io_blocks = ZERO_BUFFER_SIZE / bdev->blocklen; 3510 num_requests = 2; 3511 num_blocks = (ZERO_BUFFER_SIZE / bdev->blocklen) * num_requests; 3512 3513 for (offset = 0; offset < num_requests; ++offset) { 3514 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 3515 offset * num_io_blocks, num_io_blocks, 0); 3516 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 3517 } 3518 3519 rc = spdk_bdev_write_zeroes_blocks(desc, ioch, 0, num_blocks, io_done, NULL); 3520 CU_ASSERT_EQUAL(rc, 0); 3521 num_completed = stub_complete_io(num_requests); 3522 CU_ASSERT_EQUAL(num_completed, num_requests); 3523 3524 /* Check that the splitting is correct if bdev has interleaved metadata */ 3525 bdev->md_interleave = true; 3526 bdev->md_len = 64; 3527 bdev->blocklen = 4096 + 64; 3528 num_blocks = (ZERO_BUFFER_SIZE / bdev->blocklen) * 2; 3529 3530 num_requests = offset = 0; 3531 while (offset < num_blocks) { 3532 num_io_blocks = spdk_min(ZERO_BUFFER_SIZE / bdev->blocklen, num_blocks - offset); 3533 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 3534 offset, num_io_blocks, 0); 3535 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 3536 offset += num_io_blocks; 3537 num_requests++; 3538 } 3539 3540 rc = spdk_bdev_write_zeroes_blocks(desc, ioch, 0, num_blocks, io_done, NULL); 3541 CU_ASSERT_EQUAL(rc, 0); 3542 num_completed = stub_complete_io(num_requests); 3543 CU_ASSERT_EQUAL(num_completed, num_requests); 3544 num_completed = stub_complete_io(num_requests); 3545 assert(num_completed == 0); 3546 3547 /* Check the the same for separate metadata buffer */ 3548 bdev->md_interleave = false; 3549 bdev->md_len = 64; 3550 bdev->blocklen = 4096; 3551 3552 num_requests = offset = 0; 3553 while (offset < num_blocks) { 3554 num_io_blocks = spdk_min(ZERO_BUFFER_SIZE / (bdev->blocklen + bdev->md_len), num_blocks); 3555 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 3556 offset, num_io_blocks, 0); 3557 expected_io->md_buf = (char *)g_bdev_mgr.zero_buffer + num_io_blocks * bdev->blocklen; 3558 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 3559 offset += num_io_blocks; 3560 num_requests++; 3561 } 3562 3563 rc = spdk_bdev_write_zeroes_blocks(desc, ioch, 0, num_blocks, io_done, NULL); 3564 CU_ASSERT_EQUAL(rc, 0); 3565 num_completed = stub_complete_io(num_requests); 3566 CU_ASSERT_EQUAL(num_completed, num_requests); 3567 3568 ut_enable_io_type(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, true); 3569 spdk_put_io_channel(ioch); 3570 spdk_bdev_close(desc); 3571 free_bdev(bdev); 3572 spdk_bdev_finish(bdev_fini_cb, NULL); 3573 poll_threads(); 3574 } 3575 3576 static void 3577 bdev_zcopy_write(void) 3578 { 3579 struct spdk_bdev *bdev; 3580 struct spdk_bdev_desc *desc = NULL; 3581 struct spdk_io_channel *ioch; 3582 struct ut_expected_io *expected_io; 3583 uint64_t offset, num_blocks; 3584 uint32_t num_completed; 3585 char aa_buf[512]; 3586 struct iovec iov; 3587 int rc; 3588 const bool populate = false; 3589 const bool commit = true; 3590 3591 memset(aa_buf, 0xaa, sizeof(aa_buf)); 3592 3593 spdk_bdev_initialize(bdev_init_cb, NULL); 3594 bdev = allocate_bdev("bdev"); 3595 3596 rc = spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc); 3597 CU_ASSERT_EQUAL(rc, 0); 3598 SPDK_CU_ASSERT_FATAL(desc != NULL); 3599 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 3600 ioch = spdk_bdev_get_io_channel(desc); 3601 SPDK_CU_ASSERT_FATAL(ioch != NULL); 3602 3603 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 3604 3605 offset = 50; 3606 num_blocks = 1; 3607 iov.iov_base = NULL; 3608 iov.iov_len = 0; 3609 3610 g_zcopy_read_buf = (void *) 0x1122334455667788UL; 3611 g_zcopy_read_buf_len = (uint32_t) -1; 3612 /* Do a zcopy start for a write (populate=false) */ 3613 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_ZCOPY, offset, num_blocks, 0); 3614 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 3615 g_io_done = false; 3616 g_zcopy_write_buf = aa_buf; 3617 g_zcopy_write_buf_len = sizeof(aa_buf); 3618 g_zcopy_bdev_io = NULL; 3619 rc = spdk_bdev_zcopy_start(desc, ioch, &iov, 1, offset, num_blocks, populate, io_done, NULL); 3620 CU_ASSERT_EQUAL(rc, 0); 3621 num_completed = stub_complete_io(1); 3622 CU_ASSERT_EQUAL(num_completed, 1); 3623 CU_ASSERT(g_io_done == true); 3624 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS); 3625 /* Check that the iov has been set up */ 3626 CU_ASSERT(iov.iov_base == g_zcopy_write_buf); 3627 CU_ASSERT(iov.iov_len == g_zcopy_write_buf_len); 3628 /* Check that the bdev_io has been saved */ 3629 CU_ASSERT(g_zcopy_bdev_io != NULL); 3630 /* Now do the zcopy end for a write (commit=true) */ 3631 g_io_done = false; 3632 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_ZCOPY, offset, num_blocks, 0); 3633 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 3634 rc = spdk_bdev_zcopy_end(g_zcopy_bdev_io, commit, io_done, NULL); 3635 CU_ASSERT_EQUAL(rc, 0); 3636 num_completed = stub_complete_io(1); 3637 CU_ASSERT_EQUAL(num_completed, 1); 3638 CU_ASSERT(g_io_done == true); 3639 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS); 3640 /* Check the g_zcopy are reset by io_done */ 3641 CU_ASSERT(g_zcopy_write_buf == NULL); 3642 CU_ASSERT(g_zcopy_write_buf_len == 0); 3643 /* Check that io_done has freed the g_zcopy_bdev_io */ 3644 CU_ASSERT(g_zcopy_bdev_io == NULL); 3645 3646 /* Check the zcopy read buffer has not been touched which 3647 * ensures that the correct buffers were used. 3648 */ 3649 CU_ASSERT(g_zcopy_read_buf == (void *) 0x1122334455667788UL); 3650 CU_ASSERT(g_zcopy_read_buf_len == (uint32_t) -1); 3651 3652 spdk_put_io_channel(ioch); 3653 spdk_bdev_close(desc); 3654 free_bdev(bdev); 3655 spdk_bdev_finish(bdev_fini_cb, NULL); 3656 poll_threads(); 3657 } 3658 3659 static void 3660 bdev_zcopy_read(void) 3661 { 3662 struct spdk_bdev *bdev; 3663 struct spdk_bdev_desc *desc = NULL; 3664 struct spdk_io_channel *ioch; 3665 struct ut_expected_io *expected_io; 3666 uint64_t offset, num_blocks; 3667 uint32_t num_completed; 3668 char aa_buf[512]; 3669 struct iovec iov; 3670 int rc; 3671 const bool populate = true; 3672 const bool commit = false; 3673 3674 memset(aa_buf, 0xaa, sizeof(aa_buf)); 3675 3676 spdk_bdev_initialize(bdev_init_cb, NULL); 3677 bdev = allocate_bdev("bdev"); 3678 3679 rc = spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc); 3680 CU_ASSERT_EQUAL(rc, 0); 3681 SPDK_CU_ASSERT_FATAL(desc != NULL); 3682 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 3683 ioch = spdk_bdev_get_io_channel(desc); 3684 SPDK_CU_ASSERT_FATAL(ioch != NULL); 3685 3686 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 3687 3688 offset = 50; 3689 num_blocks = 1; 3690 iov.iov_base = NULL; 3691 iov.iov_len = 0; 3692 3693 g_zcopy_write_buf = (void *) 0x1122334455667788UL; 3694 g_zcopy_write_buf_len = (uint32_t) -1; 3695 3696 /* Do a zcopy start for a read (populate=true) */ 3697 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_ZCOPY, offset, num_blocks, 0); 3698 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 3699 g_io_done = false; 3700 g_zcopy_read_buf = aa_buf; 3701 g_zcopy_read_buf_len = sizeof(aa_buf); 3702 g_zcopy_bdev_io = NULL; 3703 rc = spdk_bdev_zcopy_start(desc, ioch, &iov, 1, offset, num_blocks, populate, io_done, NULL); 3704 CU_ASSERT_EQUAL(rc, 0); 3705 num_completed = stub_complete_io(1); 3706 CU_ASSERT_EQUAL(num_completed, 1); 3707 CU_ASSERT(g_io_done == true); 3708 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS); 3709 /* Check that the iov has been set up */ 3710 CU_ASSERT(iov.iov_base == g_zcopy_read_buf); 3711 CU_ASSERT(iov.iov_len == g_zcopy_read_buf_len); 3712 /* Check that the bdev_io has been saved */ 3713 CU_ASSERT(g_zcopy_bdev_io != NULL); 3714 3715 /* Now do the zcopy end for a read (commit=false) */ 3716 g_io_done = false; 3717 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_ZCOPY, offset, num_blocks, 0); 3718 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 3719 rc = spdk_bdev_zcopy_end(g_zcopy_bdev_io, commit, io_done, NULL); 3720 CU_ASSERT_EQUAL(rc, 0); 3721 num_completed = stub_complete_io(1); 3722 CU_ASSERT_EQUAL(num_completed, 1); 3723 CU_ASSERT(g_io_done == true); 3724 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS); 3725 /* Check the g_zcopy are reset by io_done */ 3726 CU_ASSERT(g_zcopy_read_buf == NULL); 3727 CU_ASSERT(g_zcopy_read_buf_len == 0); 3728 /* Check that io_done has freed the g_zcopy_bdev_io */ 3729 CU_ASSERT(g_zcopy_bdev_io == NULL); 3730 3731 /* Check the zcopy write buffer has not been touched which 3732 * ensures that the correct buffers were used. 3733 */ 3734 CU_ASSERT(g_zcopy_write_buf == (void *) 0x1122334455667788UL); 3735 CU_ASSERT(g_zcopy_write_buf_len == (uint32_t) -1); 3736 3737 spdk_put_io_channel(ioch); 3738 spdk_bdev_close(desc); 3739 free_bdev(bdev); 3740 spdk_bdev_finish(bdev_fini_cb, NULL); 3741 poll_threads(); 3742 } 3743 3744 static void 3745 bdev_open_while_hotremove(void) 3746 { 3747 struct spdk_bdev *bdev; 3748 struct spdk_bdev_desc *desc[2] = {}; 3749 int rc; 3750 3751 bdev = allocate_bdev("bdev"); 3752 3753 rc = spdk_bdev_open_ext("bdev", false, bdev_ut_event_cb, NULL, &desc[0]); 3754 CU_ASSERT(rc == 0); 3755 SPDK_CU_ASSERT_FATAL(desc[0] != NULL); 3756 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc[0])); 3757 3758 spdk_bdev_unregister(bdev, NULL, NULL); 3759 /* Bdev unregister is handled asynchronously. Poll thread to complete. */ 3760 poll_threads(); 3761 3762 rc = spdk_bdev_open_ext("bdev", false, bdev_ut_event_cb, NULL, &desc[1]); 3763 CU_ASSERT(rc == -ENODEV); 3764 SPDK_CU_ASSERT_FATAL(desc[1] == NULL); 3765 3766 spdk_bdev_close(desc[0]); 3767 free_bdev(bdev); 3768 } 3769 3770 static void 3771 bdev_close_while_hotremove(void) 3772 { 3773 struct spdk_bdev *bdev; 3774 struct spdk_bdev_desc *desc = NULL; 3775 int rc = 0; 3776 3777 bdev = allocate_bdev("bdev"); 3778 3779 rc = spdk_bdev_open_ext("bdev", true, bdev_open_cb1, &desc, &desc); 3780 CU_ASSERT_EQUAL(rc, 0); 3781 SPDK_CU_ASSERT_FATAL(desc != NULL); 3782 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 3783 3784 /* Simulate hot-unplug by unregistering bdev */ 3785 g_event_type1 = 0xFF; 3786 g_unregister_arg = NULL; 3787 g_unregister_rc = -1; 3788 spdk_bdev_unregister(bdev, bdev_unregister_cb, (void *)0x12345678); 3789 /* Close device while remove event is in flight */ 3790 spdk_bdev_close(desc); 3791 3792 /* Ensure that unregister callback is delayed */ 3793 CU_ASSERT_EQUAL(g_unregister_arg, NULL); 3794 CU_ASSERT_EQUAL(g_unregister_rc, -1); 3795 3796 poll_threads(); 3797 3798 /* Event callback shall not be issued because device was closed */ 3799 CU_ASSERT_EQUAL(g_event_type1, 0xFF); 3800 /* Unregister callback is issued */ 3801 CU_ASSERT_EQUAL(g_unregister_arg, (void *)0x12345678); 3802 CU_ASSERT_EQUAL(g_unregister_rc, 0); 3803 3804 free_bdev(bdev); 3805 } 3806 3807 static void 3808 bdev_open_ext(void) 3809 { 3810 struct spdk_bdev *bdev; 3811 struct spdk_bdev_desc *desc1 = NULL; 3812 struct spdk_bdev_desc *desc2 = NULL; 3813 int rc = 0; 3814 3815 bdev = allocate_bdev("bdev"); 3816 3817 rc = spdk_bdev_open_ext("bdev", true, NULL, NULL, &desc1); 3818 CU_ASSERT_EQUAL(rc, -EINVAL); 3819 3820 rc = spdk_bdev_open_ext("bdev", true, bdev_open_cb1, &desc1, &desc1); 3821 CU_ASSERT_EQUAL(rc, 0); 3822 3823 rc = spdk_bdev_open_ext("bdev", true, bdev_open_cb2, &desc2, &desc2); 3824 CU_ASSERT_EQUAL(rc, 0); 3825 3826 g_event_type1 = 0xFF; 3827 g_event_type2 = 0xFF; 3828 3829 /* Simulate hot-unplug by unregistering bdev */ 3830 spdk_bdev_unregister(bdev, NULL, NULL); 3831 poll_threads(); 3832 3833 /* Check if correct events have been triggered in event callback fn */ 3834 CU_ASSERT_EQUAL(g_event_type1, SPDK_BDEV_EVENT_REMOVE); 3835 CU_ASSERT_EQUAL(g_event_type2, SPDK_BDEV_EVENT_REMOVE); 3836 3837 free_bdev(bdev); 3838 poll_threads(); 3839 } 3840 3841 static void 3842 bdev_open_ext_unregister(void) 3843 { 3844 struct spdk_bdev *bdev; 3845 struct spdk_bdev_desc *desc1 = NULL; 3846 struct spdk_bdev_desc *desc2 = NULL; 3847 struct spdk_bdev_desc *desc3 = NULL; 3848 struct spdk_bdev_desc *desc4 = NULL; 3849 int rc = 0; 3850 3851 bdev = allocate_bdev("bdev"); 3852 3853 rc = spdk_bdev_open_ext("bdev", true, NULL, NULL, &desc1); 3854 CU_ASSERT_EQUAL(rc, -EINVAL); 3855 3856 rc = spdk_bdev_open_ext("bdev", true, bdev_open_cb1, &desc1, &desc1); 3857 CU_ASSERT_EQUAL(rc, 0); 3858 3859 rc = spdk_bdev_open_ext("bdev", true, bdev_open_cb2, &desc2, &desc2); 3860 CU_ASSERT_EQUAL(rc, 0); 3861 3862 rc = spdk_bdev_open_ext("bdev", true, bdev_open_cb3, &desc3, &desc3); 3863 CU_ASSERT_EQUAL(rc, 0); 3864 3865 rc = spdk_bdev_open_ext("bdev", true, bdev_open_cb4, &desc4, &desc4); 3866 CU_ASSERT_EQUAL(rc, 0); 3867 3868 g_event_type1 = 0xFF; 3869 g_event_type2 = 0xFF; 3870 g_event_type3 = 0xFF; 3871 g_event_type4 = 0xFF; 3872 3873 g_unregister_arg = NULL; 3874 g_unregister_rc = -1; 3875 3876 /* Simulate hot-unplug by unregistering bdev */ 3877 spdk_bdev_unregister(bdev, bdev_unregister_cb, (void *)0x12345678); 3878 3879 /* 3880 * Unregister is handled asynchronously and event callback 3881 * (i.e., above bdev_open_cbN) will be called. 3882 * For bdev_open_cb3 and bdev_open_cb4, it is intended to not 3883 * close the desc3 and desc4 so that the bdev is not closed. 3884 */ 3885 poll_threads(); 3886 3887 /* Check if correct events have been triggered in event callback fn */ 3888 CU_ASSERT_EQUAL(g_event_type1, SPDK_BDEV_EVENT_REMOVE); 3889 CU_ASSERT_EQUAL(g_event_type2, SPDK_BDEV_EVENT_REMOVE); 3890 CU_ASSERT_EQUAL(g_event_type3, SPDK_BDEV_EVENT_REMOVE); 3891 CU_ASSERT_EQUAL(g_event_type4, SPDK_BDEV_EVENT_REMOVE); 3892 3893 /* Check that unregister callback is delayed */ 3894 CU_ASSERT(g_unregister_arg == NULL); 3895 CU_ASSERT(g_unregister_rc == -1); 3896 3897 /* 3898 * Explicitly close desc3. As desc4 is still opened there, the 3899 * unergister callback is still delayed to execute. 3900 */ 3901 spdk_bdev_close(desc3); 3902 CU_ASSERT(g_unregister_arg == NULL); 3903 CU_ASSERT(g_unregister_rc == -1); 3904 3905 /* 3906 * Explicitly close desc4 to trigger the ongoing bdev unregister 3907 * operation after last desc is closed. 3908 */ 3909 spdk_bdev_close(desc4); 3910 3911 /* Poll the thread for the async unregister operation */ 3912 poll_threads(); 3913 3914 /* Check that unregister callback is executed */ 3915 CU_ASSERT(g_unregister_arg == (void *)0x12345678); 3916 CU_ASSERT(g_unregister_rc == 0); 3917 3918 free_bdev(bdev); 3919 poll_threads(); 3920 } 3921 3922 struct timeout_io_cb_arg { 3923 struct iovec iov; 3924 uint8_t type; 3925 }; 3926 3927 static int 3928 bdev_channel_count_submitted_io(struct spdk_bdev_channel *ch) 3929 { 3930 struct spdk_bdev_io *bdev_io; 3931 int n = 0; 3932 3933 if (!ch) { 3934 return -1; 3935 } 3936 3937 TAILQ_FOREACH(bdev_io, &ch->io_submitted, internal.ch_link) { 3938 n++; 3939 } 3940 3941 return n; 3942 } 3943 3944 static void 3945 bdev_channel_io_timeout_cb(void *cb_arg, struct spdk_bdev_io *bdev_io) 3946 { 3947 struct timeout_io_cb_arg *ctx = cb_arg; 3948 3949 ctx->type = bdev_io->type; 3950 ctx->iov.iov_base = bdev_io->iov.iov_base; 3951 ctx->iov.iov_len = bdev_io->iov.iov_len; 3952 } 3953 3954 static void 3955 bdev_set_io_timeout(void) 3956 { 3957 struct spdk_bdev *bdev; 3958 struct spdk_bdev_desc *desc = NULL; 3959 struct spdk_io_channel *io_ch = NULL; 3960 struct spdk_bdev_channel *bdev_ch = NULL; 3961 struct timeout_io_cb_arg cb_arg; 3962 3963 spdk_bdev_initialize(bdev_init_cb, NULL); 3964 3965 bdev = allocate_bdev("bdev"); 3966 3967 CU_ASSERT(spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc) == 0); 3968 SPDK_CU_ASSERT_FATAL(desc != NULL); 3969 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 3970 3971 io_ch = spdk_bdev_get_io_channel(desc); 3972 CU_ASSERT(io_ch != NULL); 3973 3974 bdev_ch = spdk_io_channel_get_ctx(io_ch); 3975 CU_ASSERT(TAILQ_EMPTY(&bdev_ch->io_submitted)); 3976 3977 /* This is the part1. 3978 * We will check the bdev_ch->io_submitted list 3979 * TO make sure that it can link IOs and only the user submitted IOs 3980 */ 3981 CU_ASSERT(spdk_bdev_read(desc, io_ch, (void *)0x1000, 0, 4096, io_done, NULL) == 0); 3982 CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 1); 3983 CU_ASSERT(spdk_bdev_write(desc, io_ch, (void *)0x2000, 0, 4096, io_done, NULL) == 0); 3984 CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 2); 3985 stub_complete_io(1); 3986 CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 1); 3987 stub_complete_io(1); 3988 CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 0); 3989 3990 /* Split IO */ 3991 bdev->optimal_io_boundary = 16; 3992 bdev->split_on_optimal_io_boundary = true; 3993 3994 /* Now test that a single-vector command is split correctly. 3995 * Offset 14, length 8, payload 0xF000 3996 * Child - Offset 14, length 2, payload 0xF000 3997 * Child - Offset 16, length 6, payload 0xF000 + 2 * 512 3998 * 3999 * Set up the expected values before calling spdk_bdev_read_blocks 4000 */ 4001 CU_ASSERT(spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 8, io_done, NULL) == 0); 4002 /* We count all submitted IOs including IO that are generated by splitting. */ 4003 CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 3); 4004 stub_complete_io(1); 4005 CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 2); 4006 stub_complete_io(1); 4007 CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 0); 4008 4009 /* Also include the reset IO */ 4010 CU_ASSERT(spdk_bdev_reset(desc, io_ch, io_done, NULL) == 0); 4011 CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 1); 4012 poll_threads(); 4013 stub_complete_io(1); 4014 poll_threads(); 4015 CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 0); 4016 4017 /* This is part2 4018 * Test the desc timeout poller register 4019 */ 4020 4021 /* Successfully set the timeout */ 4022 CU_ASSERT(spdk_bdev_set_timeout(desc, 30, bdev_channel_io_timeout_cb, &cb_arg) == 0); 4023 CU_ASSERT(desc->io_timeout_poller != NULL); 4024 CU_ASSERT(desc->timeout_in_sec == 30); 4025 CU_ASSERT(desc->cb_fn == bdev_channel_io_timeout_cb); 4026 CU_ASSERT(desc->cb_arg == &cb_arg); 4027 4028 /* Change the timeout limit */ 4029 CU_ASSERT(spdk_bdev_set_timeout(desc, 20, bdev_channel_io_timeout_cb, &cb_arg) == 0); 4030 CU_ASSERT(desc->io_timeout_poller != NULL); 4031 CU_ASSERT(desc->timeout_in_sec == 20); 4032 CU_ASSERT(desc->cb_fn == bdev_channel_io_timeout_cb); 4033 CU_ASSERT(desc->cb_arg == &cb_arg); 4034 4035 /* Disable the timeout */ 4036 CU_ASSERT(spdk_bdev_set_timeout(desc, 0, NULL, NULL) == 0); 4037 CU_ASSERT(desc->io_timeout_poller == NULL); 4038 4039 /* This the part3 4040 * We will test to catch timeout IO and check whether the IO is 4041 * the submitted one. 4042 */ 4043 memset(&cb_arg, 0, sizeof(cb_arg)); 4044 CU_ASSERT(spdk_bdev_set_timeout(desc, 30, bdev_channel_io_timeout_cb, &cb_arg) == 0); 4045 CU_ASSERT(spdk_bdev_write_blocks(desc, io_ch, (void *)0x1000, 0, 1, io_done, NULL) == 0); 4046 4047 /* Don't reach the limit */ 4048 spdk_delay_us(15 * spdk_get_ticks_hz()); 4049 poll_threads(); 4050 CU_ASSERT(cb_arg.type == 0); 4051 CU_ASSERT(cb_arg.iov.iov_base == (void *)0x0); 4052 CU_ASSERT(cb_arg.iov.iov_len == 0); 4053 4054 /* 15 + 15 = 30 reach the limit */ 4055 spdk_delay_us(15 * spdk_get_ticks_hz()); 4056 poll_threads(); 4057 CU_ASSERT(cb_arg.type == SPDK_BDEV_IO_TYPE_WRITE); 4058 CU_ASSERT(cb_arg.iov.iov_base == (void *)0x1000); 4059 CU_ASSERT(cb_arg.iov.iov_len == 1 * bdev->blocklen); 4060 stub_complete_io(1); 4061 4062 /* Use the same split IO above and check the IO */ 4063 memset(&cb_arg, 0, sizeof(cb_arg)); 4064 CU_ASSERT(spdk_bdev_write_blocks(desc, io_ch, (void *)0xF000, 14, 8, io_done, NULL) == 0); 4065 4066 /* The first child complete in time */ 4067 spdk_delay_us(15 * spdk_get_ticks_hz()); 4068 poll_threads(); 4069 stub_complete_io(1); 4070 CU_ASSERT(cb_arg.type == 0); 4071 CU_ASSERT(cb_arg.iov.iov_base == (void *)0x0); 4072 CU_ASSERT(cb_arg.iov.iov_len == 0); 4073 4074 /* The second child reach the limit */ 4075 spdk_delay_us(15 * spdk_get_ticks_hz()); 4076 poll_threads(); 4077 CU_ASSERT(cb_arg.type == SPDK_BDEV_IO_TYPE_WRITE); 4078 CU_ASSERT(cb_arg.iov.iov_base == (void *)0xF000); 4079 CU_ASSERT(cb_arg.iov.iov_len == 8 * bdev->blocklen); 4080 stub_complete_io(1); 4081 4082 /* Also include the reset IO */ 4083 memset(&cb_arg, 0, sizeof(cb_arg)); 4084 CU_ASSERT(spdk_bdev_reset(desc, io_ch, io_done, NULL) == 0); 4085 spdk_delay_us(30 * spdk_get_ticks_hz()); 4086 poll_threads(); 4087 CU_ASSERT(cb_arg.type == SPDK_BDEV_IO_TYPE_RESET); 4088 stub_complete_io(1); 4089 poll_threads(); 4090 4091 spdk_put_io_channel(io_ch); 4092 spdk_bdev_close(desc); 4093 free_bdev(bdev); 4094 spdk_bdev_finish(bdev_fini_cb, NULL); 4095 poll_threads(); 4096 } 4097 4098 static void 4099 bdev_set_qd_sampling(void) 4100 { 4101 struct spdk_bdev *bdev; 4102 struct spdk_bdev_desc *desc = NULL; 4103 struct spdk_io_channel *io_ch = NULL; 4104 struct spdk_bdev_channel *bdev_ch = NULL; 4105 struct timeout_io_cb_arg cb_arg; 4106 4107 spdk_bdev_initialize(bdev_init_cb, NULL); 4108 4109 bdev = allocate_bdev("bdev"); 4110 4111 CU_ASSERT(spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc) == 0); 4112 SPDK_CU_ASSERT_FATAL(desc != NULL); 4113 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 4114 4115 io_ch = spdk_bdev_get_io_channel(desc); 4116 CU_ASSERT(io_ch != NULL); 4117 4118 bdev_ch = spdk_io_channel_get_ctx(io_ch); 4119 CU_ASSERT(TAILQ_EMPTY(&bdev_ch->io_submitted)); 4120 4121 /* This is the part1. 4122 * We will check the bdev_ch->io_submitted list 4123 * TO make sure that it can link IOs and only the user submitted IOs 4124 */ 4125 CU_ASSERT(spdk_bdev_read(desc, io_ch, (void *)0x1000, 0, 4096, io_done, NULL) == 0); 4126 CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 1); 4127 CU_ASSERT(spdk_bdev_write(desc, io_ch, (void *)0x2000, 0, 4096, io_done, NULL) == 0); 4128 CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 2); 4129 stub_complete_io(1); 4130 CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 1); 4131 stub_complete_io(1); 4132 CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 0); 4133 4134 /* This is the part2. 4135 * Test the bdev's qd poller register 4136 */ 4137 /* 1st Successfully set the qd sampling period */ 4138 spdk_bdev_set_qd_sampling_period(bdev, 10); 4139 CU_ASSERT(bdev->internal.new_period == 10); 4140 CU_ASSERT(bdev->internal.period == 10); 4141 CU_ASSERT(bdev->internal.qd_desc != NULL); 4142 poll_threads(); 4143 CU_ASSERT(bdev->internal.qd_poller != NULL); 4144 4145 /* 2nd Change the qd sampling period */ 4146 spdk_bdev_set_qd_sampling_period(bdev, 20); 4147 CU_ASSERT(bdev->internal.new_period == 20); 4148 CU_ASSERT(bdev->internal.period == 10); 4149 CU_ASSERT(bdev->internal.qd_desc != NULL); 4150 poll_threads(); 4151 CU_ASSERT(bdev->internal.qd_poller != NULL); 4152 CU_ASSERT(bdev->internal.period == bdev->internal.new_period); 4153 4154 /* 3rd Change the qd sampling period and verify qd_poll_in_progress */ 4155 spdk_delay_us(20); 4156 poll_thread_times(0, 1); 4157 CU_ASSERT(bdev->internal.qd_poll_in_progress == true); 4158 spdk_bdev_set_qd_sampling_period(bdev, 30); 4159 CU_ASSERT(bdev->internal.new_period == 30); 4160 CU_ASSERT(bdev->internal.period == 20); 4161 poll_threads(); 4162 CU_ASSERT(bdev->internal.qd_poll_in_progress == false); 4163 CU_ASSERT(bdev->internal.period == bdev->internal.new_period); 4164 4165 /* 4th Disable the qd sampling period */ 4166 spdk_bdev_set_qd_sampling_period(bdev, 0); 4167 CU_ASSERT(bdev->internal.new_period == 0); 4168 CU_ASSERT(bdev->internal.period == 30); 4169 poll_threads(); 4170 CU_ASSERT(bdev->internal.qd_poller == NULL); 4171 CU_ASSERT(bdev->internal.period == bdev->internal.new_period); 4172 CU_ASSERT(bdev->internal.qd_desc == NULL); 4173 4174 /* This is the part3. 4175 * We will test the submitted IO and reset works 4176 * properly with the qd sampling. 4177 */ 4178 memset(&cb_arg, 0, sizeof(cb_arg)); 4179 spdk_bdev_set_qd_sampling_period(bdev, 1); 4180 poll_threads(); 4181 4182 CU_ASSERT(spdk_bdev_write(desc, io_ch, (void *)0x2000, 0, 4096, io_done, NULL) == 0); 4183 CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 1); 4184 4185 /* Also include the reset IO */ 4186 memset(&cb_arg, 0, sizeof(cb_arg)); 4187 CU_ASSERT(spdk_bdev_reset(desc, io_ch, io_done, NULL) == 0); 4188 poll_threads(); 4189 4190 /* Close the desc */ 4191 spdk_put_io_channel(io_ch); 4192 spdk_bdev_close(desc); 4193 4194 /* Complete the submitted IO and reset */ 4195 stub_complete_io(2); 4196 poll_threads(); 4197 4198 free_bdev(bdev); 4199 spdk_bdev_finish(bdev_fini_cb, NULL); 4200 poll_threads(); 4201 } 4202 4203 static void 4204 lba_range_overlap(void) 4205 { 4206 struct lba_range r1, r2; 4207 4208 r1.offset = 100; 4209 r1.length = 50; 4210 4211 r2.offset = 0; 4212 r2.length = 1; 4213 CU_ASSERT(!bdev_lba_range_overlapped(&r1, &r2)); 4214 4215 r2.offset = 0; 4216 r2.length = 100; 4217 CU_ASSERT(!bdev_lba_range_overlapped(&r1, &r2)); 4218 4219 r2.offset = 0; 4220 r2.length = 110; 4221 CU_ASSERT(bdev_lba_range_overlapped(&r1, &r2)); 4222 4223 r2.offset = 100; 4224 r2.length = 10; 4225 CU_ASSERT(bdev_lba_range_overlapped(&r1, &r2)); 4226 4227 r2.offset = 110; 4228 r2.length = 20; 4229 CU_ASSERT(bdev_lba_range_overlapped(&r1, &r2)); 4230 4231 r2.offset = 140; 4232 r2.length = 150; 4233 CU_ASSERT(bdev_lba_range_overlapped(&r1, &r2)); 4234 4235 r2.offset = 130; 4236 r2.length = 200; 4237 CU_ASSERT(bdev_lba_range_overlapped(&r1, &r2)); 4238 4239 r2.offset = 150; 4240 r2.length = 100; 4241 CU_ASSERT(!bdev_lba_range_overlapped(&r1, &r2)); 4242 4243 r2.offset = 110; 4244 r2.length = 0; 4245 CU_ASSERT(!bdev_lba_range_overlapped(&r1, &r2)); 4246 } 4247 4248 static bool g_lock_lba_range_done; 4249 static bool g_unlock_lba_range_done; 4250 4251 static void 4252 lock_lba_range_done(void *ctx, int status) 4253 { 4254 g_lock_lba_range_done = true; 4255 } 4256 4257 static void 4258 unlock_lba_range_done(void *ctx, int status) 4259 { 4260 g_unlock_lba_range_done = true; 4261 } 4262 4263 static void 4264 lock_lba_range_check_ranges(void) 4265 { 4266 struct spdk_bdev *bdev; 4267 struct spdk_bdev_desc *desc = NULL; 4268 struct spdk_io_channel *io_ch; 4269 struct spdk_bdev_channel *channel; 4270 struct lba_range *range; 4271 int ctx1; 4272 int rc; 4273 4274 spdk_bdev_initialize(bdev_init_cb, NULL); 4275 4276 bdev = allocate_bdev("bdev0"); 4277 4278 rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc); 4279 CU_ASSERT(rc == 0); 4280 CU_ASSERT(desc != NULL); 4281 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 4282 io_ch = spdk_bdev_get_io_channel(desc); 4283 CU_ASSERT(io_ch != NULL); 4284 channel = spdk_io_channel_get_ctx(io_ch); 4285 4286 g_lock_lba_range_done = false; 4287 rc = bdev_lock_lba_range(desc, io_ch, 20, 10, lock_lba_range_done, &ctx1); 4288 CU_ASSERT(rc == 0); 4289 poll_threads(); 4290 4291 CU_ASSERT(g_lock_lba_range_done == true); 4292 range = TAILQ_FIRST(&channel->locked_ranges); 4293 SPDK_CU_ASSERT_FATAL(range != NULL); 4294 CU_ASSERT(range->offset == 20); 4295 CU_ASSERT(range->length == 10); 4296 CU_ASSERT(range->owner_ch == channel); 4297 4298 /* Unlocks must exactly match a lock. */ 4299 g_unlock_lba_range_done = false; 4300 rc = bdev_unlock_lba_range(desc, io_ch, 20, 1, unlock_lba_range_done, &ctx1); 4301 CU_ASSERT(rc == -EINVAL); 4302 CU_ASSERT(g_unlock_lba_range_done == false); 4303 4304 rc = bdev_unlock_lba_range(desc, io_ch, 20, 10, unlock_lba_range_done, &ctx1); 4305 CU_ASSERT(rc == 0); 4306 spdk_delay_us(100); 4307 poll_threads(); 4308 4309 CU_ASSERT(g_unlock_lba_range_done == true); 4310 CU_ASSERT(TAILQ_EMPTY(&channel->locked_ranges)); 4311 4312 spdk_put_io_channel(io_ch); 4313 spdk_bdev_close(desc); 4314 free_bdev(bdev); 4315 spdk_bdev_finish(bdev_fini_cb, NULL); 4316 poll_threads(); 4317 } 4318 4319 static void 4320 lock_lba_range_with_io_outstanding(void) 4321 { 4322 struct spdk_bdev *bdev; 4323 struct spdk_bdev_desc *desc = NULL; 4324 struct spdk_io_channel *io_ch; 4325 struct spdk_bdev_channel *channel; 4326 struct lba_range *range; 4327 char buf[4096]; 4328 int ctx1; 4329 int rc; 4330 4331 spdk_bdev_initialize(bdev_init_cb, NULL); 4332 4333 bdev = allocate_bdev("bdev0"); 4334 4335 rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc); 4336 CU_ASSERT(rc == 0); 4337 CU_ASSERT(desc != NULL); 4338 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 4339 io_ch = spdk_bdev_get_io_channel(desc); 4340 CU_ASSERT(io_ch != NULL); 4341 channel = spdk_io_channel_get_ctx(io_ch); 4342 4343 g_io_done = false; 4344 rc = spdk_bdev_read_blocks(desc, io_ch, buf, 20, 1, io_done, &ctx1); 4345 CU_ASSERT(rc == 0); 4346 4347 g_lock_lba_range_done = false; 4348 rc = bdev_lock_lba_range(desc, io_ch, 20, 10, lock_lba_range_done, &ctx1); 4349 CU_ASSERT(rc == 0); 4350 poll_threads(); 4351 4352 /* The lock should immediately become valid, since there are no outstanding 4353 * write I/O. 4354 */ 4355 CU_ASSERT(g_io_done == false); 4356 CU_ASSERT(g_lock_lba_range_done == true); 4357 range = TAILQ_FIRST(&channel->locked_ranges); 4358 SPDK_CU_ASSERT_FATAL(range != NULL); 4359 CU_ASSERT(range->offset == 20); 4360 CU_ASSERT(range->length == 10); 4361 CU_ASSERT(range->owner_ch == channel); 4362 CU_ASSERT(range->locked_ctx == &ctx1); 4363 4364 rc = bdev_unlock_lba_range(desc, io_ch, 20, 10, lock_lba_range_done, &ctx1); 4365 CU_ASSERT(rc == 0); 4366 stub_complete_io(1); 4367 spdk_delay_us(100); 4368 poll_threads(); 4369 4370 CU_ASSERT(TAILQ_EMPTY(&channel->locked_ranges)); 4371 4372 /* Now try again, but with a write I/O. */ 4373 g_io_done = false; 4374 rc = spdk_bdev_write_blocks(desc, io_ch, buf, 20, 1, io_done, &ctx1); 4375 CU_ASSERT(rc == 0); 4376 4377 g_lock_lba_range_done = false; 4378 rc = bdev_lock_lba_range(desc, io_ch, 20, 10, lock_lba_range_done, &ctx1); 4379 CU_ASSERT(rc == 0); 4380 poll_threads(); 4381 4382 /* The lock should not be fully valid yet, since a write I/O is outstanding. 4383 * But note that the range should be on the channel's locked_list, to make sure no 4384 * new write I/O are started. 4385 */ 4386 CU_ASSERT(g_io_done == false); 4387 CU_ASSERT(g_lock_lba_range_done == false); 4388 range = TAILQ_FIRST(&channel->locked_ranges); 4389 SPDK_CU_ASSERT_FATAL(range != NULL); 4390 CU_ASSERT(range->offset == 20); 4391 CU_ASSERT(range->length == 10); 4392 4393 /* Complete the write I/O. This should make the lock valid (checked by confirming 4394 * our callback was invoked). 4395 */ 4396 stub_complete_io(1); 4397 spdk_delay_us(100); 4398 poll_threads(); 4399 CU_ASSERT(g_io_done == true); 4400 CU_ASSERT(g_lock_lba_range_done == true); 4401 4402 rc = bdev_unlock_lba_range(desc, io_ch, 20, 10, unlock_lba_range_done, &ctx1); 4403 CU_ASSERT(rc == 0); 4404 poll_threads(); 4405 4406 CU_ASSERT(TAILQ_EMPTY(&channel->locked_ranges)); 4407 4408 spdk_put_io_channel(io_ch); 4409 spdk_bdev_close(desc); 4410 free_bdev(bdev); 4411 spdk_bdev_finish(bdev_fini_cb, NULL); 4412 poll_threads(); 4413 } 4414 4415 static void 4416 lock_lba_range_overlapped(void) 4417 { 4418 struct spdk_bdev *bdev; 4419 struct spdk_bdev_desc *desc = NULL; 4420 struct spdk_io_channel *io_ch; 4421 struct spdk_bdev_channel *channel; 4422 struct lba_range *range; 4423 int ctx1; 4424 int rc; 4425 4426 spdk_bdev_initialize(bdev_init_cb, NULL); 4427 4428 bdev = allocate_bdev("bdev0"); 4429 4430 rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc); 4431 CU_ASSERT(rc == 0); 4432 CU_ASSERT(desc != NULL); 4433 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 4434 io_ch = spdk_bdev_get_io_channel(desc); 4435 CU_ASSERT(io_ch != NULL); 4436 channel = spdk_io_channel_get_ctx(io_ch); 4437 4438 /* Lock range 20-29. */ 4439 g_lock_lba_range_done = false; 4440 rc = bdev_lock_lba_range(desc, io_ch, 20, 10, lock_lba_range_done, &ctx1); 4441 CU_ASSERT(rc == 0); 4442 poll_threads(); 4443 4444 CU_ASSERT(g_lock_lba_range_done == true); 4445 range = TAILQ_FIRST(&channel->locked_ranges); 4446 SPDK_CU_ASSERT_FATAL(range != NULL); 4447 CU_ASSERT(range->offset == 20); 4448 CU_ASSERT(range->length == 10); 4449 4450 /* Try to lock range 25-39. It should not lock immediately, since it overlaps with 4451 * 20-29. 4452 */ 4453 g_lock_lba_range_done = false; 4454 rc = bdev_lock_lba_range(desc, io_ch, 25, 15, lock_lba_range_done, &ctx1); 4455 CU_ASSERT(rc == 0); 4456 poll_threads(); 4457 4458 CU_ASSERT(g_lock_lba_range_done == false); 4459 range = TAILQ_FIRST(&bdev->internal.pending_locked_ranges); 4460 SPDK_CU_ASSERT_FATAL(range != NULL); 4461 CU_ASSERT(range->offset == 25); 4462 CU_ASSERT(range->length == 15); 4463 4464 /* Unlock 20-29. This should result in range 25-39 now getting locked since it 4465 * no longer overlaps with an active lock. 4466 */ 4467 g_unlock_lba_range_done = false; 4468 rc = bdev_unlock_lba_range(desc, io_ch, 20, 10, unlock_lba_range_done, &ctx1); 4469 CU_ASSERT(rc == 0); 4470 poll_threads(); 4471 4472 CU_ASSERT(g_unlock_lba_range_done == true); 4473 CU_ASSERT(TAILQ_EMPTY(&bdev->internal.pending_locked_ranges)); 4474 range = TAILQ_FIRST(&channel->locked_ranges); 4475 SPDK_CU_ASSERT_FATAL(range != NULL); 4476 CU_ASSERT(range->offset == 25); 4477 CU_ASSERT(range->length == 15); 4478 4479 /* Lock 40-59. This should immediately lock since it does not overlap with the 4480 * currently active 25-39 lock. 4481 */ 4482 g_lock_lba_range_done = false; 4483 rc = bdev_lock_lba_range(desc, io_ch, 40, 20, lock_lba_range_done, &ctx1); 4484 CU_ASSERT(rc == 0); 4485 poll_threads(); 4486 4487 CU_ASSERT(g_lock_lba_range_done == true); 4488 range = TAILQ_FIRST(&bdev->internal.locked_ranges); 4489 SPDK_CU_ASSERT_FATAL(range != NULL); 4490 range = TAILQ_NEXT(range, tailq); 4491 SPDK_CU_ASSERT_FATAL(range != NULL); 4492 CU_ASSERT(range->offset == 40); 4493 CU_ASSERT(range->length == 20); 4494 4495 /* Try to lock 35-44. Note that this overlaps with both 25-39 and 40-59. */ 4496 g_lock_lba_range_done = false; 4497 rc = bdev_lock_lba_range(desc, io_ch, 35, 10, lock_lba_range_done, &ctx1); 4498 CU_ASSERT(rc == 0); 4499 poll_threads(); 4500 4501 CU_ASSERT(g_lock_lba_range_done == false); 4502 range = TAILQ_FIRST(&bdev->internal.pending_locked_ranges); 4503 SPDK_CU_ASSERT_FATAL(range != NULL); 4504 CU_ASSERT(range->offset == 35); 4505 CU_ASSERT(range->length == 10); 4506 4507 /* Unlock 25-39. Make sure that 35-44 is still in the pending list, since 4508 * the 40-59 lock is still active. 4509 */ 4510 g_unlock_lba_range_done = false; 4511 rc = bdev_unlock_lba_range(desc, io_ch, 25, 15, unlock_lba_range_done, &ctx1); 4512 CU_ASSERT(rc == 0); 4513 poll_threads(); 4514 4515 CU_ASSERT(g_unlock_lba_range_done == true); 4516 CU_ASSERT(g_lock_lba_range_done == false); 4517 range = TAILQ_FIRST(&bdev->internal.pending_locked_ranges); 4518 SPDK_CU_ASSERT_FATAL(range != NULL); 4519 CU_ASSERT(range->offset == 35); 4520 CU_ASSERT(range->length == 10); 4521 4522 /* Unlock 40-59. This should result in 35-44 now getting locked, since there are 4523 * no longer any active overlapping locks. 4524 */ 4525 g_unlock_lba_range_done = false; 4526 rc = bdev_unlock_lba_range(desc, io_ch, 40, 20, unlock_lba_range_done, &ctx1); 4527 CU_ASSERT(rc == 0); 4528 poll_threads(); 4529 4530 CU_ASSERT(g_unlock_lba_range_done == true); 4531 CU_ASSERT(g_lock_lba_range_done == true); 4532 CU_ASSERT(TAILQ_EMPTY(&bdev->internal.pending_locked_ranges)); 4533 range = TAILQ_FIRST(&bdev->internal.locked_ranges); 4534 SPDK_CU_ASSERT_FATAL(range != NULL); 4535 CU_ASSERT(range->offset == 35); 4536 CU_ASSERT(range->length == 10); 4537 4538 /* Finally, unlock 35-44. */ 4539 g_unlock_lba_range_done = false; 4540 rc = bdev_unlock_lba_range(desc, io_ch, 35, 10, unlock_lba_range_done, &ctx1); 4541 CU_ASSERT(rc == 0); 4542 poll_threads(); 4543 4544 CU_ASSERT(g_unlock_lba_range_done == true); 4545 CU_ASSERT(TAILQ_EMPTY(&bdev->internal.locked_ranges)); 4546 4547 spdk_put_io_channel(io_ch); 4548 spdk_bdev_close(desc); 4549 free_bdev(bdev); 4550 spdk_bdev_finish(bdev_fini_cb, NULL); 4551 poll_threads(); 4552 } 4553 4554 static void 4555 abort_done(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg) 4556 { 4557 g_abort_done = true; 4558 g_abort_status = bdev_io->internal.status; 4559 spdk_bdev_free_io(bdev_io); 4560 } 4561 4562 static void 4563 bdev_io_abort(void) 4564 { 4565 struct spdk_bdev *bdev; 4566 struct spdk_bdev_desc *desc = NULL; 4567 struct spdk_io_channel *io_ch; 4568 struct spdk_bdev_channel *channel; 4569 struct spdk_bdev_mgmt_channel *mgmt_ch; 4570 struct spdk_bdev_opts bdev_opts = {}; 4571 struct iovec iov[BDEV_IO_NUM_CHILD_IOV * 2]; 4572 uint64_t io_ctx1 = 0, io_ctx2 = 0, i; 4573 int rc; 4574 4575 spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts)); 4576 bdev_opts.bdev_io_pool_size = 7; 4577 bdev_opts.bdev_io_cache_size = 2; 4578 4579 rc = spdk_bdev_set_opts(&bdev_opts); 4580 CU_ASSERT(rc == 0); 4581 spdk_bdev_initialize(bdev_init_cb, NULL); 4582 4583 bdev = allocate_bdev("bdev0"); 4584 4585 rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc); 4586 CU_ASSERT(rc == 0); 4587 CU_ASSERT(desc != NULL); 4588 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 4589 io_ch = spdk_bdev_get_io_channel(desc); 4590 CU_ASSERT(io_ch != NULL); 4591 channel = spdk_io_channel_get_ctx(io_ch); 4592 mgmt_ch = channel->shared_resource->mgmt_ch; 4593 4594 g_abort_done = false; 4595 4596 ut_enable_io_type(SPDK_BDEV_IO_TYPE_ABORT, false); 4597 4598 rc = spdk_bdev_abort(desc, io_ch, &io_ctx1, abort_done, NULL); 4599 CU_ASSERT(rc == -ENOTSUP); 4600 4601 ut_enable_io_type(SPDK_BDEV_IO_TYPE_ABORT, true); 4602 4603 rc = spdk_bdev_abort(desc, io_ch, &io_ctx2, abort_done, NULL); 4604 CU_ASSERT(rc == 0); 4605 CU_ASSERT(g_abort_done == true); 4606 CU_ASSERT(g_abort_status == SPDK_BDEV_IO_STATUS_FAILED); 4607 4608 /* Test the case that the target I/O was successfully aborted. */ 4609 g_io_done = false; 4610 4611 rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, &io_ctx1); 4612 CU_ASSERT(rc == 0); 4613 CU_ASSERT(g_io_done == false); 4614 4615 g_abort_done = false; 4616 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 4617 4618 rc = spdk_bdev_abort(desc, io_ch, &io_ctx1, abort_done, NULL); 4619 CU_ASSERT(rc == 0); 4620 CU_ASSERT(g_io_done == true); 4621 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED); 4622 stub_complete_io(1); 4623 CU_ASSERT(g_abort_done == true); 4624 CU_ASSERT(g_abort_status == SPDK_BDEV_IO_STATUS_SUCCESS); 4625 4626 /* Test the case that the target I/O was not aborted because it completed 4627 * in the middle of execution of the abort. 4628 */ 4629 g_io_done = false; 4630 4631 rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, &io_ctx1); 4632 CU_ASSERT(rc == 0); 4633 CU_ASSERT(g_io_done == false); 4634 4635 g_abort_done = false; 4636 g_io_exp_status = SPDK_BDEV_IO_STATUS_FAILED; 4637 4638 rc = spdk_bdev_abort(desc, io_ch, &io_ctx1, abort_done, NULL); 4639 CU_ASSERT(rc == 0); 4640 CU_ASSERT(g_io_done == false); 4641 4642 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 4643 stub_complete_io(1); 4644 CU_ASSERT(g_io_done == true); 4645 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS); 4646 4647 g_io_exp_status = SPDK_BDEV_IO_STATUS_FAILED; 4648 stub_complete_io(1); 4649 CU_ASSERT(g_abort_done == true); 4650 CU_ASSERT(g_abort_status == SPDK_BDEV_IO_STATUS_SUCCESS); 4651 4652 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 4653 4654 bdev->optimal_io_boundary = 16; 4655 bdev->split_on_optimal_io_boundary = true; 4656 4657 /* Test that a single-vector command which is split is aborted correctly. 4658 * Offset 14, length 8, payload 0xF000 4659 * Child - Offset 14, length 2, payload 0xF000 4660 * Child - Offset 16, length 6, payload 0xF000 + 2 * 512 4661 */ 4662 g_io_done = false; 4663 4664 rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 8, io_done, &io_ctx1); 4665 CU_ASSERT(rc == 0); 4666 CU_ASSERT(g_io_done == false); 4667 4668 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 4669 4670 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 4671 4672 rc = spdk_bdev_abort(desc, io_ch, &io_ctx1, abort_done, NULL); 4673 CU_ASSERT(rc == 0); 4674 CU_ASSERT(g_io_done == true); 4675 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED); 4676 stub_complete_io(2); 4677 CU_ASSERT(g_abort_done == true); 4678 CU_ASSERT(g_abort_status == SPDK_BDEV_IO_STATUS_SUCCESS); 4679 4680 /* Test that a multi-vector command that needs to be split by strip and then 4681 * needs to be split is aborted correctly. Abort is requested before the second 4682 * child I/O was submitted. The parent I/O should complete with failure without 4683 * submitting the second child I/O. 4684 */ 4685 for (i = 0; i < BDEV_IO_NUM_CHILD_IOV * 2; i++) { 4686 iov[i].iov_base = (void *)((i + 1) * 0x10000); 4687 iov[i].iov_len = 512; 4688 } 4689 4690 bdev->optimal_io_boundary = BDEV_IO_NUM_CHILD_IOV; 4691 g_io_done = false; 4692 rc = spdk_bdev_readv_blocks(desc, io_ch, iov, BDEV_IO_NUM_CHILD_IOV * 2, 0, 4693 BDEV_IO_NUM_CHILD_IOV * 2, io_done, &io_ctx1); 4694 CU_ASSERT(rc == 0); 4695 CU_ASSERT(g_io_done == false); 4696 4697 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 4698 4699 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 4700 4701 rc = spdk_bdev_abort(desc, io_ch, &io_ctx1, abort_done, NULL); 4702 CU_ASSERT(rc == 0); 4703 CU_ASSERT(g_io_done == true); 4704 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED); 4705 stub_complete_io(1); 4706 CU_ASSERT(g_abort_done == true); 4707 CU_ASSERT(g_abort_status == SPDK_BDEV_IO_STATUS_SUCCESS); 4708 4709 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 4710 4711 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 4712 4713 bdev->optimal_io_boundary = 16; 4714 g_io_done = false; 4715 4716 /* Test that a ingle-vector command which is split is aborted correctly. 4717 * Differently from the above, the child abort request will be submitted 4718 * sequentially due to the capacity of spdk_bdev_io. 4719 */ 4720 rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 50, io_done, &io_ctx1); 4721 CU_ASSERT(rc == 0); 4722 CU_ASSERT(g_io_done == false); 4723 4724 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 4); 4725 4726 g_abort_done = false; 4727 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 4728 4729 rc = spdk_bdev_abort(desc, io_ch, &io_ctx1, abort_done, NULL); 4730 CU_ASSERT(rc == 0); 4731 CU_ASSERT(!TAILQ_EMPTY(&mgmt_ch->io_wait_queue)); 4732 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 4); 4733 4734 stub_complete_io(1); 4735 CU_ASSERT(g_io_done == true); 4736 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED); 4737 stub_complete_io(3); 4738 CU_ASSERT(g_abort_done == true); 4739 CU_ASSERT(g_abort_status == SPDK_BDEV_IO_STATUS_SUCCESS); 4740 4741 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 4742 4743 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 4744 4745 spdk_put_io_channel(io_ch); 4746 spdk_bdev_close(desc); 4747 free_bdev(bdev); 4748 spdk_bdev_finish(bdev_fini_cb, NULL); 4749 poll_threads(); 4750 } 4751 4752 static void 4753 bdev_unmap(void) 4754 { 4755 struct spdk_bdev *bdev; 4756 struct spdk_bdev_desc *desc = NULL; 4757 struct spdk_io_channel *ioch; 4758 struct spdk_bdev_channel *bdev_ch; 4759 struct ut_expected_io *expected_io; 4760 struct spdk_bdev_opts bdev_opts = {}; 4761 uint32_t i, num_outstanding; 4762 uint64_t offset, num_blocks, max_unmap_blocks, num_children; 4763 int rc; 4764 4765 spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts)); 4766 bdev_opts.bdev_io_pool_size = 512; 4767 bdev_opts.bdev_io_cache_size = 64; 4768 rc = spdk_bdev_set_opts(&bdev_opts); 4769 CU_ASSERT(rc == 0); 4770 4771 spdk_bdev_initialize(bdev_init_cb, NULL); 4772 bdev = allocate_bdev("bdev"); 4773 4774 rc = spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc); 4775 CU_ASSERT_EQUAL(rc, 0); 4776 SPDK_CU_ASSERT_FATAL(desc != NULL); 4777 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 4778 ioch = spdk_bdev_get_io_channel(desc); 4779 SPDK_CU_ASSERT_FATAL(ioch != NULL); 4780 bdev_ch = spdk_io_channel_get_ctx(ioch); 4781 CU_ASSERT(TAILQ_EMPTY(&bdev_ch->io_submitted)); 4782 4783 fn_table.submit_request = stub_submit_request; 4784 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 4785 4786 /* Case 1: First test the request won't be split */ 4787 num_blocks = 32; 4788 4789 g_io_done = false; 4790 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_UNMAP, 0, num_blocks, 0); 4791 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 4792 rc = spdk_bdev_unmap_blocks(desc, ioch, 0, num_blocks, io_done, NULL); 4793 CU_ASSERT_EQUAL(rc, 0); 4794 CU_ASSERT(g_io_done == false); 4795 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 4796 stub_complete_io(1); 4797 CU_ASSERT(g_io_done == true); 4798 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 4799 4800 /* Case 2: Test the split with 2 children requests */ 4801 bdev->max_unmap = 8; 4802 bdev->max_unmap_segments = 2; 4803 max_unmap_blocks = bdev->max_unmap * bdev->max_unmap_segments; 4804 num_blocks = max_unmap_blocks * 2; 4805 offset = 0; 4806 4807 g_io_done = false; 4808 for (i = 0; i < 2; i++) { 4809 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_UNMAP, offset, max_unmap_blocks, 0); 4810 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 4811 offset += max_unmap_blocks; 4812 } 4813 4814 rc = spdk_bdev_unmap_blocks(desc, ioch, 0, num_blocks, io_done, NULL); 4815 CU_ASSERT_EQUAL(rc, 0); 4816 CU_ASSERT(g_io_done == false); 4817 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 4818 stub_complete_io(2); 4819 CU_ASSERT(g_io_done == true); 4820 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 4821 4822 /* Case 3: Test the split with 15 children requests, will finish 8 requests first */ 4823 num_children = 15; 4824 num_blocks = max_unmap_blocks * num_children; 4825 g_io_done = false; 4826 offset = 0; 4827 for (i = 0; i < num_children; i++) { 4828 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_UNMAP, offset, max_unmap_blocks, 0); 4829 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 4830 offset += max_unmap_blocks; 4831 } 4832 4833 rc = spdk_bdev_unmap_blocks(desc, ioch, 0, num_blocks, io_done, NULL); 4834 CU_ASSERT_EQUAL(rc, 0); 4835 CU_ASSERT(g_io_done == false); 4836 4837 while (num_children > 0) { 4838 num_outstanding = spdk_min(num_children, SPDK_BDEV_MAX_CHILDREN_UNMAP_WRITE_ZEROES_REQS); 4839 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == num_outstanding); 4840 stub_complete_io(num_outstanding); 4841 num_children -= num_outstanding; 4842 } 4843 CU_ASSERT(g_io_done == true); 4844 4845 spdk_put_io_channel(ioch); 4846 spdk_bdev_close(desc); 4847 free_bdev(bdev); 4848 spdk_bdev_finish(bdev_fini_cb, NULL); 4849 poll_threads(); 4850 } 4851 4852 static void 4853 bdev_write_zeroes_split_test(void) 4854 { 4855 struct spdk_bdev *bdev; 4856 struct spdk_bdev_desc *desc = NULL; 4857 struct spdk_io_channel *ioch; 4858 struct spdk_bdev_channel *bdev_ch; 4859 struct ut_expected_io *expected_io; 4860 struct spdk_bdev_opts bdev_opts = {}; 4861 uint32_t i, num_outstanding; 4862 uint64_t offset, num_blocks, max_write_zeroes_blocks, num_children; 4863 int rc; 4864 4865 spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts)); 4866 bdev_opts.bdev_io_pool_size = 512; 4867 bdev_opts.bdev_io_cache_size = 64; 4868 rc = spdk_bdev_set_opts(&bdev_opts); 4869 CU_ASSERT(rc == 0); 4870 4871 spdk_bdev_initialize(bdev_init_cb, NULL); 4872 bdev = allocate_bdev("bdev"); 4873 4874 rc = spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc); 4875 CU_ASSERT_EQUAL(rc, 0); 4876 SPDK_CU_ASSERT_FATAL(desc != NULL); 4877 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 4878 ioch = spdk_bdev_get_io_channel(desc); 4879 SPDK_CU_ASSERT_FATAL(ioch != NULL); 4880 bdev_ch = spdk_io_channel_get_ctx(ioch); 4881 CU_ASSERT(TAILQ_EMPTY(&bdev_ch->io_submitted)); 4882 4883 fn_table.submit_request = stub_submit_request; 4884 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 4885 4886 /* Case 1: First test the request won't be split */ 4887 num_blocks = 32; 4888 4889 g_io_done = false; 4890 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, 0, num_blocks, 0); 4891 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 4892 rc = spdk_bdev_write_zeroes_blocks(desc, ioch, 0, num_blocks, io_done, NULL); 4893 CU_ASSERT_EQUAL(rc, 0); 4894 CU_ASSERT(g_io_done == false); 4895 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 4896 stub_complete_io(1); 4897 CU_ASSERT(g_io_done == true); 4898 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 4899 4900 /* Case 2: Test the split with 2 children requests */ 4901 max_write_zeroes_blocks = 8; 4902 bdev->max_write_zeroes = max_write_zeroes_blocks; 4903 num_blocks = max_write_zeroes_blocks * 2; 4904 offset = 0; 4905 4906 g_io_done = false; 4907 for (i = 0; i < 2; i++) { 4908 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, offset, max_write_zeroes_blocks, 4909 0); 4910 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 4911 offset += max_write_zeroes_blocks; 4912 } 4913 4914 rc = spdk_bdev_write_zeroes_blocks(desc, ioch, 0, num_blocks, io_done, NULL); 4915 CU_ASSERT_EQUAL(rc, 0); 4916 CU_ASSERT(g_io_done == false); 4917 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 4918 stub_complete_io(2); 4919 CU_ASSERT(g_io_done == true); 4920 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 4921 4922 /* Case 3: Test the split with 15 children requests, will finish 8 requests first */ 4923 num_children = 15; 4924 num_blocks = max_write_zeroes_blocks * num_children; 4925 g_io_done = false; 4926 offset = 0; 4927 for (i = 0; i < num_children; i++) { 4928 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, offset, max_write_zeroes_blocks, 4929 0); 4930 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 4931 offset += max_write_zeroes_blocks; 4932 } 4933 4934 rc = spdk_bdev_write_zeroes_blocks(desc, ioch, 0, num_blocks, io_done, NULL); 4935 CU_ASSERT_EQUAL(rc, 0); 4936 CU_ASSERT(g_io_done == false); 4937 4938 while (num_children > 0) { 4939 num_outstanding = spdk_min(num_children, SPDK_BDEV_MAX_CHILDREN_UNMAP_WRITE_ZEROES_REQS); 4940 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == num_outstanding); 4941 stub_complete_io(num_outstanding); 4942 num_children -= num_outstanding; 4943 } 4944 CU_ASSERT(g_io_done == true); 4945 4946 spdk_put_io_channel(ioch); 4947 spdk_bdev_close(desc); 4948 free_bdev(bdev); 4949 spdk_bdev_finish(bdev_fini_cb, NULL); 4950 poll_threads(); 4951 } 4952 4953 static void 4954 bdev_set_options_test(void) 4955 { 4956 struct spdk_bdev_opts bdev_opts = {}; 4957 int rc; 4958 4959 /* Case1: Do not set opts_size */ 4960 rc = spdk_bdev_set_opts(&bdev_opts); 4961 CU_ASSERT(rc == -1); 4962 4963 spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts)); 4964 bdev_opts.bdev_io_pool_size = 4; 4965 bdev_opts.bdev_io_cache_size = 2; 4966 bdev_opts.small_buf_pool_size = 4; 4967 4968 /* Case 2: Do not set valid small_buf_pool_size and large_buf_pool_size */ 4969 rc = spdk_bdev_set_opts(&bdev_opts); 4970 CU_ASSERT(rc == -1); 4971 4972 /* Case 3: Do not set valid large_buf_pool_size */ 4973 bdev_opts.small_buf_pool_size = BUF_SMALL_POOL_SIZE; 4974 bdev_opts.large_buf_pool_size = BUF_LARGE_POOL_SIZE - 1; 4975 rc = spdk_bdev_set_opts(&bdev_opts); 4976 CU_ASSERT(rc == -1); 4977 4978 /* Case4: set valid large buf_pool_size */ 4979 bdev_opts.large_buf_pool_size = BUF_LARGE_POOL_SIZE; 4980 rc = spdk_bdev_set_opts(&bdev_opts); 4981 CU_ASSERT(rc == 0); 4982 4983 /* Case5: Set different valid value for small and large buf pool */ 4984 bdev_opts.large_buf_pool_size = BUF_SMALL_POOL_SIZE + 3; 4985 bdev_opts.large_buf_pool_size = BUF_LARGE_POOL_SIZE + 3; 4986 rc = spdk_bdev_set_opts(&bdev_opts); 4987 CU_ASSERT(rc == 0); 4988 } 4989 4990 static uint64_t 4991 get_ns_time(void) 4992 { 4993 int rc; 4994 struct timespec ts; 4995 4996 rc = clock_gettime(CLOCK_MONOTONIC, &ts); 4997 CU_ASSERT(rc == 0); 4998 return ts.tv_sec * 1000 * 1000 * 1000 + ts.tv_nsec; 4999 } 5000 5001 static int 5002 rb_tree_get_height(struct spdk_bdev_name *bdev_name) 5003 { 5004 int h1, h2; 5005 5006 if (bdev_name == NULL) { 5007 return -1; 5008 } else { 5009 h1 = rb_tree_get_height(RB_LEFT(bdev_name, node)); 5010 h2 = rb_tree_get_height(RB_RIGHT(bdev_name, node)); 5011 5012 return spdk_max(h1, h2) + 1; 5013 } 5014 } 5015 5016 static void 5017 bdev_multi_allocation(void) 5018 { 5019 const int max_bdev_num = 1024 * 16; 5020 char name[max_bdev_num][16]; 5021 char noexist_name[] = "invalid_bdev"; 5022 struct spdk_bdev *bdev[max_bdev_num]; 5023 int i, j; 5024 uint64_t last_time; 5025 int bdev_num; 5026 int height; 5027 5028 for (j = 0; j < max_bdev_num; j++) { 5029 snprintf(name[j], sizeof(name[j]), "bdev%d", j); 5030 } 5031 5032 for (i = 0; i < 16; i++) { 5033 last_time = get_ns_time(); 5034 bdev_num = 1024 * (i + 1); 5035 for (j = 0; j < bdev_num; j++) { 5036 bdev[j] = allocate_bdev(name[j]); 5037 height = rb_tree_get_height(&bdev[j]->internal.bdev_name); 5038 CU_ASSERT(height <= (int)(spdk_u32log2(2 * j + 2))); 5039 } 5040 SPDK_NOTICELOG("alloc bdev num %d takes %" PRIu64 " ms\n", bdev_num, 5041 (get_ns_time() - last_time) / 1000 / 1000); 5042 for (j = 0; j < bdev_num; j++) { 5043 CU_ASSERT(spdk_bdev_get_by_name(name[j]) != NULL); 5044 } 5045 CU_ASSERT(spdk_bdev_get_by_name(noexist_name) == NULL); 5046 5047 for (j = 0; j < bdev_num; j++) { 5048 free_bdev(bdev[j]); 5049 } 5050 for (j = 0; j < bdev_num; j++) { 5051 CU_ASSERT(spdk_bdev_get_by_name(name[j]) == NULL); 5052 } 5053 } 5054 } 5055 5056 static struct spdk_memory_domain *g_bdev_memory_domain = (struct spdk_memory_domain *) 0xf00df00d; 5057 5058 static int 5059 test_bdev_get_supported_dma_device_types_op(void *ctx, struct spdk_memory_domain **domains, 5060 int array_size) 5061 { 5062 if (array_size > 0 && domains) { 5063 domains[0] = g_bdev_memory_domain; 5064 } 5065 5066 return 1; 5067 } 5068 5069 static void 5070 bdev_get_memory_domains(void) 5071 { 5072 struct spdk_bdev_fn_table fn_table = { 5073 .get_memory_domains = test_bdev_get_supported_dma_device_types_op 5074 }; 5075 struct spdk_bdev bdev = { .fn_table = &fn_table }; 5076 struct spdk_memory_domain *domains[2] = {}; 5077 int rc; 5078 5079 /* bdev is NULL */ 5080 rc = spdk_bdev_get_memory_domains(NULL, domains, 2); 5081 CU_ASSERT(rc == -EINVAL); 5082 5083 /* domains is NULL */ 5084 rc = spdk_bdev_get_memory_domains(&bdev, NULL, 2); 5085 CU_ASSERT(rc == 1); 5086 5087 /* array size is 0 */ 5088 rc = spdk_bdev_get_memory_domains(&bdev, domains, 0); 5089 CU_ASSERT(rc == 1); 5090 5091 /* get_supported_dma_device_types op is set */ 5092 rc = spdk_bdev_get_memory_domains(&bdev, domains, 2); 5093 CU_ASSERT(rc == 1); 5094 CU_ASSERT(domains[0] == g_bdev_memory_domain); 5095 5096 /* get_supported_dma_device_types op is not set */ 5097 fn_table.get_memory_domains = NULL; 5098 rc = spdk_bdev_get_memory_domains(&bdev, domains, 2); 5099 CU_ASSERT(rc == 0); 5100 } 5101 5102 static void 5103 bdev_writev_readv_ext(void) 5104 { 5105 struct spdk_bdev *bdev; 5106 struct spdk_bdev_desc *desc = NULL; 5107 struct spdk_io_channel *io_ch; 5108 char io_buf[512]; 5109 struct iovec iov = { .iov_base = io_buf, .iov_len = 512 }; 5110 struct ut_expected_io *expected_io; 5111 struct spdk_bdev_ext_io_opts ext_io_opts = { 5112 .metadata = (void *)0xFF000000, 5113 .size = sizeof(ext_io_opts) 5114 }; 5115 int rc; 5116 5117 spdk_bdev_initialize(bdev_init_cb, NULL); 5118 5119 bdev = allocate_bdev("bdev0"); 5120 bdev->md_interleave = false; 5121 bdev->md_len = 8; 5122 5123 rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc); 5124 CU_ASSERT(rc == 0); 5125 SPDK_CU_ASSERT_FATAL(desc != NULL); 5126 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 5127 io_ch = spdk_bdev_get_io_channel(desc); 5128 CU_ASSERT(io_ch != NULL); 5129 5130 /* Test 1, Simple test */ 5131 g_io_done = false; 5132 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 32, 14, 1); 5133 expected_io->md_buf = ext_io_opts.metadata; 5134 expected_io->ext_io_opts = &ext_io_opts; 5135 ut_expected_io_set_iov(expected_io, 0, iov.iov_base, iov.iov_len); 5136 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 5137 5138 rc = spdk_bdev_readv_blocks_ext(desc, io_ch, &iov, 1, 32, 14, io_done, NULL, &ext_io_opts); 5139 5140 CU_ASSERT(rc == 0); 5141 CU_ASSERT(g_io_done == false); 5142 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 5143 stub_complete_io(1); 5144 CU_ASSERT(g_io_done == true); 5145 5146 g_io_done = false; 5147 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 32, 14, 1); 5148 expected_io->md_buf = ext_io_opts.metadata; 5149 expected_io->ext_io_opts = &ext_io_opts; 5150 ut_expected_io_set_iov(expected_io, 0, iov.iov_base, iov.iov_len); 5151 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 5152 5153 rc = spdk_bdev_writev_blocks_ext(desc, io_ch, &iov, 1, 32, 14, io_done, NULL, &ext_io_opts); 5154 5155 CU_ASSERT(rc == 0); 5156 CU_ASSERT(g_io_done == false); 5157 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 5158 stub_complete_io(1); 5159 CU_ASSERT(g_io_done == true); 5160 5161 /* Test 2, invalid ext_opts size */ 5162 ext_io_opts.size = 0; 5163 rc = spdk_bdev_readv_blocks_ext(desc, io_ch, &iov, 1, 32, 14, io_done, NULL, &ext_io_opts); 5164 CU_ASSERT(rc != 0); 5165 rc = spdk_bdev_writev_blocks_ext(desc, io_ch, &iov, 1, 32, 14, io_done, NULL, &ext_io_opts); 5166 CU_ASSERT(rc != 0); 5167 5168 ext_io_opts.size = sizeof(ext_io_opts) * 2; 5169 rc = spdk_bdev_readv_blocks_ext(desc, io_ch, &iov, 1, 32, 14, io_done, NULL, &ext_io_opts); 5170 CU_ASSERT(rc != 0); 5171 rc = spdk_bdev_writev_blocks_ext(desc, io_ch, &iov, 1, 32, 14, io_done, NULL, &ext_io_opts); 5172 CU_ASSERT(rc != 0); 5173 5174 ext_io_opts.size = offsetof(struct spdk_bdev_ext_io_opts, metadata) + 5175 sizeof(ext_io_opts.metadata) - 1; 5176 rc = spdk_bdev_readv_blocks_ext(desc, io_ch, &iov, 1, 32, 14, io_done, NULL, &ext_io_opts); 5177 CU_ASSERT(rc != 0); 5178 rc = spdk_bdev_writev_blocks_ext(desc, io_ch, &iov, 1, 32, 14, io_done, NULL, &ext_io_opts); 5179 CU_ASSERT(rc != 0); 5180 5181 /* Test 3, Check that IO request with ext_opts and metadata is split correctly 5182 * Offset 14, length 8, payload 0xF000 5183 * Child - Offset 14, length 2, payload 0xF000 5184 * Child - Offset 16, length 6, payload 0xF000 + 2 * 512 5185 */ 5186 bdev->optimal_io_boundary = 16; 5187 bdev->split_on_optimal_io_boundary = true; 5188 bdev->md_interleave = false; 5189 bdev->md_len = 8; 5190 5191 iov.iov_base = (void *)0xF000; 5192 iov.iov_len = 4096; 5193 memset(&ext_io_opts, 0, sizeof(ext_io_opts)); 5194 ext_io_opts.metadata = (void *)0xFF000000; 5195 ext_io_opts.size = sizeof(ext_io_opts); 5196 g_io_done = false; 5197 5198 /* read */ 5199 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 14, 2, 1); 5200 expected_io->md_buf = ext_io_opts.metadata; 5201 ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 2 * 512); 5202 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 5203 5204 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 16, 6, 1); 5205 expected_io->md_buf = ext_io_opts.metadata + 2 * 8; 5206 ut_expected_io_set_iov(expected_io, 0, (void *)(0xF000 + 2 * 512), 6 * 512); 5207 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 5208 5209 rc = spdk_bdev_readv_blocks_ext(desc, io_ch, &iov, 1, 14, 8, io_done, NULL, &ext_io_opts); 5210 CU_ASSERT(rc == 0); 5211 CU_ASSERT(g_io_done == false); 5212 5213 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 5214 stub_complete_io(2); 5215 CU_ASSERT(g_io_done == true); 5216 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 5217 5218 /* write */ 5219 g_io_done = false; 5220 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 14, 2, 1); 5221 expected_io->md_buf = ext_io_opts.metadata; 5222 ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 2 * 512); 5223 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 5224 5225 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 16, 6, 1); 5226 expected_io->md_buf = ext_io_opts.metadata + 2 * 8; 5227 ut_expected_io_set_iov(expected_io, 0, (void *)(0xF000 + 2 * 512), 6 * 512); 5228 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 5229 5230 rc = spdk_bdev_writev_blocks_ext(desc, io_ch, &iov, 1, 14, 8, io_done, NULL, &ext_io_opts); 5231 CU_ASSERT(rc == 0); 5232 CU_ASSERT(g_io_done == false); 5233 5234 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 5235 stub_complete_io(2); 5236 CU_ASSERT(g_io_done == true); 5237 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 5238 5239 /* Test 4, Verify data pull/push 5240 * bdev doens't support memory domains, so buffers from bdev memory pool will be used */ 5241 ext_io_opts.memory_domain = (struct spdk_memory_domain *)0xdeadbeef; 5242 5243 g_io_done = false; 5244 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 32, 14, 1); 5245 ut_expected_io_set_iov(expected_io, 0, iov.iov_base, iov.iov_len); 5246 expected_io->ext_io_opts = &ext_io_opts; 5247 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 5248 5249 rc = spdk_bdev_readv_blocks_ext(desc, io_ch, &iov, 1, 32, 14, io_done, NULL, &ext_io_opts); 5250 5251 CU_ASSERT(rc == 0); 5252 CU_ASSERT(g_io_done == false); 5253 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 5254 stub_complete_io(1); 5255 CU_ASSERT(g_memory_domain_push_data_called == true); 5256 CU_ASSERT(g_io_done == true); 5257 5258 g_io_done = false; 5259 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 32, 14, 1); 5260 ut_expected_io_set_iov(expected_io, 0, iov.iov_base, iov.iov_len); 5261 expected_io->ext_io_opts = &ext_io_opts; 5262 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 5263 5264 rc = spdk_bdev_writev_blocks_ext(desc, io_ch, &iov, 1, 32, 14, io_done, NULL, &ext_io_opts); 5265 5266 CU_ASSERT(rc == 0); 5267 CU_ASSERT(g_memory_domain_pull_data_called == true); 5268 CU_ASSERT(g_io_done == false); 5269 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 5270 stub_complete_io(1); 5271 CU_ASSERT(g_io_done == true); 5272 5273 spdk_put_io_channel(io_ch); 5274 spdk_bdev_close(desc); 5275 free_bdev(bdev); 5276 spdk_bdev_finish(bdev_fini_cb, NULL); 5277 poll_threads(); 5278 } 5279 5280 static void 5281 bdev_register_uuid_alias(void) 5282 { 5283 struct spdk_bdev *bdev, *second; 5284 char uuid[SPDK_UUID_STRING_LEN]; 5285 int rc; 5286 5287 spdk_bdev_initialize(bdev_init_cb, NULL); 5288 bdev = allocate_bdev("bdev0"); 5289 5290 /* Make sure an UUID was generated */ 5291 CU_ASSERT_FALSE(spdk_mem_all_zero(&bdev->uuid, sizeof(bdev->uuid))); 5292 5293 /* Check that an UUID alias was registered */ 5294 spdk_uuid_fmt_lower(uuid, sizeof(uuid), &bdev->uuid); 5295 CU_ASSERT_EQUAL(spdk_bdev_get_by_name(uuid), bdev); 5296 5297 /* Unregister the bdev */ 5298 spdk_bdev_unregister(bdev, NULL, NULL); 5299 poll_threads(); 5300 CU_ASSERT_PTR_NULL(spdk_bdev_get_by_name(uuid)); 5301 5302 /* Check the same, but this time register the bdev with non-zero UUID */ 5303 rc = spdk_bdev_register(bdev); 5304 CU_ASSERT_EQUAL(rc, 0); 5305 CU_ASSERT_EQUAL(spdk_bdev_get_by_name(uuid), bdev); 5306 5307 /* Unregister the bdev */ 5308 spdk_bdev_unregister(bdev, NULL, NULL); 5309 poll_threads(); 5310 CU_ASSERT_PTR_NULL(spdk_bdev_get_by_name(uuid)); 5311 5312 /* Regiser the bdev using UUID as the name */ 5313 bdev->name = uuid; 5314 rc = spdk_bdev_register(bdev); 5315 CU_ASSERT_EQUAL(rc, 0); 5316 CU_ASSERT_EQUAL(spdk_bdev_get_by_name(uuid), bdev); 5317 5318 /* Unregister the bdev */ 5319 spdk_bdev_unregister(bdev, NULL, NULL); 5320 poll_threads(); 5321 CU_ASSERT_PTR_NULL(spdk_bdev_get_by_name(uuid)); 5322 5323 /* Check that it's not possible to register two bdevs with the same UUIDs */ 5324 bdev->name = "bdev0"; 5325 second = allocate_bdev("bdev1"); 5326 spdk_uuid_copy(&bdev->uuid, &second->uuid); 5327 rc = spdk_bdev_register(bdev); 5328 CU_ASSERT_EQUAL(rc, -EEXIST); 5329 5330 /* Regenerate the UUID and re-check */ 5331 spdk_uuid_generate(&bdev->uuid); 5332 rc = spdk_bdev_register(bdev); 5333 CU_ASSERT_EQUAL(rc, 0); 5334 5335 /* And check that both bdevs can be retrieved through their UUIDs */ 5336 spdk_uuid_fmt_lower(uuid, sizeof(uuid), &bdev->uuid); 5337 CU_ASSERT_EQUAL(spdk_bdev_get_by_name(uuid), bdev); 5338 spdk_uuid_fmt_lower(uuid, sizeof(uuid), &second->uuid); 5339 CU_ASSERT_EQUAL(spdk_bdev_get_by_name(uuid), second); 5340 5341 free_bdev(second); 5342 free_bdev(bdev); 5343 spdk_bdev_finish(bdev_fini_cb, NULL); 5344 poll_threads(); 5345 } 5346 5347 static void 5348 bdev_unregister_by_name(void) 5349 { 5350 struct spdk_bdev *bdev; 5351 int rc; 5352 5353 bdev = allocate_bdev("bdev"); 5354 5355 g_event_type1 = 0xFF; 5356 g_unregister_arg = NULL; 5357 g_unregister_rc = -1; 5358 5359 rc = spdk_bdev_unregister_by_name("bdev1", &bdev_ut_if, bdev_unregister_cb, (void *)0x12345678); 5360 CU_ASSERT(rc == -ENODEV); 5361 5362 rc = spdk_bdev_unregister_by_name("bdev", &vbdev_ut_if, bdev_unregister_cb, (void *)0x12345678); 5363 CU_ASSERT(rc == -ENODEV); 5364 5365 rc = spdk_bdev_unregister_by_name("bdev", &bdev_ut_if, bdev_unregister_cb, (void *)0x12345678); 5366 CU_ASSERT(rc == 0); 5367 5368 /* Check that unregister callback is delayed */ 5369 CU_ASSERT(g_unregister_arg == NULL); 5370 CU_ASSERT(g_unregister_rc == -1); 5371 5372 poll_threads(); 5373 5374 /* Event callback shall not be issued because device was closed */ 5375 CU_ASSERT(g_event_type1 == 0xFF); 5376 /* Unregister callback is issued */ 5377 CU_ASSERT(g_unregister_arg == (void *)0x12345678); 5378 CU_ASSERT(g_unregister_rc == 0); 5379 5380 free_bdev(bdev); 5381 } 5382 5383 static int 5384 count_bdevs(void *ctx, struct spdk_bdev *bdev) 5385 { 5386 int *count = ctx; 5387 5388 (*count)++; 5389 5390 return 0; 5391 } 5392 5393 static void 5394 for_each_bdev_test(void) 5395 { 5396 struct spdk_bdev *bdev[8]; 5397 int rc, count; 5398 5399 bdev[0] = allocate_bdev("bdev0"); 5400 5401 bdev[1] = allocate_bdev("bdev1"); 5402 rc = spdk_bdev_module_claim_bdev(bdev[1], NULL, &bdev_ut_if); 5403 CU_ASSERT(rc == 0); 5404 5405 bdev[2] = allocate_bdev("bdev2"); 5406 5407 bdev[3] = allocate_bdev("bdev3"); 5408 rc = spdk_bdev_module_claim_bdev(bdev[3], NULL, &bdev_ut_if); 5409 CU_ASSERT(rc == 0); 5410 5411 bdev[4] = allocate_bdev("bdev4"); 5412 5413 bdev[5] = allocate_bdev("bdev5"); 5414 rc = spdk_bdev_module_claim_bdev(bdev[5], NULL, &bdev_ut_if); 5415 CU_ASSERT(rc == 0); 5416 5417 bdev[6] = allocate_bdev("bdev6"); 5418 5419 bdev[7] = allocate_bdev("bdev7"); 5420 5421 count = 0; 5422 rc = spdk_for_each_bdev(&count, count_bdevs); 5423 CU_ASSERT(rc == 0); 5424 CU_ASSERT(count == 8); 5425 5426 count = 0; 5427 rc = spdk_for_each_bdev_leaf(&count, count_bdevs); 5428 CU_ASSERT(rc == 0); 5429 CU_ASSERT(count == 5); 5430 5431 free_bdev(bdev[0]); 5432 free_bdev(bdev[1]); 5433 free_bdev(bdev[2]); 5434 free_bdev(bdev[3]); 5435 free_bdev(bdev[4]); 5436 free_bdev(bdev[5]); 5437 free_bdev(bdev[6]); 5438 free_bdev(bdev[7]); 5439 } 5440 5441 int 5442 main(int argc, char **argv) 5443 { 5444 CU_pSuite suite = NULL; 5445 unsigned int num_failures; 5446 5447 CU_set_error_action(CUEA_ABORT); 5448 CU_initialize_registry(); 5449 5450 suite = CU_add_suite("bdev", null_init, null_clean); 5451 5452 CU_ADD_TEST(suite, bytes_to_blocks_test); 5453 CU_ADD_TEST(suite, num_blocks_test); 5454 CU_ADD_TEST(suite, io_valid_test); 5455 CU_ADD_TEST(suite, open_write_test); 5456 CU_ADD_TEST(suite, claim_test); 5457 CU_ADD_TEST(suite, alias_add_del_test); 5458 CU_ADD_TEST(suite, get_device_stat_test); 5459 CU_ADD_TEST(suite, bdev_io_types_test); 5460 CU_ADD_TEST(suite, bdev_io_wait_test); 5461 CU_ADD_TEST(suite, bdev_io_spans_split_test); 5462 CU_ADD_TEST(suite, bdev_io_boundary_split_test); 5463 CU_ADD_TEST(suite, bdev_io_max_size_and_segment_split_test); 5464 CU_ADD_TEST(suite, bdev_io_mix_split_test); 5465 CU_ADD_TEST(suite, bdev_io_split_with_io_wait); 5466 CU_ADD_TEST(suite, bdev_io_alignment_with_boundary); 5467 CU_ADD_TEST(suite, bdev_io_alignment); 5468 CU_ADD_TEST(suite, bdev_histograms); 5469 CU_ADD_TEST(suite, bdev_write_zeroes); 5470 CU_ADD_TEST(suite, bdev_compare_and_write); 5471 CU_ADD_TEST(suite, bdev_compare); 5472 CU_ADD_TEST(suite, bdev_zcopy_write); 5473 CU_ADD_TEST(suite, bdev_zcopy_read); 5474 CU_ADD_TEST(suite, bdev_open_while_hotremove); 5475 CU_ADD_TEST(suite, bdev_close_while_hotremove); 5476 CU_ADD_TEST(suite, bdev_open_ext); 5477 CU_ADD_TEST(suite, bdev_open_ext_unregister); 5478 CU_ADD_TEST(suite, bdev_set_io_timeout); 5479 CU_ADD_TEST(suite, bdev_set_qd_sampling); 5480 CU_ADD_TEST(suite, lba_range_overlap); 5481 CU_ADD_TEST(suite, lock_lba_range_check_ranges); 5482 CU_ADD_TEST(suite, lock_lba_range_with_io_outstanding); 5483 CU_ADD_TEST(suite, lock_lba_range_overlapped); 5484 CU_ADD_TEST(suite, bdev_io_abort); 5485 CU_ADD_TEST(suite, bdev_unmap); 5486 CU_ADD_TEST(suite, bdev_write_zeroes_split_test); 5487 CU_ADD_TEST(suite, bdev_set_options_test); 5488 CU_ADD_TEST(suite, bdev_multi_allocation); 5489 CU_ADD_TEST(suite, bdev_get_memory_domains); 5490 CU_ADD_TEST(suite, bdev_writev_readv_ext); 5491 CU_ADD_TEST(suite, bdev_register_uuid_alias); 5492 CU_ADD_TEST(suite, bdev_unregister_by_name); 5493 CU_ADD_TEST(suite, for_each_bdev_test); 5494 5495 allocate_cores(1); 5496 allocate_threads(1); 5497 set_thread(0); 5498 5499 CU_basic_set_mode(CU_BRM_VERBOSE); 5500 CU_basic_run_tests(); 5501 num_failures = CU_get_number_of_failures(); 5502 CU_cleanup_registry(); 5503 5504 free_threads(); 5505 free_cores(); 5506 5507 return num_failures; 5508 } 5509