/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (C) 2017 Intel Corporation. All rights reserved.
 * Copyright (c) 2019 Mellanox Technologies LTD. All rights reserved.
 * Copyright (c) 2021-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 */

#include "spdk_cunit.h"

#include "common/lib/ut_multithread.c"
#include "unit/lib/json_mock.c"

#include "spdk/config.h"
/* HACK: disable VTune integration so the unit test doesn't need VTune headers and libs to build */
#undef SPDK_CONFIG_VTUNE

#include "bdev/bdev.c"

DEFINE_STUB(spdk_notify_send, uint64_t, (const char *type, const char *ctx), 0);
DEFINE_STUB(spdk_notify_type_register, struct spdk_notify_type *, (const char *type), NULL);
DEFINE_STUB(spdk_memory_domain_get_dma_device_id, const char *, (struct spdk_memory_domain *domain),
	    "test_domain");
DEFINE_STUB(spdk_memory_domain_get_dma_device_type, enum spdk_dma_device_type,
	    (struct spdk_memory_domain *domain), 0);

static bool g_memory_domain_pull_data_called;
static bool g_memory_domain_push_data_called;

DEFINE_RETURN_MOCK(spdk_memory_domain_pull_data, int);
int
spdk_memory_domain_pull_data(struct spdk_memory_domain *src_domain, void *src_domain_ctx,
			     struct iovec *src_iov, uint32_t src_iov_cnt, struct iovec *dst_iov, uint32_t dst_iov_cnt,
			     spdk_memory_domain_data_cpl_cb cpl_cb, void *cpl_cb_arg)
{
	g_memory_domain_pull_data_called = true;
	HANDLE_RETURN_MOCK(spdk_memory_domain_pull_data);
	cpl_cb(cpl_cb_arg, 0);
	return 0;
}

DEFINE_RETURN_MOCK(spdk_memory_domain_push_data, int);
int
spdk_memory_domain_push_data(struct spdk_memory_domain *dst_domain, void *dst_domain_ctx,
			     struct iovec *dst_iov, uint32_t dst_iovcnt, struct iovec *src_iov, uint32_t src_iovcnt,
			     spdk_memory_domain_data_cpl_cb cpl_cb, void *cpl_cb_arg)
{
	g_memory_domain_push_data_called = true;
	HANDLE_RETURN_MOCK(spdk_memory_domain_push_data);
	cpl_cb(cpl_cb_arg, 0);
	return 0;
}

int g_status;
int g_count;
enum spdk_bdev_event_type g_event_type1;
enum spdk_bdev_event_type g_event_type2;
enum spdk_bdev_event_type g_event_type3;
enum spdk_bdev_event_type g_event_type4;
struct spdk_histogram_data *g_histogram;
void *g_unregister_arg;
int g_unregister_rc;

void
spdk_scsi_nvme_translate(const struct spdk_bdev_io *bdev_io,
			 int *sc, int *sk, int *asc, int *ascq)
{
}

static int
null_init(void)
{
	return 0;
}

static int
null_clean(void)
{
	return 0;
}

static int
stub_destruct(void *ctx)
{
	return 0;
}

struct ut_expected_io {
	uint8_t				type;
	uint64_t			offset;
	uint64_t			src_offset;
	uint64_t			length;
	int				iovcnt;
	struct iovec			iov[SPDK_BDEV_IO_NUM_CHILD_IOV];
	void				*md_buf;
	TAILQ_ENTRY(ut_expected_io)	link;
};

struct bdev_ut_channel {
	TAILQ_HEAD(, spdk_bdev_io)	outstanding_io;
	uint32_t			outstanding_io_count;
	TAILQ_HEAD(, ut_expected_io)	expected_io;
};

static bool g_io_done;
static struct spdk_bdev_io *g_bdev_io;
static enum spdk_bdev_io_status g_io_status;
static enum spdk_bdev_io_status g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS;
static uint32_t g_bdev_ut_io_device;
static struct bdev_ut_channel *g_bdev_ut_channel;
static void *g_compare_read_buf;
static uint32_t g_compare_read_buf_len;
static void *g_compare_write_buf;
static uint32_t g_compare_write_buf_len;
static void *g_compare_md_buf;
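/*
 * Per-test state consumed by stub_submit_request() below: the compare buffers
 * seed READ/COMPARE data, while the abort, zcopy and seek fields record what
 * the stub observed so the tests can assert on it afterwards.
 */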
static bool g_abort_done;
static enum spdk_bdev_io_status g_abort_status;
static void *g_zcopy_read_buf;
static uint32_t g_zcopy_read_buf_len;
static void *g_zcopy_write_buf;
static uint32_t g_zcopy_write_buf_len;
static struct spdk_bdev_io *g_zcopy_bdev_io;
static uint64_t g_seek_data_offset;
static uint64_t g_seek_hole_offset;
static uint64_t g_seek_offset;

static struct ut_expected_io *
ut_alloc_expected_io(uint8_t type, uint64_t offset, uint64_t length, int iovcnt)
{
	struct ut_expected_io *expected_io;

	expected_io = calloc(1, sizeof(*expected_io));
	SPDK_CU_ASSERT_FATAL(expected_io != NULL);

	expected_io->type = type;
	expected_io->offset = offset;
	expected_io->length = length;
	expected_io->iovcnt = iovcnt;

	return expected_io;
}

static struct ut_expected_io *
ut_alloc_expected_copy_io(uint8_t type, uint64_t offset, uint64_t src_offset, uint64_t length)
{
	struct ut_expected_io *expected_io;

	expected_io = calloc(1, sizeof(*expected_io));
	SPDK_CU_ASSERT_FATAL(expected_io != NULL);

	expected_io->type = type;
	expected_io->offset = offset;
	expected_io->src_offset = src_offset;
	expected_io->length = length;

	return expected_io;
}

static void
ut_expected_io_set_iov(struct ut_expected_io *expected_io, int pos, void *base, size_t len)
{
	expected_io->iov[pos].iov_base = base;
	expected_io->iov[pos].iov_len = len;
}

static void
stub_submit_request(struct spdk_io_channel *_ch, struct spdk_bdev_io *bdev_io)
{
	struct bdev_ut_channel *ch = spdk_io_channel_get_ctx(_ch);
	struct ut_expected_io *expected_io;
	struct iovec *iov, *expected_iov;
	struct spdk_bdev_io *bio_to_abort;
	int i;

	g_bdev_io = bdev_io;

	if (g_compare_read_buf && bdev_io->type == SPDK_BDEV_IO_TYPE_READ) {
		uint32_t len = bdev_io->u.bdev.iovs[0].iov_len;

		CU_ASSERT(bdev_io->u.bdev.iovcnt == 1);
		CU_ASSERT(g_compare_read_buf_len == len);
		memcpy(bdev_io->u.bdev.iovs[0].iov_base, g_compare_read_buf, len);
		if (bdev_io->bdev->md_len && bdev_io->u.bdev.md_buf && g_compare_md_buf) {
			memcpy(bdev_io->u.bdev.md_buf, g_compare_md_buf,
			       bdev_io->bdev->md_len * bdev_io->u.bdev.num_blocks);
		}
	}

	if (g_compare_write_buf && bdev_io->type == SPDK_BDEV_IO_TYPE_WRITE) {
		uint32_t len = bdev_io->u.bdev.iovs[0].iov_len;

		CU_ASSERT(bdev_io->u.bdev.iovcnt == 1);
		CU_ASSERT(g_compare_write_buf_len == len);
		memcpy(g_compare_write_buf, bdev_io->u.bdev.iovs[0].iov_base, len);
	}

	if (g_compare_read_buf && bdev_io->type == SPDK_BDEV_IO_TYPE_COMPARE) {
		uint32_t len = bdev_io->u.bdev.iovs[0].iov_len;

		CU_ASSERT(bdev_io->u.bdev.iovcnt == 1);
		CU_ASSERT(g_compare_read_buf_len == len);
		if (memcmp(bdev_io->u.bdev.iovs[0].iov_base, g_compare_read_buf, len)) {
			g_io_exp_status = SPDK_BDEV_IO_STATUS_MISCOMPARE;
		}
		if (bdev_io->u.bdev.md_buf &&
		    memcmp(bdev_io->u.bdev.md_buf, g_compare_md_buf,
			   bdev_io->bdev->md_len * bdev_io->u.bdev.num_blocks)) {
			g_io_exp_status = SPDK_BDEV_IO_STATUS_MISCOMPARE;
		}
	}
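	/*
	 * ABORT handling: when the test expects success, find the target I/O
	 * on the outstanding queue and complete it as FAILED on its behalf,
	 * the way a real backend would abort it.
	 */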
	if (bdev_io->type == SPDK_BDEV_IO_TYPE_ABORT) {
		if (g_io_exp_status == SPDK_BDEV_IO_STATUS_SUCCESS) {
			TAILQ_FOREACH(bio_to_abort, &ch->outstanding_io, module_link) {
				if (bio_to_abort == bdev_io->u.abort.bio_to_abort) {
					TAILQ_REMOVE(&ch->outstanding_io, bio_to_abort, module_link);
					ch->outstanding_io_count--;
					spdk_bdev_io_complete(bio_to_abort, SPDK_BDEV_IO_STATUS_FAILED);
					break;
				}
			}
		}
	}

	if (bdev_io->type == SPDK_BDEV_IO_TYPE_ZCOPY) {
		if (bdev_io->u.bdev.zcopy.start) {
			g_zcopy_bdev_io = bdev_io;
			if (bdev_io->u.bdev.zcopy.populate) {
				/* Start of a read */
				CU_ASSERT(g_zcopy_read_buf != NULL);
				CU_ASSERT(g_zcopy_read_buf_len > 0);
				bdev_io->u.bdev.iovs[0].iov_base = g_zcopy_read_buf;
				bdev_io->u.bdev.iovs[0].iov_len = g_zcopy_read_buf_len;
				bdev_io->u.bdev.iovcnt = 1;
			} else {
				/* Start of a write */
				CU_ASSERT(g_zcopy_write_buf != NULL);
				CU_ASSERT(g_zcopy_write_buf_len > 0);
				bdev_io->u.bdev.iovs[0].iov_base = g_zcopy_write_buf;
				bdev_io->u.bdev.iovs[0].iov_len = g_zcopy_write_buf_len;
				bdev_io->u.bdev.iovcnt = 1;
			}
		} else {
			if (bdev_io->u.bdev.zcopy.commit) {
				/* End of write */
				CU_ASSERT(bdev_io->u.bdev.iovs[0].iov_base == g_zcopy_write_buf);
				CU_ASSERT(bdev_io->u.bdev.iovs[0].iov_len == g_zcopy_write_buf_len);
				CU_ASSERT(bdev_io->u.bdev.iovcnt == 1);
				g_zcopy_write_buf = NULL;
				g_zcopy_write_buf_len = 0;
			} else {
				/* End of read */
				CU_ASSERT(bdev_io->u.bdev.iovs[0].iov_base == g_zcopy_read_buf);
				CU_ASSERT(bdev_io->u.bdev.iovs[0].iov_len == g_zcopy_read_buf_len);
				CU_ASSERT(bdev_io->u.bdev.iovcnt == 1);
				g_zcopy_read_buf = NULL;
				g_zcopy_read_buf_len = 0;
			}
		}
	}

	if (bdev_io->type == SPDK_BDEV_IO_TYPE_SEEK_DATA) {
		bdev_io->u.bdev.seek.offset = g_seek_data_offset;
	}

	if (bdev_io->type == SPDK_BDEV_IO_TYPE_SEEK_HOLE) {
		bdev_io->u.bdev.seek.offset = g_seek_hole_offset;
	}

	TAILQ_INSERT_TAIL(&ch->outstanding_io, bdev_io, module_link);
	ch->outstanding_io_count++;

	expected_io = TAILQ_FIRST(&ch->expected_io);
	if (expected_io == NULL) {
		return;
	}
	TAILQ_REMOVE(&ch->expected_io, expected_io, link);

	if (expected_io->type != SPDK_BDEV_IO_TYPE_INVALID) {
		CU_ASSERT(bdev_io->type == expected_io->type);
	}

	if (expected_io->md_buf != NULL) {
		CU_ASSERT(expected_io->md_buf == bdev_io->u.bdev.md_buf);
	}

	if (expected_io->length == 0) {
		free(expected_io);
		return;
	}

	CU_ASSERT(expected_io->offset == bdev_io->u.bdev.offset_blocks);
	CU_ASSERT(expected_io->length == bdev_io->u.bdev.num_blocks);
	if (expected_io->type == SPDK_BDEV_IO_TYPE_COPY) {
		CU_ASSERT(expected_io->src_offset == bdev_io->u.bdev.copy.src_offset_blocks);
	}

	if (expected_io->iovcnt == 0) {
		free(expected_io);
		/* UNMAP, WRITE_ZEROES, FLUSH and COPY don't have iovs, so we can just return now. */
		return;
	}

	CU_ASSERT(expected_io->iovcnt == bdev_io->u.bdev.iovcnt);
	for (i = 0; i < expected_io->iovcnt; i++) {
		expected_iov = &expected_io->iov[i];
		if (bdev_io->internal.orig_iovcnt == 0) {
			iov = &bdev_io->u.bdev.iovs[i];
		} else {
			iov = bdev_io->internal.orig_iovs;
		}
		CU_ASSERT(iov->iov_len == expected_iov->iov_len);
		CU_ASSERT(iov->iov_base == expected_iov->iov_base);
	}

	free(expected_io);
}
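/*
 * Typical use of the expected-I/O machinery, as a sketch (the real tests
 * below follow this shape):
 *
 *	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 14, 8, 1);
 *	ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 8 * 512);
 *	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
 *	rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 8, io_done, NULL);
 *	stub_complete_io(1);
 */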
static void
stub_submit_request_get_buf_cb(struct spdk_io_channel *_ch,
			       struct spdk_bdev_io *bdev_io, bool success)
{
	CU_ASSERT(success == true);

	stub_submit_request(_ch, bdev_io);
}

static void
stub_submit_request_get_buf(struct spdk_io_channel *_ch, struct spdk_bdev_io *bdev_io)
{
	spdk_bdev_io_get_buf(bdev_io, stub_submit_request_get_buf_cb,
			     bdev_io->u.bdev.num_blocks * bdev_io->bdev->blocklen);
}

static uint32_t
stub_complete_io(uint32_t num_to_complete)
{
	struct bdev_ut_channel *ch = g_bdev_ut_channel;
	struct spdk_bdev_io *bdev_io;
	static enum spdk_bdev_io_status io_status;
	uint32_t num_completed = 0;

	while (num_completed < num_to_complete) {
		if (TAILQ_EMPTY(&ch->outstanding_io)) {
			break;
		}
		bdev_io = TAILQ_FIRST(&ch->outstanding_io);
		TAILQ_REMOVE(&ch->outstanding_io, bdev_io, module_link);
		ch->outstanding_io_count--;
		io_status = g_io_exp_status == SPDK_BDEV_IO_STATUS_SUCCESS ? SPDK_BDEV_IO_STATUS_SUCCESS :
			    g_io_exp_status;
		spdk_bdev_io_complete(bdev_io, io_status);
		num_completed++;
	}

	return num_completed;
}

static struct spdk_io_channel *
bdev_ut_get_io_channel(void *ctx)
{
	return spdk_get_io_channel(&g_bdev_ut_io_device);
}

static bool g_io_types_supported[SPDK_BDEV_NUM_IO_TYPES] = {
	[SPDK_BDEV_IO_TYPE_READ]		= true,
	[SPDK_BDEV_IO_TYPE_WRITE]		= true,
	[SPDK_BDEV_IO_TYPE_COMPARE]		= true,
	[SPDK_BDEV_IO_TYPE_UNMAP]		= true,
	[SPDK_BDEV_IO_TYPE_FLUSH]		= true,
	[SPDK_BDEV_IO_TYPE_RESET]		= true,
	[SPDK_BDEV_IO_TYPE_NVME_ADMIN]		= true,
	[SPDK_BDEV_IO_TYPE_NVME_IO]		= true,
	[SPDK_BDEV_IO_TYPE_NVME_IO_MD]		= true,
	[SPDK_BDEV_IO_TYPE_WRITE_ZEROES]	= true,
	[SPDK_BDEV_IO_TYPE_ZCOPY]		= true,
	[SPDK_BDEV_IO_TYPE_ABORT]		= true,
	[SPDK_BDEV_IO_TYPE_SEEK_HOLE]		= true,
	[SPDK_BDEV_IO_TYPE_SEEK_DATA]		= true,
	[SPDK_BDEV_IO_TYPE_COPY]		= true,
};

static void
ut_enable_io_type(enum spdk_bdev_io_type io_type, bool enable)
{
	g_io_types_supported[io_type] = enable;
}

static bool
stub_io_type_supported(void *_bdev, enum spdk_bdev_io_type io_type)
{
	return g_io_types_supported[io_type];
}

static struct spdk_bdev_fn_table fn_table = {
	.destruct = stub_destruct,
	.submit_request = stub_submit_request,
	.get_io_channel = bdev_ut_get_io_channel,
	.io_type_supported = stub_io_type_supported,
};

static int
bdev_ut_create_ch(void *io_device, void *ctx_buf)
{
	struct bdev_ut_channel *ch = ctx_buf;

	CU_ASSERT(g_bdev_ut_channel == NULL);
	g_bdev_ut_channel = ch;

	TAILQ_INIT(&ch->outstanding_io);
	ch->outstanding_io_count = 0;
	TAILQ_INIT(&ch->expected_io);
	return 0;
}

static void
bdev_ut_destroy_ch(void *io_device, void *ctx_buf)
{
	CU_ASSERT(g_bdev_ut_channel != NULL);
	g_bdev_ut_channel = NULL;
}

struct spdk_bdev_module bdev_ut_if;
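/*
 * bdev_ut_if is declared above (and defined below) because
 * bdev_ut_module_init() signals completion via spdk_bdev_module_init_done();
 * with .async_init = true the bdev layer waits for that call before it
 * considers the module initialized.
 */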
static int
bdev_ut_module_init(void)
{
	spdk_io_device_register(&g_bdev_ut_io_device, bdev_ut_create_ch, bdev_ut_destroy_ch,
				sizeof(struct bdev_ut_channel), NULL);
	spdk_bdev_module_init_done(&bdev_ut_if);
	return 0;
}

static void
bdev_ut_module_fini(void)
{
	spdk_io_device_unregister(&g_bdev_ut_io_device, NULL);
}

struct spdk_bdev_module bdev_ut_if = {
	.name = "bdev_ut",
	.module_init = bdev_ut_module_init,
	.module_fini = bdev_ut_module_fini,
	.async_init = true,
};

static void vbdev_ut_examine_config(struct spdk_bdev *bdev);
static void vbdev_ut_examine_disk(struct spdk_bdev *bdev);

static int
vbdev_ut_module_init(void)
{
	return 0;
}

static void
vbdev_ut_module_fini(void)
{
}

struct spdk_bdev_module vbdev_ut_if = {
	.name = "vbdev_ut",
	.module_init = vbdev_ut_module_init,
	.module_fini = vbdev_ut_module_fini,
	.examine_config = vbdev_ut_examine_config,
	.examine_disk = vbdev_ut_examine_disk,
};

SPDK_BDEV_MODULE_REGISTER(bdev_ut, &bdev_ut_if)
SPDK_BDEV_MODULE_REGISTER(vbdev_ut, &vbdev_ut_if)

struct ut_examine_ctx {
	void (*examine_config)(struct spdk_bdev *bdev);
	void (*examine_disk)(struct spdk_bdev *bdev);
	uint32_t examine_config_count;
	uint32_t examine_disk_count;
};

static void
vbdev_ut_examine_config(struct spdk_bdev *bdev)
{
	struct ut_examine_ctx *ctx = bdev->ctxt;

	if (ctx != NULL) {
		ctx->examine_config_count++;
		if (ctx->examine_config != NULL) {
			ctx->examine_config(bdev);
		}
	}

	spdk_bdev_module_examine_done(&vbdev_ut_if);
}

static void
vbdev_ut_examine_disk(struct spdk_bdev *bdev)
{
	struct ut_examine_ctx *ctx = bdev->ctxt;

	if (ctx != NULL) {
		ctx->examine_disk_count++;
		if (ctx->examine_disk != NULL) {
			ctx->examine_disk(bdev);
		}
	}

	spdk_bdev_module_examine_done(&vbdev_ut_if);
}

static struct spdk_bdev *
allocate_bdev_ctx(char *name, void *ctx)
{
	struct spdk_bdev *bdev;
	int rc;

	bdev = calloc(1, sizeof(*bdev));
	SPDK_CU_ASSERT_FATAL(bdev != NULL);

	bdev->ctxt = ctx;
	bdev->name = name;
	bdev->fn_table = &fn_table;
	bdev->module = &bdev_ut_if;
	bdev->blockcnt = 1024;
	bdev->blocklen = 512;

	spdk_uuid_generate(&bdev->uuid);

	rc = spdk_bdev_register(bdev);
	poll_threads();
	CU_ASSERT(rc == 0);

	return bdev;
}

static struct spdk_bdev *
allocate_bdev(char *name)
{
	return allocate_bdev_ctx(name, NULL);
}

static struct spdk_bdev *
allocate_vbdev(char *name)
{
	struct spdk_bdev *bdev;
	int rc;

	bdev = calloc(1, sizeof(*bdev));
	SPDK_CU_ASSERT_FATAL(bdev != NULL);

	bdev->name = name;
	bdev->fn_table = &fn_table;
	bdev->module = &vbdev_ut_if;

	rc = spdk_bdev_register(bdev);
	poll_threads();
	CU_ASSERT(rc == 0);

	return bdev;
}

static void
free_bdev(struct spdk_bdev *bdev)
{
	spdk_bdev_unregister(bdev, NULL, NULL);
	poll_threads();
	memset(bdev, 0xFF, sizeof(*bdev));
	free(bdev);
}

static void
free_vbdev(struct spdk_bdev *bdev)
{
	spdk_bdev_unregister(bdev, NULL, NULL);
	poll_threads();
	memset(bdev, 0xFF, sizeof(*bdev));
	free(bdev);
}
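/*
 * Note on ownership in the next callback: the test allocates the stat buffer
 * and the callback frees it once spdk_bdev_get_device_stat() has finished
 * with it.
 */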
static void
get_device_stat_cb(struct spdk_bdev *bdev, struct spdk_bdev_io_stat *stat, void *cb_arg, int rc)
{
	const char *bdev_name;

	CU_ASSERT(bdev != NULL);
	CU_ASSERT(rc == 0);
	bdev_name = spdk_bdev_get_name(bdev);
	CU_ASSERT_STRING_EQUAL(bdev_name, "bdev0");

	free(stat);

	*(bool *)cb_arg = true;
}

static void
bdev_unregister_cb(void *cb_arg, int rc)
{
	g_unregister_arg = cb_arg;
	g_unregister_rc = rc;
}

static void
bdev_ut_event_cb(enum spdk_bdev_event_type type, struct spdk_bdev *bdev, void *event_ctx)
{
}

static void
bdev_open_cb1(enum spdk_bdev_event_type type, struct spdk_bdev *bdev, void *event_ctx)
{
	struct spdk_bdev_desc *desc = *(struct spdk_bdev_desc **)event_ctx;

	g_event_type1 = type;
	if (SPDK_BDEV_EVENT_REMOVE == type) {
		spdk_bdev_close(desc);
	}
}

static void
bdev_open_cb2(enum spdk_bdev_event_type type, struct spdk_bdev *bdev, void *event_ctx)
{
	struct spdk_bdev_desc *desc = *(struct spdk_bdev_desc **)event_ctx;

	g_event_type2 = type;
	if (SPDK_BDEV_EVENT_REMOVE == type) {
		spdk_bdev_close(desc);
	}
}

static void
bdev_open_cb3(enum spdk_bdev_event_type type, struct spdk_bdev *bdev, void *event_ctx)
{
	g_event_type3 = type;
}

static void
bdev_open_cb4(enum spdk_bdev_event_type type, struct spdk_bdev *bdev, void *event_ctx)
{
	g_event_type4 = type;
}

static void
bdev_seek_cb(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
{
	g_seek_offset = spdk_bdev_io_get_seek_offset(bdev_io);
	spdk_bdev_free_io(bdev_io);
}

static void
get_device_stat_test(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_io_stat *stat;
	bool done;

	bdev = allocate_bdev("bdev0");
	stat = calloc(1, sizeof(struct spdk_bdev_io_stat));
	if (stat == NULL) {
		free_bdev(bdev);
		return;
	}

	done = false;
	spdk_bdev_get_device_stat(bdev, stat, get_device_stat_cb, &done);
	while (!done) { poll_threads(); }

	free_bdev(bdev);
}
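/*
 * Claim semantics exercised below, in short: once a module claims a bdev,
 * read-only opens still succeed, but opening it for writes fails with -EPERM
 * until the claim is released.
 */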
static void
open_write_test(void)
{
	struct spdk_bdev *bdev[9];
	struct spdk_bdev_desc *desc[9] = {};
	int rc;

	/*
	 * Create a tree of bdevs to test various open w/ write cases.
	 *
	 * bdev0 through bdev3 are physical block devices, such as NVMe
	 * namespaces or Ceph block devices.
	 *
	 * bdev4 is a virtual bdev with multiple base bdevs. This models
	 * caching or RAID use cases.
	 *
	 * bdev5 through bdev7 are all virtual bdevs with the same base
	 * bdev (except bdev7). This models partitioning or logical volume
	 * use cases.
	 *
	 * bdev7 is a virtual bdev with multiple base bdevs. One of base bdevs
	 * (bdev2) is shared with other virtual bdevs: bdev5 and bdev6. This
	 * models caching, RAID, partitioning or logical volumes use cases.
	 *
	 * bdev8 is a virtual bdev with multiple base bdevs, but these
	 * base bdevs are themselves virtual bdevs.
	 *
	 *                bdev8
	 *                  |
	 *            +----------+
	 *            |          |
	 *          bdev4      bdev5   bdev6   bdev7
	 *            |          |       |       |
	 *        +---+---+   +---+      +   +---+---+
	 *        |       |        \     |  /        \
	 *      bdev0   bdev1      bdev2      bdev3
	 */

	bdev[0] = allocate_bdev("bdev0");
	rc = spdk_bdev_module_claim_bdev(bdev[0], NULL, &bdev_ut_if);
	CU_ASSERT(rc == 0);

	bdev[1] = allocate_bdev("bdev1");
	rc = spdk_bdev_module_claim_bdev(bdev[1], NULL, &bdev_ut_if);
	CU_ASSERT(rc == 0);

	bdev[2] = allocate_bdev("bdev2");
	rc = spdk_bdev_module_claim_bdev(bdev[2], NULL, &bdev_ut_if);
	CU_ASSERT(rc == 0);

	bdev[3] = allocate_bdev("bdev3");
	rc = spdk_bdev_module_claim_bdev(bdev[3], NULL, &bdev_ut_if);
	CU_ASSERT(rc == 0);

	bdev[4] = allocate_vbdev("bdev4");
	rc = spdk_bdev_module_claim_bdev(bdev[4], NULL, &bdev_ut_if);
	CU_ASSERT(rc == 0);

	bdev[5] = allocate_vbdev("bdev5");
	rc = spdk_bdev_module_claim_bdev(bdev[5], NULL, &bdev_ut_if);
	CU_ASSERT(rc == 0);

	bdev[6] = allocate_vbdev("bdev6");

	bdev[7] = allocate_vbdev("bdev7");

	bdev[8] = allocate_vbdev("bdev8");

	/* Open bdev0 read-only. This should succeed. */
	rc = spdk_bdev_open_ext("bdev0", false, bdev_ut_event_cb, NULL, &desc[0]);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(desc[0] != NULL);
	CU_ASSERT(bdev[0] == spdk_bdev_desc_get_bdev(desc[0]));
	spdk_bdev_close(desc[0]);

	/*
	 * Open bdev1 read/write. This should fail since bdev1 has been claimed
	 * by a vbdev module.
	 */
	rc = spdk_bdev_open_ext("bdev1", true, bdev_ut_event_cb, NULL, &desc[1]);
	CU_ASSERT(rc == -EPERM);

	/*
	 * Open bdev4 read/write. This should fail since bdev4 has been claimed
	 * by a vbdev module.
	 */
	rc = spdk_bdev_open_ext("bdev4", true, bdev_ut_event_cb, NULL, &desc[4]);
	CU_ASSERT(rc == -EPERM);

	/* Open bdev4 read-only. This should succeed. */
	rc = spdk_bdev_open_ext("bdev4", false, bdev_ut_event_cb, NULL, &desc[4]);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(desc[4] != NULL);
	CU_ASSERT(bdev[4] == spdk_bdev_desc_get_bdev(desc[4]));
	spdk_bdev_close(desc[4]);

	/*
	 * Open bdev8 read/write. This should succeed since it is a leaf
	 * bdev.
	 */
	rc = spdk_bdev_open_ext("bdev8", true, bdev_ut_event_cb, NULL, &desc[8]);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(desc[8] != NULL);
	CU_ASSERT(bdev[8] == spdk_bdev_desc_get_bdev(desc[8]));
	spdk_bdev_close(desc[8]);

	/*
	 * Open bdev5 read/write. This should fail since bdev5 has been claimed
	 * by a vbdev module.
	 */
	rc = spdk_bdev_open_ext("bdev5", true, bdev_ut_event_cb, NULL, &desc[5]);
	CU_ASSERT(rc == -EPERM);

	/* Open bdev5 read-only. This should succeed. */
	rc = spdk_bdev_open_ext("bdev5", false, bdev_ut_event_cb, NULL, &desc[5]);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(desc[5] != NULL);
	CU_ASSERT(bdev[5] == spdk_bdev_desc_get_bdev(desc[5]));
	spdk_bdev_close(desc[5]);

	free_vbdev(bdev[8]);

	free_vbdev(bdev[5]);
	free_vbdev(bdev[6]);
	free_vbdev(bdev[7]);

	free_vbdev(bdev[4]);

	free_bdev(bdev[0]);
	free_bdev(bdev[1]);
	free_bdev(bdev[2]);
	free_bdev(bdev[3]);
}
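/*
 * Sketch of the claim lifecycle that claim_test() below walks through:
 *
 *	spdk_bdev_open_ext("bdev0", false, bdev_ut_event_cb, NULL, &desc);
 *	spdk_bdev_module_claim_bdev(bdev, NULL, &bdev_ut_if);  (desc stays read-only)
 *	spdk_bdev_module_release_bdev(bdev);
 *	spdk_bdev_module_claim_bdev(bdev, desc, &bdev_ut_if);  (desc upgraded to read-write)
 */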
static void
claim_test(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc, *open_desc;
	int rc;
	uint32_t count;

	/*
	 * A vbdev that uses a read-only bdev may need it to remain read-only.
	 * To do so, it opens the bdev read-only, then claims it without
	 * passing a spdk_bdev_desc.
	 */
	bdev = allocate_bdev("bdev0");
	rc = spdk_bdev_open_ext("bdev0", false, bdev_ut_event_cb, NULL, &desc);
	CU_ASSERT(rc == 0);
	CU_ASSERT(desc->write == false);

	rc = spdk_bdev_module_claim_bdev(bdev, NULL, &bdev_ut_if);
	CU_ASSERT(rc == 0);
	CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_EXCL_WRITE);
	CU_ASSERT(bdev->internal.claim.v1.module == &bdev_ut_if);

	/* There should be only one open descriptor and it should still be ro */
	count = 0;
	TAILQ_FOREACH(open_desc, &bdev->internal.open_descs, link) {
		CU_ASSERT(open_desc == desc);
		CU_ASSERT(!open_desc->write);
		count++;
	}
	CU_ASSERT(count == 1);

	/* A read-only bdev is upgraded to read-write if desc is passed. */
	spdk_bdev_module_release_bdev(bdev);
	rc = spdk_bdev_module_claim_bdev(bdev, desc, &bdev_ut_if);
	CU_ASSERT(rc == 0);
	CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_EXCL_WRITE);
	CU_ASSERT(bdev->internal.claim.v1.module == &bdev_ut_if);

	/* There should be only one open descriptor and it should be rw */
	count = 0;
	TAILQ_FOREACH(open_desc, &bdev->internal.open_descs, link) {
		CU_ASSERT(open_desc == desc);
		CU_ASSERT(open_desc->write);
		count++;
	}
	CU_ASSERT(count == 1);

	spdk_bdev_close(desc);
	free_bdev(bdev);
}

static void
bytes_to_blocks_test(void)
{
	struct spdk_bdev bdev;
	uint64_t offset_blocks, num_blocks;

	memset(&bdev, 0, sizeof(bdev));

	bdev.blocklen = 512;

	/* All parameters valid */
	offset_blocks = 0;
	num_blocks = 0;
	CU_ASSERT(bdev_bytes_to_blocks(&bdev, 512, &offset_blocks, 1024, &num_blocks) == 0);
	CU_ASSERT(offset_blocks == 1);
	CU_ASSERT(num_blocks == 2);

	/* Offset not a block multiple */
	CU_ASSERT(bdev_bytes_to_blocks(&bdev, 3, &offset_blocks, 512, &num_blocks) != 0);

	/* Length not a block multiple */
	CU_ASSERT(bdev_bytes_to_blocks(&bdev, 512, &offset_blocks, 3, &num_blocks) != 0);

	/* In case blocklen is not a power of two */
	bdev.blocklen = 100;
	CU_ASSERT(bdev_bytes_to_blocks(&bdev, 100, &offset_blocks, 200, &num_blocks) == 0);
	CU_ASSERT(offset_blocks == 1);
	CU_ASSERT(num_blocks == 2);

	/* Offset not a block multiple */
	CU_ASSERT(bdev_bytes_to_blocks(&bdev, 3, &offset_blocks, 100, &num_blocks) != 0);

	/* Length not a block multiple */
	CU_ASSERT(bdev_bytes_to_blocks(&bdev, 100, &offset_blocks, 3, &num_blocks) != 0);
}

static void
num_blocks_test(void)
{
	struct spdk_bdev bdev;
	struct spdk_bdev_desc *desc = NULL;
	int rc;

	memset(&bdev, 0, sizeof(bdev));
	bdev.name = "num_blocks";
	bdev.fn_table = &fn_table;
	bdev.module = &bdev_ut_if;
	spdk_bdev_register(&bdev);
	poll_threads();
	spdk_bdev_notify_blockcnt_change(&bdev, 50);

	/* Growing block number */
	CU_ASSERT(spdk_bdev_notify_blockcnt_change(&bdev, 70) == 0);
	/* Shrinking block number */
	CU_ASSERT(spdk_bdev_notify_blockcnt_change(&bdev, 30) == 0);

	rc = spdk_bdev_open_ext("num_blocks", false, bdev_open_cb1, &desc, &desc);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(desc != NULL);
	CU_ASSERT(&bdev == spdk_bdev_desc_get_bdev(desc));

	/* Growing block number */
	CU_ASSERT(spdk_bdev_notify_blockcnt_change(&bdev, 80) == 0);
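	/* Shrinking is rejected once the bdev has an open descriptor. */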
	/* Shrinking block number */
	CU_ASSERT(spdk_bdev_notify_blockcnt_change(&bdev, 20) != 0);

	g_event_type1 = 0xFF;
	/* Growing block number */
	CU_ASSERT(spdk_bdev_notify_blockcnt_change(&bdev, 90) == 0);

	poll_threads();
	CU_ASSERT_EQUAL(g_event_type1, SPDK_BDEV_EVENT_RESIZE);

	g_event_type1 = 0xFF;
	/* Growing block number and closing */
	CU_ASSERT(spdk_bdev_notify_blockcnt_change(&bdev, 100) == 0);

	spdk_bdev_close(desc);
	spdk_bdev_unregister(&bdev, NULL, NULL);

	poll_threads();

	/* Callback is not called for closed device */
	CU_ASSERT_EQUAL(g_event_type1, 0xFF);
}

static void
io_valid_test(void)
{
	struct spdk_bdev bdev;

	memset(&bdev, 0, sizeof(bdev));

	bdev.blocklen = 512;
	spdk_spin_init(&bdev.internal.spinlock);

	spdk_bdev_notify_blockcnt_change(&bdev, 100);

	/* All parameters valid */
	CU_ASSERT(bdev_io_valid_blocks(&bdev, 1, 2) == true);

	/* Last valid block */
	CU_ASSERT(bdev_io_valid_blocks(&bdev, 99, 1) == true);

	/* Offset past end of bdev */
	CU_ASSERT(bdev_io_valid_blocks(&bdev, 100, 1) == false);

	/* Offset + length past end of bdev */
	CU_ASSERT(bdev_io_valid_blocks(&bdev, 99, 2) == false);

	/* Offset near end of uint64_t range (2^64 - 1) */
	CU_ASSERT(bdev_io_valid_blocks(&bdev, 18446744073709551615ULL, 1) == false);

	spdk_spin_destroy(&bdev.internal.spinlock);
}

static void
alias_add_del_test(void)
{
	struct spdk_bdev *bdev[3];
	int rc;

	/* Creating and registering bdevs */
	bdev[0] = allocate_bdev("bdev0");
	SPDK_CU_ASSERT_FATAL(bdev[0] != 0);

	bdev[1] = allocate_bdev("bdev1");
	SPDK_CU_ASSERT_FATAL(bdev[1] != 0);

	bdev[2] = allocate_bdev("bdev2");
	SPDK_CU_ASSERT_FATAL(bdev[2] != 0);

	poll_threads();
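	/*
	 * Alias rules validated below: an alias must differ from every
	 * registered bdev name, must not be NULL, and may live on at most one
	 * bdev's aliases list at a time.
	 */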
	/*
	 * Try adding an alias identical to the name.
	 * The alias is identical to the name, so it cannot be added to the aliases list.
	 */
	rc = spdk_bdev_alias_add(bdev[0], bdev[0]->name);
	CU_ASSERT(rc == -EEXIST);

	/*
	 * Trying to add an empty alias;
	 * this one should fail.
	 */
	rc = spdk_bdev_alias_add(bdev[0], NULL);
	CU_ASSERT(rc == -EINVAL);

	/* Try adding the same alias to two different registered bdevs */

	/* Alias is used first time, so this one should pass */
	rc = spdk_bdev_alias_add(bdev[0], "proper alias 0");
	CU_ASSERT(rc == 0);

	/* Alias was added to another bdev, so this one should fail */
	rc = spdk_bdev_alias_add(bdev[1], "proper alias 0");
	CU_ASSERT(rc == -EEXIST);

	/* Alias is used first time, so this one should pass */
	rc = spdk_bdev_alias_add(bdev[1], "proper alias 1");
	CU_ASSERT(rc == 0);

	/* Try removing an alias from registered bdevs */

	/* Alias is not on a bdev aliases list, so this one should fail */
	rc = spdk_bdev_alias_del(bdev[0], "not existing");
	CU_ASSERT(rc == -ENOENT);

	/* Alias is present on a bdev aliases list, so this one should pass */
	rc = spdk_bdev_alias_del(bdev[0], "proper alias 0");
	CU_ASSERT(rc == 0);

	/* Alias is present on a bdev aliases list, so this one should pass */
	rc = spdk_bdev_alias_del(bdev[1], "proper alias 1");
	CU_ASSERT(rc == 0);

	/* Trying to remove the name instead of an alias: this one should fail, since a name cannot be changed or removed */
	rc = spdk_bdev_alias_del(bdev[0], bdev[0]->name);
	CU_ASSERT(rc != 0);

	/* Try deleting all aliases from an empty alias list */
	spdk_bdev_alias_del_all(bdev[2]);
	SPDK_CU_ASSERT_FATAL(TAILQ_EMPTY(&bdev[2]->aliases));

	/* Try deleting all aliases from a non-empty alias list */
	rc = spdk_bdev_alias_add(bdev[2], "alias0");
	CU_ASSERT(rc == 0);
	rc = spdk_bdev_alias_add(bdev[2], "alias1");
	CU_ASSERT(rc == 0);
	spdk_bdev_alias_del_all(bdev[2]);
	CU_ASSERT(TAILQ_EMPTY(&bdev[2]->aliases));

	/* Unregister and free bdevs */
	spdk_bdev_unregister(bdev[0], NULL, NULL);
	spdk_bdev_unregister(bdev[1], NULL, NULL);
	spdk_bdev_unregister(bdev[2], NULL, NULL);

	poll_threads();

	free(bdev[0]);
	free(bdev[1]);
	free(bdev[2]);
}

static void
io_done(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
{
	g_io_done = true;
	g_io_status = bdev_io->internal.status;
	if ((bdev_io->type == SPDK_BDEV_IO_TYPE_ZCOPY) &&
	    (bdev_io->u.bdev.zcopy.start)) {
		g_zcopy_bdev_io = bdev_io;
	} else {
		spdk_bdev_free_io(bdev_io);
		g_zcopy_bdev_io = NULL;
	}
}

static void
bdev_init_cb(void *arg, int rc)
{
	CU_ASSERT(rc == 0);
}

static void
bdev_fini_cb(void *arg)
{
}

static void
ut_init_bdev(struct spdk_bdev_opts *opts)
{
	int rc;

	if (opts != NULL) {
		rc = spdk_bdev_set_opts(opts);
		CU_ASSERT(rc == 0);
	}
	rc = spdk_iobuf_initialize();
	CU_ASSERT(rc == 0);
	spdk_bdev_initialize(bdev_init_cb, NULL);
	poll_threads();
}

static void
ut_fini_bdev(void)
{
	spdk_bdev_finish(bdev_fini_cb, NULL);
	spdk_iobuf_finish(bdev_fini_cb, NULL);
	poll_threads();
}

struct bdev_ut_io_wait_entry {
	struct spdk_bdev_io_wait_entry	entry;
	struct spdk_io_channel		*io_ch;
	struct spdk_bdev_desc		*desc;
	bool				submitted;
};
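/*
 * ENOMEM retry pattern exercised by bdev_io_wait_test() below, as a sketch:
 * when a submission returns -ENOMEM, queue a wait entry and resubmit from
 * its callback once a completion frees a bdev_io in the pool.
 *
 *	entry.entry.bdev = bdev;
 *	entry.entry.cb_fn = io_wait_cb;
 *	entry.entry.cb_arg = &entry;
 *	spdk_bdev_queue_io_wait(bdev, io_ch, &entry.entry);
 */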
static void
io_wait_cb(void *arg)
{
	struct bdev_ut_io_wait_entry *entry = arg;
	int rc;

	rc = spdk_bdev_read_blocks(entry->desc, entry->io_ch, NULL, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	entry->submitted = true;
}

static void
bdev_io_types_test(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc = NULL;
	struct spdk_io_channel *io_ch;
	struct spdk_bdev_opts bdev_opts = {};
	int rc;

	spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts));
	bdev_opts.bdev_io_pool_size = 4;
	bdev_opts.bdev_io_cache_size = 2;
	ut_init_bdev(&bdev_opts);

	bdev = allocate_bdev("bdev0");

	rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc);
	CU_ASSERT(rc == 0);
	poll_threads();
	SPDK_CU_ASSERT_FATAL(desc != NULL);
	CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc));
	io_ch = spdk_bdev_get_io_channel(desc);
	CU_ASSERT(io_ch != NULL);

	/* WRITE and WRITE ZEROES are not supported */
	ut_enable_io_type(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, false);
	ut_enable_io_type(SPDK_BDEV_IO_TYPE_WRITE, false);
	rc = spdk_bdev_write_zeroes_blocks(desc, io_ch, 0, 128, io_done, NULL);
	CU_ASSERT(rc == -ENOTSUP);
	ut_enable_io_type(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, true);
	ut_enable_io_type(SPDK_BDEV_IO_TYPE_WRITE, true);

	/* NVME_IO, NVME_IO_MD and NVME_ADMIN are not supported */
	ut_enable_io_type(SPDK_BDEV_IO_TYPE_NVME_IO, false);
	ut_enable_io_type(SPDK_BDEV_IO_TYPE_NVME_IO_MD, false);
	ut_enable_io_type(SPDK_BDEV_IO_TYPE_NVME_ADMIN, false);
	rc = spdk_bdev_nvme_io_passthru(desc, io_ch, NULL, NULL, 0, NULL, NULL);
	CU_ASSERT(rc == -ENOTSUP);
	rc = spdk_bdev_nvme_io_passthru_md(desc, io_ch, NULL, NULL, 0, NULL, 0, NULL, NULL);
	CU_ASSERT(rc == -ENOTSUP);
	rc = spdk_bdev_nvme_admin_passthru(desc, io_ch, NULL, NULL, 0, NULL, NULL);
	CU_ASSERT(rc == -ENOTSUP);
	ut_enable_io_type(SPDK_BDEV_IO_TYPE_NVME_IO, true);
	ut_enable_io_type(SPDK_BDEV_IO_TYPE_NVME_IO_MD, true);
	ut_enable_io_type(SPDK_BDEV_IO_TYPE_NVME_ADMIN, true);

	spdk_put_io_channel(io_ch);
	spdk_bdev_close(desc);
	free_bdev(bdev);
	ut_fini_bdev();
}

static void
bdev_io_wait_test(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc = NULL;
	struct spdk_io_channel *io_ch;
	struct spdk_bdev_opts bdev_opts = {};
	struct bdev_ut_io_wait_entry io_wait_entry;
	struct bdev_ut_io_wait_entry io_wait_entry2;
	int rc;

	spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts));
	bdev_opts.bdev_io_pool_size = 4;
	bdev_opts.bdev_io_cache_size = 2;
	ut_init_bdev(&bdev_opts);

	bdev = allocate_bdev("bdev0");

	rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc);
	CU_ASSERT(rc == 0);
	poll_threads();
	SPDK_CU_ASSERT_FATAL(desc != NULL);
	CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc));
	io_ch = spdk_bdev_get_io_channel(desc);
	CU_ASSERT(io_ch != NULL);

	rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);
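	/* All four bdev_ios from the pool (bdev_io_pool_size = 4) are now in flight. */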
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 4);

	rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, NULL);
	CU_ASSERT(rc == -ENOMEM);

	io_wait_entry.entry.bdev = bdev;
	io_wait_entry.entry.cb_fn = io_wait_cb;
	io_wait_entry.entry.cb_arg = &io_wait_entry;
	io_wait_entry.io_ch = io_ch;
	io_wait_entry.desc = desc;
	io_wait_entry.submitted = false;
	/* Cannot use the same io_wait_entry for two different calls. */
	memcpy(&io_wait_entry2, &io_wait_entry, sizeof(io_wait_entry));
	io_wait_entry2.entry.cb_arg = &io_wait_entry2;

	/* Queue two I/O waits. */
	rc = spdk_bdev_queue_io_wait(bdev, io_ch, &io_wait_entry.entry);
	CU_ASSERT(rc == 0);
	CU_ASSERT(io_wait_entry.submitted == false);
	rc = spdk_bdev_queue_io_wait(bdev, io_ch, &io_wait_entry2.entry);
	CU_ASSERT(rc == 0);
	CU_ASSERT(io_wait_entry2.submitted == false);

	stub_complete_io(1);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 4);
	CU_ASSERT(io_wait_entry.submitted == true);
	CU_ASSERT(io_wait_entry2.submitted == false);

	stub_complete_io(1);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 4);
	CU_ASSERT(io_wait_entry2.submitted == true);

	stub_complete_io(4);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);

	spdk_put_io_channel(io_ch);
	spdk_bdev_close(desc);
	free_bdev(bdev);
	ut_fini_bdev();
}
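/*
 * bdev_io_should_split() is driven by the knobs the next test toggles:
 * optimal_io_boundary (with split_on_optimal_io_boundary), max_segment_size,
 * max_num_segments, and write_unit_size (with split_on_write_unit).
 */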
static void
bdev_io_spans_split_test(void)
{
	struct spdk_bdev bdev;
	struct spdk_bdev_io bdev_io;
	struct iovec iov[SPDK_BDEV_IO_NUM_CHILD_IOV];

	memset(&bdev, 0, sizeof(bdev));
	bdev_io.u.bdev.iovs = iov;

	bdev_io.type = SPDK_BDEV_IO_TYPE_READ;
	bdev.optimal_io_boundary = 0;
	bdev.max_segment_size = 0;
	bdev.max_num_segments = 0;
	bdev_io.bdev = &bdev;

	/* bdev has no optimal_io_boundary and no max size set - so this should return false. */
	CU_ASSERT(bdev_io_should_split(&bdev_io) == false);

	bdev.split_on_optimal_io_boundary = true;
	bdev.optimal_io_boundary = 32;
	bdev_io.type = SPDK_BDEV_IO_TYPE_RESET;

	/* RESETs are not based on LBAs - so this should return false. */
	CU_ASSERT(bdev_io_should_split(&bdev_io) == false);

	bdev_io.type = SPDK_BDEV_IO_TYPE_READ;
	bdev_io.u.bdev.offset_blocks = 0;
	bdev_io.u.bdev.num_blocks = 32;

	/* This I/O runs right up to, but does not cross, the boundary - so this should return false. */
	CU_ASSERT(bdev_io_should_split(&bdev_io) == false);

	bdev_io.u.bdev.num_blocks = 33;

	/* This I/O spans a boundary. */
	CU_ASSERT(bdev_io_should_split(&bdev_io) == true);

	bdev_io.u.bdev.num_blocks = 32;
	bdev.max_segment_size = 512 * 32;
	bdev.max_num_segments = 1;
	bdev_io.u.bdev.iovcnt = 1;
	iov[0].iov_len = 512;

	/* Does not cross the boundary and does not exceed max_size or max_segs */
	CU_ASSERT(bdev_io_should_split(&bdev_io) == false);

	bdev.split_on_optimal_io_boundary = false;
	bdev.max_segment_size = 512;
	bdev.max_num_segments = 1;
	bdev_io.u.bdev.iovcnt = 2;

	/* Exceeds max_segs */
	CU_ASSERT(bdev_io_should_split(&bdev_io) == true);

	bdev.max_num_segments = 2;
	iov[0].iov_len = 513;
	iov[1].iov_len = 512;

	/* Exceeds max_size */
	CU_ASSERT(bdev_io_should_split(&bdev_io) == true);

	bdev.max_segment_size = 0;
	bdev.write_unit_size = 32;
	bdev.split_on_write_unit = true;
	bdev_io.type = SPDK_BDEV_IO_TYPE_WRITE;

	/* This I/O is one write unit */
	CU_ASSERT(bdev_io_should_split(&bdev_io) == false);

	bdev_io.u.bdev.num_blocks = 32 * 2;

	/* This I/O is more than one write unit */
	CU_ASSERT(bdev_io_should_split(&bdev_io) == true);

	bdev_io.u.bdev.offset_blocks = 1;
	bdev_io.u.bdev.num_blocks = 32;

	/* This I/O is not aligned to write unit size */
	CU_ASSERT(bdev_io_should_split(&bdev_io) == true);
}

static void
bdev_io_boundary_split_test(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc = NULL;
	struct spdk_io_channel *io_ch;
	struct spdk_bdev_opts bdev_opts = {};
	struct iovec iov[SPDK_BDEV_IO_NUM_CHILD_IOV * 2];
	struct ut_expected_io *expected_io;
	void *md_buf = (void *)0xFF000000;
	uint64_t i;
	int rc;

	spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts));
	bdev_opts.bdev_io_pool_size = 512;
	bdev_opts.bdev_io_cache_size = 64;
	ut_init_bdev(&bdev_opts);

	bdev = allocate_bdev("bdev0");

	rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(desc != NULL);
	CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc));
	io_ch = spdk_bdev_get_io_channel(desc);
	CU_ASSERT(io_ch != NULL);

	bdev->optimal_io_boundary = 16;
	bdev->split_on_optimal_io_boundary = false;

	g_io_done = false;

	/* First test that the I/O does not get split if split_on_optimal_io_boundary == false. */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 14, 8, 1);
	ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 8 * 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 8, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);

	bdev->split_on_optimal_io_boundary = true;
	bdev->md_interleave = false;
	bdev->md_len = 8;
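	/*
	 * Note on the md_buf values used below: md_len is 8 and the metadata
	 * buffer is separate (md_interleave == false), so a child that starts
	 * N blocks into the parent I/O expects md_buf + N * 8.
	 */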
	/* Now test that a single-vector command is split correctly.
	 * Offset 14, length 8, payload 0xF000
	 *  Child - Offset 14, length 2, payload 0xF000
	 *  Child - Offset 16, length 6, payload 0xF000 + 2 * 512
	 *
	 * Set up the expected values before calling spdk_bdev_read_blocks
	 */
	g_io_done = false;
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 14, 2, 1);
	expected_io->md_buf = md_buf;
	ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 2 * 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 16, 6, 1);
	expected_io->md_buf = md_buf + 2 * 8;
	ut_expected_io_set_iov(expected_io, 0, (void *)(0xF000 + 2 * 512), 6 * 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	/* spdk_bdev_read_blocks will submit the first child immediately. */
	rc = spdk_bdev_read_blocks_with_md(desc, io_ch, (void *)0xF000, md_buf,
					   14, 8, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2);
	stub_complete_io(2);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);

	/* Now set up a more complex, multi-vector command that needs to be split,
	 * including splitting iovecs.
	 */
	iov[0].iov_base = (void *)0x10000;
	iov[0].iov_len = 512;
	iov[1].iov_base = (void *)0x20000;
	iov[1].iov_len = 20 * 512;
	iov[2].iov_base = (void *)0x30000;
	iov[2].iov_len = 11 * 512;

	g_io_done = false;
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 14, 2, 2);
	expected_io->md_buf = md_buf;
	ut_expected_io_set_iov(expected_io, 0, (void *)0x10000, 512);
	ut_expected_io_set_iov(expected_io, 1, (void *)0x20000, 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 16, 16, 1);
	expected_io->md_buf = md_buf + 2 * 8;
	ut_expected_io_set_iov(expected_io, 0, (void *)(0x20000 + 512), 16 * 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 32, 14, 2);
	expected_io->md_buf = md_buf + 18 * 8;
	ut_expected_io_set_iov(expected_io, 0, (void *)(0x20000 + 17 * 512), 3 * 512);
	ut_expected_io_set_iov(expected_io, 1, (void *)0x30000, 11 * 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	rc = spdk_bdev_writev_blocks_with_md(desc, io_ch, iov, 3, md_buf,
					     14, 32, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 3);
	stub_complete_io(3);
	CU_ASSERT(g_io_done == true);
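	/*
	 * The next cases exercise splits forced by the child-iov capacity
	 * (SPDK_BDEV_IO_NUM_CHILD_IOV) rather than by the I/O boundary alone.
	 */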
	/* Test a multi-vector command that needs to be split by strip and then needs to be
	 * split further due to the capacity of child iovs.
	 */
	for (i = 0; i < SPDK_BDEV_IO_NUM_CHILD_IOV * 2; i++) {
		iov[i].iov_base = (void *)((i + 1) * 0x10000);
		iov[i].iov_len = 512;
	}

	bdev->optimal_io_boundary = SPDK_BDEV_IO_NUM_CHILD_IOV;
	g_io_done = false;
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0, SPDK_BDEV_IO_NUM_CHILD_IOV,
					   SPDK_BDEV_IO_NUM_CHILD_IOV);
	expected_io->md_buf = md_buf;
	for (i = 0; i < SPDK_BDEV_IO_NUM_CHILD_IOV; i++) {
		ut_expected_io_set_iov(expected_io, i, (void *)((i + 1) * 0x10000), 512);
	}
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, SPDK_BDEV_IO_NUM_CHILD_IOV,
					   SPDK_BDEV_IO_NUM_CHILD_IOV, SPDK_BDEV_IO_NUM_CHILD_IOV);
	expected_io->md_buf = md_buf + SPDK_BDEV_IO_NUM_CHILD_IOV * 8;
	for (i = 0; i < SPDK_BDEV_IO_NUM_CHILD_IOV; i++) {
		ut_expected_io_set_iov(expected_io, i,
				       (void *)((i + 1 + SPDK_BDEV_IO_NUM_CHILD_IOV) * 0x10000), 512);
	}
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	rc = spdk_bdev_readv_blocks_with_md(desc, io_ch, iov, SPDK_BDEV_IO_NUM_CHILD_IOV * 2, md_buf,
					    0, SPDK_BDEV_IO_NUM_CHILD_IOV * 2, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);

	/* Test a multi-vector command that needs to be split by strip and then needs to be
	 * split further due to the capacity of child iovs. In this case, the length of
	 * the rest of the iovec array within an I/O boundary is a multiple of the block size.
	 */

	/* Fill the iovec array for exactly one boundary. The iovec cnt for this boundary
	 * is SPDK_BDEV_IO_NUM_CHILD_IOV + 1, which exceeds the capacity of child iovs.
	 */
	for (i = 0; i < SPDK_BDEV_IO_NUM_CHILD_IOV - 2; i++) {
		iov[i].iov_base = (void *)((i + 1) * 0x10000);
		iov[i].iov_len = 512;
	}
	for (i = SPDK_BDEV_IO_NUM_CHILD_IOV - 2; i < SPDK_BDEV_IO_NUM_CHILD_IOV; i++) {
		iov[i].iov_base = (void *)((i + 1) * 0x10000);
		iov[i].iov_len = 256;
	}
	iov[SPDK_BDEV_IO_NUM_CHILD_IOV].iov_base = (void *)((SPDK_BDEV_IO_NUM_CHILD_IOV + 1) * 0x10000);
	iov[SPDK_BDEV_IO_NUM_CHILD_IOV].iov_len = 512;

	/* Add an extra iovec to trigger split */
	iov[SPDK_BDEV_IO_NUM_CHILD_IOV + 1].iov_base = (void *)((SPDK_BDEV_IO_NUM_CHILD_IOV + 2) * 0x10000);
	iov[SPDK_BDEV_IO_NUM_CHILD_IOV + 1].iov_len = 512;

	bdev->optimal_io_boundary = SPDK_BDEV_IO_NUM_CHILD_IOV;
	g_io_done = false;
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0,
					   SPDK_BDEV_IO_NUM_CHILD_IOV - 1, SPDK_BDEV_IO_NUM_CHILD_IOV);
	expected_io->md_buf = md_buf;
	for (i = 0; i < SPDK_BDEV_IO_NUM_CHILD_IOV - 2; i++) {
		ut_expected_io_set_iov(expected_io, i,
				       (void *)((i + 1) * 0x10000), 512);
	}
	for (i = SPDK_BDEV_IO_NUM_CHILD_IOV - 2; i < SPDK_BDEV_IO_NUM_CHILD_IOV; i++) {
		ut_expected_io_set_iov(expected_io, i,
				       (void *)((i + 1) * 0x10000), 256);
	}
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, SPDK_BDEV_IO_NUM_CHILD_IOV - 1,
					   1, 1);
	expected_io->md_buf = md_buf + (SPDK_BDEV_IO_NUM_CHILD_IOV - 1) * 8;
	ut_expected_io_set_iov(expected_io, 0,
			       (void *)((SPDK_BDEV_IO_NUM_CHILD_IOV + 1) * 0x10000), 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, SPDK_BDEV_IO_NUM_CHILD_IOV,
					   1, 1);
	expected_io->md_buf = md_buf + SPDK_BDEV_IO_NUM_CHILD_IOV * 8;
	ut_expected_io_set_iov(expected_io, 0,
			       (void *)((SPDK_BDEV_IO_NUM_CHILD_IOV + 2) * 0x10000), 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	rc = spdk_bdev_readv_blocks_with_md(desc, io_ch, iov, SPDK_BDEV_IO_NUM_CHILD_IOV + 2, md_buf,
					    0, SPDK_BDEV_IO_NUM_CHILD_IOV + 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2);
	stub_complete_io(2);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);

	/* Test a multi-vector command that needs to be split by strip and then needs to be
	 * split further due to the capacity of child iovs; here the child request offset
	 * should be rewound to the last aligned offset so that the request succeeds without error.
	 */
	for (i = 0; i < SPDK_BDEV_IO_NUM_CHILD_IOV - 1; i++) {
		iov[i].iov_base = (void *)((i + 1) * 0x10000);
		iov[i].iov_len = 512;
	}
	iov[SPDK_BDEV_IO_NUM_CHILD_IOV - 1].iov_base = (void *)(SPDK_BDEV_IO_NUM_CHILD_IOV * 0x10000);
	iov[SPDK_BDEV_IO_NUM_CHILD_IOV - 1].iov_len = 256;

	iov[SPDK_BDEV_IO_NUM_CHILD_IOV].iov_base = (void *)((SPDK_BDEV_IO_NUM_CHILD_IOV + 1) * 0x10000);
	iov[SPDK_BDEV_IO_NUM_CHILD_IOV].iov_len = 256;

	iov[SPDK_BDEV_IO_NUM_CHILD_IOV + 1].iov_base = (void *)((SPDK_BDEV_IO_NUM_CHILD_IOV + 2) * 0x10000);
	iov[SPDK_BDEV_IO_NUM_CHILD_IOV + 1].iov_len = 512;

	bdev->optimal_io_boundary = SPDK_BDEV_IO_NUM_CHILD_IOV;
	g_io_done = false;
	g_io_status = 0;
	/* The first expected io should run from offset 0 to SPDK_BDEV_IO_NUM_CHILD_IOV - 1 */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0,
					   SPDK_BDEV_IO_NUM_CHILD_IOV - 1, SPDK_BDEV_IO_NUM_CHILD_IOV - 1);
	expected_io->md_buf = md_buf;
	for (i = 0; i < SPDK_BDEV_IO_NUM_CHILD_IOV - 1; i++) {
		ut_expected_io_set_iov(expected_io, i,
				       (void *)((i + 1) * 0x10000), 512);
	}
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
	/* The second expected io should run from offset SPDK_BDEV_IO_NUM_CHILD_IOV - 1 to SPDK_BDEV_IO_NUM_CHILD_IOV */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, SPDK_BDEV_IO_NUM_CHILD_IOV - 1,
					   1, 2);
	expected_io->md_buf = md_buf + (SPDK_BDEV_IO_NUM_CHILD_IOV - 1) * 8;
	ut_expected_io_set_iov(expected_io, 0,
			       (void *)(SPDK_BDEV_IO_NUM_CHILD_IOV * 0x10000), 256);
	ut_expected_io_set_iov(expected_io, 1,
			       (void *)((SPDK_BDEV_IO_NUM_CHILD_IOV + 1) * 0x10000), 256);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
	/* The third expected io should run from offset SPDK_BDEV_IO_NUM_CHILD_IOV to SPDK_BDEV_IO_NUM_CHILD_IOV + 1 */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, SPDK_BDEV_IO_NUM_CHILD_IOV,
					   1, 1);
	expected_io->md_buf = md_buf + SPDK_BDEV_IO_NUM_CHILD_IOV * 8;
	ut_expected_io_set_iov(expected_io, 0,
			       (void *)((SPDK_BDEV_IO_NUM_CHILD_IOV + 2) * 0x10000), 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	rc = spdk_bdev_readv_blocks_with_md(desc, io_ch, iov, SPDK_BDEV_IO_NUM_CHILD_IOV * 2, md_buf,
					    0, SPDK_BDEV_IO_NUM_CHILD_IOV + 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2);
	stub_complete_io(2);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);

	/* Test a multi-vector command that needs to be split due to the IO boundary and
	 * the capacity of child iovs. In particular, test the case when the command is
	 * split due to the capacity of child iovs, the tail address is not aligned with
	 * block size and is rewound to the aligned address.
	 *
	 * The iovecs used in the read request are complex but are based on the data
	 * collected in a real issue. We change the base addresses but keep the lengths,
	 * so as not to lose the credibility of the test.
	 */
	bdev->optimal_io_boundary = 128;
	g_io_done = false;
	g_io_status = 0;

	for (i = 0; i < 31; i++) {
		iov[i].iov_base = (void *)(0xFEED0000000 + (i << 20));
		iov[i].iov_len = 1024;
	}
	iov[31].iov_base = (void *)0xFEED1F00000;
	iov[31].iov_len = 32768;
	iov[32].iov_base = (void *)0xFEED2000000;
	iov[32].iov_len = 160;
	iov[33].iov_base = (void *)0xFEED2100000;
	iov[33].iov_len = 4096;
	iov[34].iov_base = (void *)0xFEED2200000;
	iov[34].iov_len = 4096;
	iov[35].iov_base = (void *)0xFEED2300000;
	iov[35].iov_len = 4096;
	iov[36].iov_base = (void *)0xFEED2400000;
	iov[36].iov_len = 4096;
	iov[37].iov_base = (void *)0xFEED2500000;
	iov[37].iov_len = 4096;
	iov[38].iov_base = (void *)0xFEED2600000;
	iov[38].iov_len = 4096;
	iov[39].iov_base = (void *)0xFEED2700000;
	iov[39].iov_len = 4096;
	iov[40].iov_base = (void *)0xFEED2800000;
	iov[40].iov_len = 4096;
	iov[41].iov_base = (void *)0xFEED2900000;
	iov[41].iov_len = 4096;
	iov[42].iov_base = (void *)0xFEED2A00000;
	iov[42].iov_len = 4096;
	iov[43].iov_base = (void *)0xFEED2B00000;
	iov[43].iov_len = 12288;
	iov[44].iov_base = (void *)0xFEED2C00000;
	iov[44].iov_len = 8192;
	iov[45].iov_base = (void *)0xFEED2F00000;
	iov[45].iov_len = 4096;
	iov[46].iov_base = (void *)0xFEED3000000;
	iov[46].iov_len = 4096;
	iov[47].iov_base = (void *)0xFEED3100000;
	iov[47].iov_len = 4096;
	iov[48].iov_base = (void *)0xFEED3200000;
	iov[48].iov_len = 24576;
	iov[49].iov_base = (void *)0xFEED3300000;
	iov[49].iov_len = 16384;
	iov[50].iov_base = (void *)0xFEED3400000;
	iov[50].iov_len = 12288;
	iov[51].iov_base = (void *)0xFEED3500000;
	iov[51].iov_len = 4096;
	iov[52].iov_base = (void *)0xFEED3600000;
	iov[52].iov_len = 4096;
	iov[53].iov_base = (void *)0xFEED3700000;
	iov[53].iov_len = 4096;
	iov[54].iov_base = (void *)0xFEED3800000;
	iov[54].iov_len = 28672;
	iov[55].iov_base = (void *)0xFEED3900000;
	iov[55].iov_len = 20480;
	iov[56].iov_base = (void *)0xFEED3A00000;
	iov[56].iov_len = 4096;
	iov[57].iov_base = (void *)0xFEED3B00000;
	iov[57].iov_len = 12288;
	iov[58].iov_base = (void *)0xFEED3C00000;
	iov[58].iov_len = 4096;
	iov[59].iov_base = (void *)0xFEED3D00000;
	iov[59].iov_len = 4096;
	iov[60].iov_base = (void *)0xFEED3E00000;
	iov[60].iov_len = 352;

	/* The 1st child IO must be from iov[0] to iov[31], split by the capacity
	 * of child iovs.
	 */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0, 126, 32);
	expected_io->md_buf = md_buf;
	for (i = 0; i < 32; i++) {
		ut_expected_io_set_iov(expected_io, i, iov[i].iov_base, iov[i].iov_len);
	}
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
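	/*
	 * Arithmetic check for the 1st child: 31 iovecs of 1024 bytes plus one of
	 * 32768 bytes is 64512 bytes, i.e. 126 blocks of 512 bytes, so the 32-iov
	 * capacity cuts this child two blocks short of the 128-block boundary.
	 */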
	/* The 2nd child IO must be from iov[32] to the first 864 bytes of iov[33],
	 * split by the IO boundary requirement.
	 */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 126, 2, 2);
	expected_io->md_buf = md_buf + 126 * 8;
	ut_expected_io_set_iov(expected_io, 0, iov[32].iov_base, iov[32].iov_len);
	ut_expected_io_set_iov(expected_io, 1, iov[33].iov_base, 864);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	/* The 3rd child IO must be from the remaining 3232 bytes of iov[33] to
	 * the first 864 bytes of iov[46], split by the IO boundary requirement.
	 */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 128, 128, 14);
	expected_io->md_buf = md_buf + 128 * 8;
	ut_expected_io_set_iov(expected_io, 0, (void *)((uintptr_t)iov[33].iov_base + 864),
			       iov[33].iov_len - 864);
	ut_expected_io_set_iov(expected_io, 1, iov[34].iov_base, iov[34].iov_len);
	ut_expected_io_set_iov(expected_io, 2, iov[35].iov_base, iov[35].iov_len);
	ut_expected_io_set_iov(expected_io, 3, iov[36].iov_base, iov[36].iov_len);
	ut_expected_io_set_iov(expected_io, 4, iov[37].iov_base, iov[37].iov_len);
	ut_expected_io_set_iov(expected_io, 5, iov[38].iov_base, iov[38].iov_len);
	ut_expected_io_set_iov(expected_io, 6, iov[39].iov_base, iov[39].iov_len);
	ut_expected_io_set_iov(expected_io, 7, iov[40].iov_base, iov[40].iov_len);
	ut_expected_io_set_iov(expected_io, 8, iov[41].iov_base, iov[41].iov_len);
	ut_expected_io_set_iov(expected_io, 9, iov[42].iov_base, iov[42].iov_len);
	ut_expected_io_set_iov(expected_io, 10, iov[43].iov_base, iov[43].iov_len);
	ut_expected_io_set_iov(expected_io, 11, iov[44].iov_base, iov[44].iov_len);
	ut_expected_io_set_iov(expected_io, 12, iov[45].iov_base, iov[45].iov_len);
	ut_expected_io_set_iov(expected_io, 13, iov[46].iov_base, 864);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	/* The 4th child IO must be from the remaining 3232 bytes of iov[46] to the
	 * first 864 bytes of iov[52], split by the IO boundary requirement.
	 */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 256, 128, 7);
	expected_io->md_buf = md_buf + 256 * 8;
	ut_expected_io_set_iov(expected_io, 0, (void *)((uintptr_t)iov[46].iov_base + 864),
			       iov[46].iov_len - 864);
	ut_expected_io_set_iov(expected_io, 1, iov[47].iov_base, iov[47].iov_len);
	ut_expected_io_set_iov(expected_io, 2, iov[48].iov_base, iov[48].iov_len);
	ut_expected_io_set_iov(expected_io, 3, iov[49].iov_base, iov[49].iov_len);
	ut_expected_io_set_iov(expected_io, 4, iov[50].iov_base, iov[50].iov_len);
	ut_expected_io_set_iov(expected_io, 5, iov[51].iov_base, iov[51].iov_len);
	ut_expected_io_set_iov(expected_io, 6, iov[52].iov_base, 864);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
1749 */ 1750 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 384, 128, 6); 1751 expected_io->md_buf = md_buf + 384 * 8; 1752 ut_expected_io_set_iov(expected_io, 0, (void *)((uintptr_t)iov[52].iov_base + 864), 1753 iov[52].iov_len - 864); 1754 ut_expected_io_set_iov(expected_io, 1, iov[53].iov_base, iov[53].iov_len); 1755 ut_expected_io_set_iov(expected_io, 2, iov[54].iov_base, iov[54].iov_len); 1756 ut_expected_io_set_iov(expected_io, 3, iov[55].iov_base, iov[55].iov_len); 1757 ut_expected_io_set_iov(expected_io, 4, iov[56].iov_base, iov[56].iov_len); 1758 ut_expected_io_set_iov(expected_io, 5, iov[57].iov_base, 4960); 1759 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1760 1761 /* The 6th child IO must be from the remaining 7328 bytes of iov[57] 1762 * to the first 3936 bytes of iov[59] split by the capacity of child iovs. 1763 */ 1764 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 512, 30, 3); 1765 expected_io->md_buf = md_buf + 512 * 8; 1766 ut_expected_io_set_iov(expected_io, 0, (void *)((uintptr_t)iov[57].iov_base + 4960), 1767 iov[57].iov_len - 4960); 1768 ut_expected_io_set_iov(expected_io, 1, iov[58].iov_base, iov[58].iov_len); 1769 ut_expected_io_set_iov(expected_io, 2, iov[59].iov_base, 3936); 1770 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1771 1772 /* The 7th child IO is from the remaining 160 bytes of iov[59] and iov[60]. */ 1773 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 542, 1, 2); 1774 expected_io->md_buf = md_buf + 542 * 8; 1775 ut_expected_io_set_iov(expected_io, 0, (void *)((uintptr_t)iov[59].iov_base + 3936), 1776 iov[59].iov_len - 3936); 1777 ut_expected_io_set_iov(expected_io, 1, iov[60].iov_base, iov[60].iov_len); 1778 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1779 1780 rc = spdk_bdev_readv_blocks_with_md(desc, io_ch, iov, 61, md_buf, 1781 0, 543, io_done, NULL); 1782 CU_ASSERT(rc == 0); 1783 CU_ASSERT(g_io_done == false); 1784 1785 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 1786 stub_complete_io(1); 1787 CU_ASSERT(g_io_done == false); 1788 1789 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 5); 1790 stub_complete_io(5); 1791 CU_ASSERT(g_io_done == false); 1792 1793 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 1794 stub_complete_io(1); 1795 CU_ASSERT(g_io_done == true); 1796 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 1797 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS); 1798 1799 /* Test a WRITE_ZEROES that would span an I/O boundary. WRITE_ZEROES should not be 1800 * split, so test that. 1801 */ 1802 bdev->optimal_io_boundary = 15; 1803 g_io_done = false; 1804 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, 9, 36, 0); 1805 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1806 1807 rc = spdk_bdev_write_zeroes_blocks(desc, io_ch, 9, 36, io_done, NULL); 1808 CU_ASSERT(rc == 0); 1809 CU_ASSERT(g_io_done == false); 1810 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 1811 stub_complete_io(1); 1812 CU_ASSERT(g_io_done == true); 1813 1814 /* Test an UNMAP. This should also not be split.
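* (Unlike the reads above, an unmap carries no data iovs for the boundary logic to carve up; the asserts below expect it to reach the backend as a single IO even though blocks 15-16 straddle the 16-block boundary.)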
*/ 1815 bdev->optimal_io_boundary = 16; 1816 g_io_done = false; 1817 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_UNMAP, 15, 2, 0); 1818 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1819 1820 rc = spdk_bdev_unmap_blocks(desc, io_ch, 15, 2, io_done, NULL); 1821 CU_ASSERT(rc == 0); 1822 CU_ASSERT(g_io_done == false); 1823 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 1824 stub_complete_io(1); 1825 CU_ASSERT(g_io_done == true); 1826 1827 /* Test a FLUSH. This should also not be split. */ 1828 bdev->optimal_io_boundary = 16; 1829 g_io_done = false; 1830 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_FLUSH, 15, 2, 0); 1831 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1832 1833 rc = spdk_bdev_flush_blocks(desc, io_ch, 15, 2, io_done, NULL); 1834 CU_ASSERT(rc == 0); 1835 CU_ASSERT(g_io_done == false); 1836 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 1837 stub_complete_io(1); 1838 CU_ASSERT(g_io_done == true); 1839 1840 /* Test a COPY. This should also not be split. */ 1841 bdev->optimal_io_boundary = 15; 1842 g_io_done = false; 1843 expected_io = ut_alloc_expected_copy_io(SPDK_BDEV_IO_TYPE_COPY, 9, 45, 36); 1844 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1845 1846 rc = spdk_bdev_copy_blocks(desc, io_ch, 9, 45, 36, io_done, NULL); 1847 CU_ASSERT(rc == 0); 1848 CU_ASSERT(g_io_done == false); 1849 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 1850 stub_complete_io(1); 1851 CU_ASSERT(g_io_done == true); 1852 1853 CU_ASSERT(TAILQ_EMPTY(&g_bdev_ut_channel->expected_io)); 1854 1855 /* Child requests return an error status */ 1856 bdev->optimal_io_boundary = 16; 1857 iov[0].iov_base = (void *)0x10000; 1858 iov[0].iov_len = 512 * 64; 1859 g_io_exp_status = SPDK_BDEV_IO_STATUS_FAILED; 1860 g_io_done = false; 1861 g_io_status = SPDK_BDEV_IO_STATUS_SUCCESS; 1862 1863 rc = spdk_bdev_readv_blocks(desc, io_ch, iov, 1, 1, 64, io_done, NULL); 1864 CU_ASSERT(rc == 0); 1865 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 5); 1866 stub_complete_io(4); 1867 CU_ASSERT(g_io_done == false); 1868 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS); 1869 stub_complete_io(1); 1870 CU_ASSERT(g_io_done == true); 1871 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED); 1872 1873 /* Test that a multi-vector command is terminated with failure, without continuing 1874 * the splitting process, when one of its child I/Os fails. 1875 * The multi-vector command is the same as above: it needs to be split by strip 1876 * and then split further due to the capacity of child iovs.
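* (The first child consumes the available child iov entries; when it completes with an error below, the parent is failed immediately instead of continuing the split, so only a single child IO is ever submitted.)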
1877 */ 1878 for (i = 0; i < SPDK_BDEV_IO_NUM_CHILD_IOV - 1; i++) { 1879 iov[i].iov_base = (void *)((i + 1) * 0x10000); 1880 iov[i].iov_len = 512; 1881 } 1882 iov[SPDK_BDEV_IO_NUM_CHILD_IOV - 1].iov_base = (void *)(SPDK_BDEV_IO_NUM_CHILD_IOV * 0x10000); 1883 iov[SPDK_BDEV_IO_NUM_CHILD_IOV - 1].iov_len = 256; 1884 1885 iov[SPDK_BDEV_IO_NUM_CHILD_IOV].iov_base = (void *)((SPDK_BDEV_IO_NUM_CHILD_IOV + 1) * 0x10000); 1886 iov[SPDK_BDEV_IO_NUM_CHILD_IOV].iov_len = 256; 1887 1888 iov[SPDK_BDEV_IO_NUM_CHILD_IOV + 1].iov_base = (void *)((SPDK_BDEV_IO_NUM_CHILD_IOV + 2) * 0x10000); 1889 iov[SPDK_BDEV_IO_NUM_CHILD_IOV + 1].iov_len = 512; 1890 1891 bdev->optimal_io_boundary = SPDK_BDEV_IO_NUM_CHILD_IOV; 1892 1893 g_io_exp_status = SPDK_BDEV_IO_STATUS_FAILED; 1894 g_io_done = false; 1895 g_io_status = SPDK_BDEV_IO_STATUS_SUCCESS; 1896 1897 rc = spdk_bdev_readv_blocks(desc, io_ch, iov, SPDK_BDEV_IO_NUM_CHILD_IOV * 2, 0, 1898 SPDK_BDEV_IO_NUM_CHILD_IOV + 1, io_done, NULL); 1899 CU_ASSERT(rc == 0); 1900 CU_ASSERT(g_io_done == false); 1901 1902 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 1903 stub_complete_io(1); 1904 CU_ASSERT(g_io_done == true); 1905 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED); 1906 1907 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 1908 1909 /* for this test we will create the following conditions to hit the code path where 1910 * we are trying to send an IO following a split that has no iovs because we had to 1911 * trim them for alignment reasons. 1912 * 1913 * - 16K boundary, our IO will start at offset 0 with a length of 0x4200 1914 * - Our IOVs are 0x212 in size so that we run into the 16K boundary at child IOV 1915 * position 30 and overshoot by 0x2e. 1916 * - That means we'll send the IO and loop back to pick up the remaining bytes at 1917 * child IOV index 31. When we do, we find that we have to shorten index 31 by 0x2e 1918 * which eliminates that vector so we just send the first split IO with 31 vectors 1919 * and let the completion pick up the last 2 vectors. 1920 */ 1921 bdev->optimal_io_boundary = 32; 1922 bdev->split_on_optimal_io_boundary = true; 1923 g_io_done = false; 1924 1925 /* Init all parent IOVs to 0x212 */ 1926 for (i = 0; i < SPDK_BDEV_IO_NUM_CHILD_IOV + 2; i++) { 1927 iov[i].iov_base = (void *)((i + 1) * 0x10000); 1928 iov[i].iov_len = 0x212; 1929 } 1930 1931 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0, SPDK_BDEV_IO_NUM_CHILD_IOV, 1932 SPDK_BDEV_IO_NUM_CHILD_IOV - 1); 1933 /* expect 0-29 to be 1:1 with the parent iov */ 1934 for (i = 0; i < SPDK_BDEV_IO_NUM_CHILD_IOV - 2; i++) { 1935 ut_expected_io_set_iov(expected_io, i, iov[i].iov_base, iov[i].iov_len); 1936 } 1937 1938 /* expect index 30 to be shortened to 0x1e4 (0x212 - 0x2e) because of the alignment 1939 * where 0x2e is the amount we overshot the 16K boundary 1940 */ 1941 ut_expected_io_set_iov(expected_io, SPDK_BDEV_IO_NUM_CHILD_IOV - 2, 1942 (void *)(iov[SPDK_BDEV_IO_NUM_CHILD_IOV - 2].iov_base), 0x1e4); 1943 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1944 1945 /* 2nd child IO will have 2 remaining vectors, one to pick up from the one that was 1946 * shortened that takes it to the next boundary and then a final one to get us to 1947 * 0x4200 bytes for the IO.
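* (Check: 0x1e4 + 0x2e = 0x212, one whole parent iov, and 0x2e + 0x1d2 = 0x200, exactly one 512-byte block, so the parent's 33 blocks split as 32 + 1.)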
1948 */ 1949 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, SPDK_BDEV_IO_NUM_CHILD_IOV, 1950 SPDK_BDEV_IO_NUM_CHILD_IOV, 2); 1951 /* position 30 picked up the remaining bytes to the next boundary */ 1952 ut_expected_io_set_iov(expected_io, 0, 1953 (void *)(iov[SPDK_BDEV_IO_NUM_CHILD_IOV - 2].iov_base + 0x1e4), 0x2e); 1954 1955 /* position 31 picked up the rest of the transfer to get us to 0x4200 */ 1956 ut_expected_io_set_iov(expected_io, 1, 1957 (void *)(iov[SPDK_BDEV_IO_NUM_CHILD_IOV - 1].iov_base), 0x1d2); 1958 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1959 1960 rc = spdk_bdev_readv_blocks(desc, io_ch, iov, SPDK_BDEV_IO_NUM_CHILD_IOV + 1, 0, 1961 SPDK_BDEV_IO_NUM_CHILD_IOV + 1, io_done, NULL); 1962 CU_ASSERT(rc == 0); 1963 CU_ASSERT(g_io_done == false); 1964 1965 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 1966 stub_complete_io(1); 1967 CU_ASSERT(g_io_done == false); 1968 1969 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 1970 stub_complete_io(1); 1971 CU_ASSERT(g_io_done == true); 1972 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 1973 1974 spdk_put_io_channel(io_ch); 1975 spdk_bdev_close(desc); 1976 free_bdev(bdev); 1977 ut_fini_bdev(); 1978 } 1979 1980 static void 1981 bdev_io_max_size_and_segment_split_test(void) 1982 { 1983 struct spdk_bdev *bdev; 1984 struct spdk_bdev_desc *desc = NULL; 1985 struct spdk_io_channel *io_ch; 1986 struct spdk_bdev_opts bdev_opts = {}; 1987 struct iovec iov[SPDK_BDEV_IO_NUM_CHILD_IOV * 2]; 1988 struct ut_expected_io *expected_io; 1989 uint64_t i; 1990 int rc; 1991 1992 spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts)); 1993 bdev_opts.bdev_io_pool_size = 512; 1994 bdev_opts.bdev_io_cache_size = 64; 1995 bdev_opts.opts_size = sizeof(bdev_opts); 1996 ut_init_bdev(&bdev_opts); 1997 1998 bdev = allocate_bdev("bdev0"); 1999 2000 rc = spdk_bdev_open_ext(bdev->name, true, bdev_ut_event_cb, NULL, &desc); 2001 CU_ASSERT(rc == 0); 2002 SPDK_CU_ASSERT_FATAL(desc != NULL); 2003 io_ch = spdk_bdev_get_io_channel(desc); 2004 CU_ASSERT(io_ch != NULL); 2005 2006 bdev->split_on_optimal_io_boundary = false; 2007 bdev->optimal_io_boundary = 0; 2008 2009 /* Case 0: max_num_segments == 0, 2010 * but the segment size 2 * 512 exceeds max_segment_size (512). 2011 */ 2012 bdev->max_segment_size = 512; 2013 bdev->max_num_segments = 0; 2014 g_io_done = false; 2015 2016 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 14, 2, 2); 2017 ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 512); 2018 ut_expected_io_set_iov(expected_io, 1, (void *)(0xF000 + 512), 512); 2019 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2020 2021 rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 2, io_done, NULL); 2022 CU_ASSERT(rc == 0); 2023 CU_ASSERT(g_io_done == false); 2024 2025 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 2026 stub_complete_io(1); 2027 CU_ASSERT(g_io_done == true); 2028 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 2029 2030 /* Case 1: max_segment_size == 0, 2031 * but the iov count 2 exceeds max_num_segments (1).
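* (With max_num_segments = 1 every source iov becomes its own child IO: iov[0] is 512 bytes = 1 block at offset 14, iov[1] is 4096 bytes = 8 blocks at offset 15, which is exactly what the two expected IOs below encode.)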
2032 */ 2033 bdev->max_segment_size = 0; 2034 bdev->max_num_segments = 1; 2035 g_io_done = false; 2036 2037 iov[0].iov_base = (void *)0x10000; 2038 iov[0].iov_len = 512; 2039 iov[1].iov_base = (void *)0x20000; 2040 iov[1].iov_len = 8 * 512; 2041 2042 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 14, 1, 1); 2043 ut_expected_io_set_iov(expected_io, 0, iov[0].iov_base, iov[0].iov_len); 2044 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2045 2046 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 15, 8, 1); 2047 ut_expected_io_set_iov(expected_io, 0, iov[1].iov_base, iov[1].iov_len); 2048 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2049 2050 rc = spdk_bdev_readv_blocks(desc, io_ch, iov, 2, 14, 9, io_done, NULL); 2051 CU_ASSERT(rc == 0); 2052 CU_ASSERT(g_io_done == false); 2053 2054 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 2055 stub_complete_io(2); 2056 CU_ASSERT(g_io_done == true); 2057 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 2058 2059 /* Test that a non-vectored command is split correctly. 2060 * Set up the expected values before calling spdk_bdev_read_blocks 2061 */ 2062 bdev->max_segment_size = 512; 2063 bdev->max_num_segments = 1; 2064 g_io_done = false; 2065 2066 /* Child IO 0 */ 2067 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 14, 1, 1); 2068 ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 512); 2069 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2070 2071 /* Child IO 1 */ 2072 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 15, 1, 1); 2073 ut_expected_io_set_iov(expected_io, 0, (void *)(0xF000 + 1 * 512), 512); 2074 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2075 2076 /* spdk_bdev_read_blocks() will submit both children immediately. */ 2077 rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 2, io_done, NULL); 2078 CU_ASSERT(rc == 0); 2079 CU_ASSERT(g_io_done == false); 2080 2081 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 2082 stub_complete_io(2); 2083 CU_ASSERT(g_io_done == true); 2084 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 2085 2086 /* Now set up a more complex, multi-vector command that needs to be split, 2087 * including splitting iovecs.
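* (Here max_segment_size = 1024 and max_num_segments = 1 cap every child at 2 blocks, so the 12-block write below becomes 6 children: 1 from iov[0], 2 from iov[1] and 3 from iov[2].)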
2088 */ 2089 bdev->max_segment_size = 2 * 512; 2090 bdev->max_num_segments = 1; 2091 g_io_done = false; 2092 2093 iov[0].iov_base = (void *)0x10000; 2094 iov[0].iov_len = 2 * 512; 2095 iov[1].iov_base = (void *)0x20000; 2096 iov[1].iov_len = 4 * 512; 2097 iov[2].iov_base = (void *)0x30000; 2098 iov[2].iov_len = 6 * 512; 2099 2100 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 14, 2, 1); 2101 ut_expected_io_set_iov(expected_io, 0, iov[0].iov_base, 512 * 2); 2102 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2103 2104 /* iov[1] is split into two max_segment_size entries, each becoming its own child IO */ 2105 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 16, 2, 1); 2106 ut_expected_io_set_iov(expected_io, 0, iov[1].iov_base, 512 * 2); 2107 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2108 2109 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 18, 2, 1); 2110 ut_expected_io_set_iov(expected_io, 0, iov[1].iov_base + 512 * 2, 512 * 2); 2111 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2112 2113 /* iov[2] is split into three max_segment_size entries, each becoming its own child IO */ 2114 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 20, 2, 1); 2115 ut_expected_io_set_iov(expected_io, 0, iov[2].iov_base, 512 * 2); 2116 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2117 2118 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 22, 2, 1); 2119 ut_expected_io_set_iov(expected_io, 0, iov[2].iov_base + 512 * 2, 512 * 2); 2120 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2121 2122 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 24, 2, 1); 2123 ut_expected_io_set_iov(expected_io, 0, iov[2].iov_base + 512 * 4, 512 * 2); 2124 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2125 2126 rc = spdk_bdev_writev_blocks(desc, io_ch, iov, 3, 14, 12, io_done, NULL); 2127 CU_ASSERT(rc == 0); 2128 CU_ASSERT(g_io_done == false); 2129 2130 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 6); 2131 stub_complete_io(6); 2132 CU_ASSERT(g_io_done == true); 2133 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 2134 2135 /* Test a multi-vector command that needs to be split by strip and then split 2136 * further due to the capacity of the parent IO's child iovs. 2137 */ 2138 bdev->max_segment_size = 512; 2139 bdev->max_num_segments = 1; 2140 g_io_done = false; 2141 2142 for (i = 0; i < SPDK_BDEV_IO_NUM_CHILD_IOV; i++) { 2143 iov[i].iov_base = (void *)((i + 1) * 0x10000); 2144 iov[i].iov_len = 512 * 2; 2145 } 2146 2147 /* Each input iov is split into two 512-byte segments, 2148 * and half of the input iovs fill all child iov entries of a single split round.
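* (32 source iovs of 1024 bytes produce 64 single-block, single-segment children; 32 of them fit in one split round, hence the two completion batches of SPDK_BDEV_IO_NUM_CHILD_IOV asserted below.)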
2149 */ 2150 for (i = 0; i < SPDK_BDEV_IO_NUM_CHILD_IOV / 2; i++) { 2151 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 2 * i, 1, 1); 2152 ut_expected_io_set_iov(expected_io, 0, iov[i].iov_base, 512); 2153 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2154 2155 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 2 * i + 1, 1, 1); 2156 ut_expected_io_set_iov(expected_io, 0, iov[i].iov_base + 512, 512); 2157 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2158 } 2159 2160 /* The remaining iovs are split in the second round */ 2161 for (i = SPDK_BDEV_IO_NUM_CHILD_IOV / 2; i < SPDK_BDEV_IO_NUM_CHILD_IOV; i++) { 2162 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, i * 2, 1, 1); 2163 ut_expected_io_set_iov(expected_io, 0, iov[i].iov_base, 512); 2164 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2165 2166 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, i * 2 + 1, 1, 1); 2167 ut_expected_io_set_iov(expected_io, 0, iov[i].iov_base + 512, 512); 2168 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2169 } 2170 2171 rc = spdk_bdev_readv_blocks(desc, io_ch, iov, SPDK_BDEV_IO_NUM_CHILD_IOV, 0, 2172 SPDK_BDEV_IO_NUM_CHILD_IOV * 2, io_done, NULL); 2173 CU_ASSERT(rc == 0); 2174 CU_ASSERT(g_io_done == false); 2175 2176 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == SPDK_BDEV_IO_NUM_CHILD_IOV); 2177 stub_complete_io(SPDK_BDEV_IO_NUM_CHILD_IOV); 2178 CU_ASSERT(g_io_done == false); 2179 2180 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == SPDK_BDEV_IO_NUM_CHILD_IOV); 2181 stub_complete_io(SPDK_BDEV_IO_NUM_CHILD_IOV); 2182 CU_ASSERT(g_io_done == true); 2183 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 2184 2185 /* An error case: a child IO produced by the split is not 2186 * a multiple of the block size, 2187 * so the request exits with an error 2188 */ 2189 bdev->max_segment_size = 512; 2190 bdev->max_num_segments = 1; 2191 g_io_done = false; 2192 2193 iov[0].iov_base = (void *)0x10000; 2194 iov[0].iov_len = 512 + 256; 2195 iov[1].iov_base = (void *)0x20000; 2196 iov[1].iov_len = 256; 2197 2198 /* iov[0] is split into 512 and 256 bytes. 2199 * 256 bytes is less than a block, and the next round of the split 2200 * finds that its first child IO would be smaller than 2201 * the block size, so it exits with an error 2202 */ 2203 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0, 1, 1); 2204 ut_expected_io_set_iov(expected_io, 0, iov[0].iov_base, 512); 2205 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2206 2207 rc = spdk_bdev_readv_blocks(desc, io_ch, iov, 2, 0, 2, io_done, NULL); 2208 CU_ASSERT(rc == 0); 2209 CU_ASSERT(g_io_done == false); 2210 2211 /* First child IO is OK */ 2212 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 2213 stub_complete_io(1); 2214 CU_ASSERT(g_io_done == true); 2215 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 2216 2217 /* error exit */ 2218 stub_complete_io(1); 2219 CU_ASSERT(g_io_done == true); 2220 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED); 2221 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 2222 2223 /* Test a multi-vector command that needs to be split by strip and then split 2224 * further due to the capacity of child iovs. 2225 * 2226 * In this case the last two iovs need to be split, but that would exceed the 2227 * capacity of child iovs, so they must wait until the first batch has completed.
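* (30 single-block iovs plus 2 double-block iovs would need 30 + 4 = 34 segment entries; only 32 fit in one round, so the last 2-block iov is deferred to a second child IO.)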
2228 */ 2229 bdev->max_segment_size = 512; 2230 bdev->max_num_segments = SPDK_BDEV_IO_NUM_CHILD_IOV; 2231 g_io_done = false; 2232 2233 for (i = 0; i < SPDK_BDEV_IO_NUM_CHILD_IOV - 2; i++) { 2234 iov[i].iov_base = (void *)((i + 1) * 0x10000); 2235 iov[i].iov_len = 512; 2236 } 2237 for (i = SPDK_BDEV_IO_NUM_CHILD_IOV - 2; i < SPDK_BDEV_IO_NUM_CHILD_IOV; i++) { 2238 iov[i].iov_base = (void *)((i + 1) * 0x10000); 2239 iov[i].iov_len = 512 * 2; 2240 } 2241 2242 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0, 2243 SPDK_BDEV_IO_NUM_CHILD_IOV, SPDK_BDEV_IO_NUM_CHILD_IOV); 2244 /* 0 ~ (SPDK_BDEV_IO_NUM_CHILD_IOV - 2) will not be split */ 2245 for (i = 0; i < SPDK_BDEV_IO_NUM_CHILD_IOV - 2; i++) { 2246 ut_expected_io_set_iov(expected_io, i, iov[i].iov_base, iov[i].iov_len); 2247 } 2248 /* (SPDK_BDEV_IO_NUM_CHILD_IOV - 2) is split */ 2249 ut_expected_io_set_iov(expected_io, i, iov[i].iov_base, 512); 2250 ut_expected_io_set_iov(expected_io, i + 1, iov[i].iov_base + 512, 512); 2251 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2252 2253 /* The child iov entries exceed the parent IO's capacity, so the rest is split in the next round */ 2254 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, SPDK_BDEV_IO_NUM_CHILD_IOV, 2, 2); 2255 ut_expected_io_set_iov(expected_io, 0, iov[i + 1].iov_base, 512); 2256 ut_expected_io_set_iov(expected_io, 1, iov[i + 1].iov_base + 512, 512); 2257 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2258 2259 rc = spdk_bdev_readv_blocks(desc, io_ch, iov, SPDK_BDEV_IO_NUM_CHILD_IOV, 0, 2260 SPDK_BDEV_IO_NUM_CHILD_IOV + 2, io_done, NULL); 2261 CU_ASSERT(rc == 0); 2262 CU_ASSERT(g_io_done == false); 2263 2264 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 2265 stub_complete_io(1); 2266 CU_ASSERT(g_io_done == false); 2267 2268 /* Next round */ 2269 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 2270 stub_complete_io(1); 2271 CU_ASSERT(g_io_done == true); 2272 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 2273 2274 /* This case is similar to the previous one, but the data described by the last 2275 * few child iov entries is less than a blocklen, so it cannot be put into this 2276 * IO and must wait for the next one. 2277 */ 2278 bdev->max_segment_size = 512; 2279 bdev->max_num_segments = SPDK_BDEV_IO_NUM_CHILD_IOV; 2280 g_io_done = false; 2281 2282 for (i = 0; i < SPDK_BDEV_IO_NUM_CHILD_IOV - 2; i++) { 2283 iov[i].iov_base = (void *)((i + 1) * 0x10000); 2284 iov[i].iov_len = 512; 2285 } 2286 2287 for (i = SPDK_BDEV_IO_NUM_CHILD_IOV - 2; i < SPDK_BDEV_IO_NUM_CHILD_IOV + 2; i++) { 2288 iov[i].iov_base = (void *)((i + 1) * 0x10000); 2289 iov[i].iov_len = 128; 2290 } 2291 2292 /* The first child iovcnt isn't SPDK_BDEV_IO_NUM_CHILD_IOV but SPDK_BDEV_IO_NUM_CHILD_IOV - 2, 2293 * because the remaining 2 iovs are not enough for a blocklen. 2294 */ 2295 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0, 2296 SPDK_BDEV_IO_NUM_CHILD_IOV - 2, SPDK_BDEV_IO_NUM_CHILD_IOV - 2); 2297 for (i = 0; i < SPDK_BDEV_IO_NUM_CHILD_IOV - 2; i++) { 2298 ut_expected_io_set_iov(expected_io, i, iov[i].iov_base, iov[i].iov_len); 2299 } 2300 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2301 2302 /* The second child IO waits until the first child IO completes before executing, 2303 * because together the iovcnt of the two IOs exceeds the child iovcnt of the parent IO.
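* The 30 single-block iovs form the first child. The four 128-byte iovs sum to exactly 512 bytes, one block; they form the second child, covering source iovs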
* SPDK_BDEV_IO_NUM_CHILD_IOV - 2 to SPDK_BDEV_IO_NUM_CHILD_IOV + 2 2305 */ 2306 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, SPDK_BDEV_IO_NUM_CHILD_IOV - 2, 2307 1, 4); 2308 ut_expected_io_set_iov(expected_io, 0, iov[i].iov_base, iov[i].iov_len); 2309 ut_expected_io_set_iov(expected_io, 1, iov[i + 1].iov_base, iov[i + 1].iov_len); 2310 ut_expected_io_set_iov(expected_io, 2, iov[i + 2].iov_base, iov[i + 2].iov_len); 2311 ut_expected_io_set_iov(expected_io, 3, iov[i + 3].iov_base, iov[i + 3].iov_len); 2312 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2313 2314 rc = spdk_bdev_readv_blocks(desc, io_ch, iov, SPDK_BDEV_IO_NUM_CHILD_IOV + 2, 0, 2315 SPDK_BDEV_IO_NUM_CHILD_IOV - 1, io_done, NULL); 2316 CU_ASSERT(rc == 0); 2317 CU_ASSERT(g_io_done == false); 2318 2319 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 2320 stub_complete_io(1); 2321 CU_ASSERT(g_io_done == false); 2322 2323 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 2324 stub_complete_io(1); 2325 CU_ASSERT(g_io_done == true); 2326 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 2327 2328 /* A very complicated case. Each sg entry exceeds max_segment_size and 2329 * needs to be split; each child IO must stay a multiple of blocklen; 2330 * and the total child iovcnt exceeds the parent's iovcnt. 2331 */ 2332 bdev->max_segment_size = 512 + 128; 2333 bdev->max_num_segments = 3; 2334 g_io_done = false; 2335 2336 for (i = 0; i < SPDK_BDEV_IO_NUM_CHILD_IOV - 2; i++) { 2337 iov[i].iov_base = (void *)((i + 1) * 0x10000); 2338 iov[i].iov_len = 512 + 256; 2339 } 2340 2341 for (i = SPDK_BDEV_IO_NUM_CHILD_IOV - 2; i < SPDK_BDEV_IO_NUM_CHILD_IOV + 2; i++) { 2342 iov[i].iov_base = (void *)((i + 1) * 0x10000); 2343 iov[i].iov_len = 512 + 128; 2344 } 2345 2346 /* Each for() round generates 3 child IOs that use 9 child iov entries, 2347 * 3 * 9 = 27 entries over the loop. Each round consumes 4 parent IO iov 2348 * entries and 6 blocks of data. The loop generates 9 child IOs. 2349 */ 2350 for (i = 0; i < 3; i++) { 2351 uint32_t j = i * 4; 2352 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, i * 6, 2, 3); 2353 ut_expected_io_set_iov(expected_io, 0, iov[j].iov_base, 640); 2354 ut_expected_io_set_iov(expected_io, 1, iov[j].iov_base + 640, 128); 2355 ut_expected_io_set_iov(expected_io, 2, iov[j + 1].iov_base, 256); 2356 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2357 2358 /* A child IO must be a multiple of blocklen, so iov[j + 2] must be split: 2359 * if its third segment were also added, the multiple-of-blocklen 2360 * requirement could not be guaranteed. The skipped segment still 2361 * occupies one entry of the parent's child iov array.
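* Each round's children carry 640 + 128 + 256 = 1024, 512 + 512 = 1024 and 256 + 640 + 128 = 1024 bytes, i.e. 2 blocks apiece; counting the wasted slot that is 9 child iov entries per round.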
2362 */ 2363 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, i * 6 + 2, 2, 2); 2364 ut_expected_io_set_iov(expected_io, 0, iov[j + 1].iov_base + 256, 512); 2365 ut_expected_io_set_iov(expected_io, 1, iov[j + 2].iov_base, 512); 2366 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2367 2368 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, i * 6 + 4, 2, 3); 2369 ut_expected_io_set_iov(expected_io, 0, iov[j + 2].iov_base + 512, 256); 2370 ut_expected_io_set_iov(expected_io, 1, iov[j + 3].iov_base, 640); 2371 ut_expected_io_set_iov(expected_io, 2, iov[j + 3].iov_base + 640, 128); 2372 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2373 } 2374 2375 /* Child iov position at 27, the 10th child IO 2376 * iov entry index is 3 * 4 and offset is 3 * 6 2377 */ 2378 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 18, 2, 3); 2379 ut_expected_io_set_iov(expected_io, 0, iov[12].iov_base, 640); 2380 ut_expected_io_set_iov(expected_io, 1, iov[12].iov_base + 640, 128); 2381 ut_expected_io_set_iov(expected_io, 2, iov[13].iov_base, 256); 2382 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2383 2384 /* Child iov position at 30, the 11th child IO */ 2385 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 20, 2, 2); 2386 ut_expected_io_set_iov(expected_io, 0, iov[13].iov_base + 256, 512); 2387 ut_expected_io_set_iov(expected_io, 1, iov[14].iov_base, 512); 2388 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2389 2390 /* The 2nd split round and iovpos is 0, the 12th child IO */ 2391 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 22, 2, 3); 2392 ut_expected_io_set_iov(expected_io, 0, iov[14].iov_base + 512, 256); 2393 ut_expected_io_set_iov(expected_io, 1, iov[15].iov_base, 640); 2394 ut_expected_io_set_iov(expected_io, 2, iov[15].iov_base + 640, 128); 2395 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2396 2397 /* Consume 9 child IOs and 27 child iov entries. 2398 * Consume 4 parent IO iov entries per for() round and 6 block size. 2399 * Parent IO iov index start from 16 and block offset start from 24 2400 */ 2401 for (i = 0; i < 3; i++) { 2402 uint32_t j = i * 4 + 16; 2403 uint32_t offset = i * 6 + 24; 2404 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, offset, 2, 3); 2405 ut_expected_io_set_iov(expected_io, 0, iov[j].iov_base, 640); 2406 ut_expected_io_set_iov(expected_io, 1, iov[j].iov_base + 640, 128); 2407 ut_expected_io_set_iov(expected_io, 2, iov[j + 1].iov_base, 256); 2408 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2409 2410 /* Child io must be a multiple of blocklen 2411 * iov[j + 2] must be split. If the third entry is also added, 2412 * the multiple of blocklen cannot be guaranteed. But it still 2413 * occupies one iov entry of the parent child iov. 
2414 */ 2415 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, offset + 2, 2, 2); 2416 ut_expected_io_set_iov(expected_io, 0, iov[j + 1].iov_base + 256, 512); 2417 ut_expected_io_set_iov(expected_io, 1, iov[j + 2].iov_base, 512); 2418 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2419 2420 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, offset + 4, 2, 3); 2421 ut_expected_io_set_iov(expected_io, 0, iov[j + 2].iov_base + 512, 256); 2422 ut_expected_io_set_iov(expected_io, 1, iov[j + 3].iov_base, 640); 2423 ut_expected_io_set_iov(expected_io, 2, iov[j + 3].iov_base + 640, 128); 2424 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2425 } 2426 2427 /* The 22th child IO, child iov position at 30 */ 2428 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 42, 1, 1); 2429 ut_expected_io_set_iov(expected_io, 0, iov[28].iov_base, 512); 2430 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2431 2432 /* The third round */ 2433 /* Here is the 23nd child IO and child iovpos is 0 */ 2434 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 43, 2, 3); 2435 ut_expected_io_set_iov(expected_io, 0, iov[28].iov_base + 512, 256); 2436 ut_expected_io_set_iov(expected_io, 1, iov[29].iov_base, 640); 2437 ut_expected_io_set_iov(expected_io, 2, iov[29].iov_base + 640, 128); 2438 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2439 2440 /* The 24th child IO */ 2441 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 45, 3, 3); 2442 ut_expected_io_set_iov(expected_io, 0, iov[30].iov_base, 640); 2443 ut_expected_io_set_iov(expected_io, 1, iov[31].iov_base, 640); 2444 ut_expected_io_set_iov(expected_io, 2, iov[32].iov_base, 256); 2445 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2446 2447 /* The 25th child IO */ 2448 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 48, 2, 2); 2449 ut_expected_io_set_iov(expected_io, 0, iov[32].iov_base + 256, 384); 2450 ut_expected_io_set_iov(expected_io, 1, iov[33].iov_base, 640); 2451 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2452 2453 rc = spdk_bdev_readv_blocks(desc, io_ch, iov, SPDK_BDEV_IO_NUM_CHILD_IOV + 2, 0, 2454 50, io_done, NULL); 2455 CU_ASSERT(rc == 0); 2456 CU_ASSERT(g_io_done == false); 2457 2458 /* Parent IO supports up to 32 child iovs, so it is calculated that 2459 * a maximum of 11 IOs can be split at a time, and the 2460 * splitting will continue after the first batch is over. 2461 */ 2462 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 11); 2463 stub_complete_io(11); 2464 CU_ASSERT(g_io_done == false); 2465 2466 /* The 2nd round */ 2467 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 11); 2468 stub_complete_io(11); 2469 CU_ASSERT(g_io_done == false); 2470 2471 /* The last round */ 2472 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 3); 2473 stub_complete_io(3); 2474 CU_ASSERT(g_io_done == true); 2475 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 2476 2477 /* Test an WRITE_ZEROES. This should also not be split. 
*/ 2478 bdev->max_segment_size = 512; 2479 bdev->max_num_segments = 1; 2480 g_io_done = false; 2481 2482 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, 9, 36, 0); 2483 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2484 2485 rc = spdk_bdev_write_zeroes_blocks(desc, io_ch, 9, 36, io_done, NULL); 2486 CU_ASSERT(rc == 0); 2487 CU_ASSERT(g_io_done == false); 2488 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 2489 stub_complete_io(1); 2490 CU_ASSERT(g_io_done == true); 2491 2492 /* Test an UNMAP. This should also not be split. */ 2493 g_io_done = false; 2494 2495 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_UNMAP, 15, 4, 0); 2496 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2497 2498 rc = spdk_bdev_unmap_blocks(desc, io_ch, 15, 4, io_done, NULL); 2499 CU_ASSERT(rc == 0); 2500 CU_ASSERT(g_io_done == false); 2501 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 2502 stub_complete_io(1); 2503 CU_ASSERT(g_io_done == true); 2504 2505 /* Test a FLUSH. This should also not be split. */ 2506 g_io_done = false; 2507 2508 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_FLUSH, 15, 4, 0); 2509 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2510 2511 rc = spdk_bdev_flush_blocks(desc, io_ch, 15, 4, io_done, NULL); 2512 CU_ASSERT(rc == 0); 2513 CU_ASSERT(g_io_done == false); 2514 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 2515 stub_complete_io(1); 2516 CU_ASSERT(g_io_done == true); 2517 2518 /* Test a COPY. This should also not be split. */ 2519 g_io_done = false; 2520 2521 expected_io = ut_alloc_expected_copy_io(SPDK_BDEV_IO_TYPE_COPY, 9, 45, 36); 2522 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2523 2524 rc = spdk_bdev_copy_blocks(desc, io_ch, 9, 45, 36, io_done, NULL); 2525 CU_ASSERT(rc == 0); 2526 CU_ASSERT(g_io_done == false); 2527 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 2528 stub_complete_io(1); 2529 CU_ASSERT(g_io_done == true); 2530 2531 spdk_put_io_channel(io_ch); 2532 spdk_bdev_close(desc); 2533 free_bdev(bdev); 2534 ut_fini_bdev(); 2535 } 2536 2537 static void 2538 bdev_io_mix_split_test(void) 2539 { 2540 struct spdk_bdev *bdev; 2541 struct spdk_bdev_desc *desc = NULL; 2542 struct spdk_io_channel *io_ch; 2543 struct spdk_bdev_opts bdev_opts = {}; 2544 struct iovec iov[SPDK_BDEV_IO_NUM_CHILD_IOV * 2]; 2545 struct ut_expected_io *expected_io; 2546 uint64_t i; 2547 int rc; 2548 2549 spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts)); 2550 bdev_opts.bdev_io_pool_size = 512; 2551 bdev_opts.bdev_io_cache_size = 64; 2552 ut_init_bdev(&bdev_opts); 2553 2554 bdev = allocate_bdev("bdev0"); 2555 2556 rc = spdk_bdev_open_ext(bdev->name, true, bdev_ut_event_cb, NULL, &desc); 2557 CU_ASSERT(rc == 0); 2558 SPDK_CU_ASSERT_FATAL(desc != NULL); 2559 io_ch = spdk_bdev_get_io_channel(desc); 2560 CU_ASSERT(io_ch != NULL); 2561 2562 /* First case optimal_io_boundary == max_segment_size * max_num_segments */ 2563 bdev->split_on_optimal_io_boundary = true; 2564 bdev->optimal_io_boundary = 16; 2565 2566 bdev->max_segment_size = 512; 2567 bdev->max_num_segments = 16; 2568 g_io_done = false; 2569 2570 /* An IO crossing the IO boundary requires a split. 2571 * Total: 2 child IOs.
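* The 4-block read at offset 14 crosses the block-16 boundary, giving children of 2 + 2 blocks, and each child's 1024 bytes are carved into two 512-byte segments by max_segment_size.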
2572 */ 2573 2574 /* The 1st child IO split the segment_size to multiple segment entry */ 2575 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 14, 2, 2); 2576 ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 512); 2577 ut_expected_io_set_iov(expected_io, 1, (void *)(0xF000 + 512), 512); 2578 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2579 2580 /* The 2nd child IO split the segment_size to multiple segment entry */ 2581 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 16, 2, 2); 2582 ut_expected_io_set_iov(expected_io, 0, (void *)(0xF000 + 2 * 512), 512); 2583 ut_expected_io_set_iov(expected_io, 1, (void *)(0xF000 + 3 * 512), 512); 2584 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2585 2586 rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 4, io_done, NULL); 2587 CU_ASSERT(rc == 0); 2588 CU_ASSERT(g_io_done == false); 2589 2590 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 2591 stub_complete_io(2); 2592 CU_ASSERT(g_io_done == true); 2593 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 2594 2595 /* Second case optimal_io_boundary > max_segment_size * max_num_segments */ 2596 bdev->max_segment_size = 15 * 512; 2597 bdev->max_num_segments = 1; 2598 g_io_done = false; 2599 2600 /* IO crossing the IO boundary requires split. 2601 * The 1st child IO segment size exceeds the max_segment_size, 2602 * So 1st child IO will be split to multiple segment entry. 2603 * Then it split to 2 child IOs because of the max_num_segments. 2604 * Total 3 child IOs. 2605 */ 2606 2607 /* The first 2 IOs are in an IO boundary. 2608 * Because the optimal_io_boundary > max_segment_size * max_num_segments 2609 * So it split to the first 2 IOs. 2610 */ 2611 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0, 15, 1); 2612 ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 512 * 15); 2613 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2614 2615 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 15, 1, 1); 2616 ut_expected_io_set_iov(expected_io, 0, (void *)(0xF000 + 512 * 15), 512); 2617 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2618 2619 /* The 3rd Child IO is because of the io boundary */ 2620 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 16, 2, 1); 2621 ut_expected_io_set_iov(expected_io, 0, (void *)(0xF000 + 512 * 16), 512 * 2); 2622 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2623 2624 rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 0, 18, io_done, NULL); 2625 CU_ASSERT(rc == 0); 2626 CU_ASSERT(g_io_done == false); 2627 2628 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 3); 2629 stub_complete_io(3); 2630 CU_ASSERT(g_io_done == true); 2631 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 2632 2633 /* Third case optimal_io_boundary < max_segment_size * max_num_segments */ 2634 bdev->max_segment_size = 17 * 512; 2635 bdev->max_num_segments = 1; 2636 g_io_done = false; 2637 2638 /* IO crossing the IO boundary requires split. 2639 * Child IO does not split. 2640 * Total 2 child IOs. 
2641 */ 2642 2643 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0, 16, 1); 2644 ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 512 * 16); 2645 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2646 2647 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 16, 2, 1); 2648 ut_expected_io_set_iov(expected_io, 0, (void *)(0xF000 + 512 * 16), 512 * 2); 2649 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2650 2651 rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 0, 18, io_done, NULL); 2652 CU_ASSERT(rc == 0); 2653 CU_ASSERT(g_io_done == false); 2654 2655 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 2656 stub_complete_io(2); 2657 CU_ASSERT(g_io_done == true); 2658 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 2659 2660 /* Now set up a more complex, multi-vector command that needs to be split, 2661 * including splitting iovecs. 2662 * optimal_io_boundary < max_segment_size * max_num_segments 2663 */ 2664 bdev->max_segment_size = 3 * 512; 2665 bdev->max_num_segments = 6; 2666 g_io_done = false; 2667 2668 iov[0].iov_base = (void *)0x10000; 2669 iov[0].iov_len = 4 * 512; 2670 iov[1].iov_base = (void *)0x20000; 2671 iov[1].iov_len = 4 * 512; 2672 iov[2].iov_base = (void *)0x30000; 2673 iov[2].iov_len = 10 * 512; 2674 2675 /* IO crossing the IO boundary requires split. 2676 * The 1st child IO segment size exceeds the max_segment_size and after 2677 * splitting segment_size, the num_segments exceeds max_num_segments. 2678 * So 1st child IO will be split to 2 child IOs. 2679 * Total 3 child IOs. 2680 */ 2681 2682 /* The first 2 IOs are in an IO boundary. 2683 * After splitting segment size the segment num exceeds. 2684 * So it splits to 2 child IOs. 2685 */ 2686 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 0, 14, 6); 2687 ut_expected_io_set_iov(expected_io, 0, iov[0].iov_base, 512 * 3); 2688 ut_expected_io_set_iov(expected_io, 1, iov[0].iov_base + 512 * 3, 512); 2689 ut_expected_io_set_iov(expected_io, 2, iov[1].iov_base, 512 * 3); 2690 ut_expected_io_set_iov(expected_io, 3, iov[1].iov_base + 512 * 3, 512); 2691 ut_expected_io_set_iov(expected_io, 4, iov[2].iov_base, 512 * 3); 2692 ut_expected_io_set_iov(expected_io, 5, iov[2].iov_base + 512 * 3, 512 * 3); 2693 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2694 2695 /* The 2nd child IO has the left segment entry */ 2696 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 14, 2, 1); 2697 ut_expected_io_set_iov(expected_io, 0, iov[2].iov_base + 512 * 6, 512 * 2); 2698 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2699 2700 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 16, 2, 1); 2701 ut_expected_io_set_iov(expected_io, 0, iov[2].iov_base + 512 * 8, 512 * 2); 2702 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2703 2704 rc = spdk_bdev_writev_blocks(desc, io_ch, iov, 3, 0, 18, io_done, NULL); 2705 CU_ASSERT(rc == 0); 2706 CU_ASSERT(g_io_done == false); 2707 2708 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 3); 2709 stub_complete_io(3); 2710 CU_ASSERT(g_io_done == true); 2711 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 2712 2713 /* A very complicated case. Each sg entry exceeds max_segment_size 2714 * and split on io boundary. 
* optimal_io_boundary < max_segment_size * max_num_segments 2716 */ 2717 bdev->max_segment_size = 3 * 512; 2718 bdev->max_num_segments = SPDK_BDEV_IO_NUM_CHILD_IOV; 2719 g_io_done = false; 2720 2721 for (i = 0; i < 20; i++) { 2722 iov[i].iov_base = (void *)((i + 1) * 0x10000); 2723 iov[i].iov_len = 512 * 4; 2724 } 2725 2726 /* An IO crossing the IO boundary requires a split. 2727 * The 80-block length splits into 5 child IOs based on the offset and the IO boundary. 2728 * Each iov entry needs to be split into 2 entries because of max_segment_size. 2729 * Total: 5 child IOs. 2730 */ 2731 2732 /* 4 iov entries fall within each IO boundary window and each splits in two, 2733 * so each child IO occupies 8 child iov entries. 2734 */ 2735 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 0, 16, 8); 2736 for (i = 0; i < 4; i++) { 2737 int iovcnt = i * 2; 2738 ut_expected_io_set_iov(expected_io, iovcnt, iov[i].iov_base, 512 * 3); 2739 ut_expected_io_set_iov(expected_io, iovcnt + 1, iov[i].iov_base + 512 * 3, 512); 2740 } 2741 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2742 2743 /* The 2nd child IO, bringing the parent to 16 child iov entries in total */ 2744 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 16, 16, 8); 2745 for (i = 4; i < 8; i++) { 2746 int iovcnt = (i - 4) * 2; 2747 ut_expected_io_set_iov(expected_io, iovcnt, iov[i].iov_base, 512 * 3); 2748 ut_expected_io_set_iov(expected_io, iovcnt + 1, iov[i].iov_base + 512 * 3, 512); 2749 } 2750 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2751 2752 /* The 3rd child IO, bringing the parent to 24 child iov entries in total */ 2753 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 32, 16, 8); 2754 for (i = 8; i < 12; i++) { 2755 int iovcnt = (i - 8) * 2; 2756 ut_expected_io_set_iov(expected_io, iovcnt, iov[i].iov_base, 512 * 3); 2757 ut_expected_io_set_iov(expected_io, iovcnt + 1, iov[i].iov_base + 512 * 3, 512); 2758 } 2759 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2760 2761 /* The 4th child IO, bringing the parent to 32 child iov entries in total */ 2762 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 48, 16, 8); 2763 for (i = 12; i < 16; i++) { 2764 int iovcnt = (i - 12) * 2; 2765 ut_expected_io_set_iov(expected_io, iovcnt, iov[i].iov_base, 512 * 3); 2766 ut_expected_io_set_iov(expected_io, iovcnt + 1, iov[i].iov_base + 512 * 3, 512); 2767 } 2768 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2769 2770 /* The 5th child IO must be deferred to the next split round because the 2771 * parent's child iov entries are exhausted.
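* The first four children consume 4 * 8 = 32 child iov entries, the whole per-round budget, which is why the asserts below see 4 IOs complete first and then 1.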
2772 */ 2773 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 64, 16, 8); 2774 for (i = 16; i < 20; i++) { 2775 int iovcnt = (i - 16) * 2; 2776 ut_expected_io_set_iov(expected_io, iovcnt, iov[i].iov_base, 512 * 3); 2777 ut_expected_io_set_iov(expected_io, iovcnt + 1, iov[i].iov_base + 512 * 3, 512); 2778 } 2779 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2780 2781 rc = spdk_bdev_writev_blocks(desc, io_ch, iov, 20, 0, 80, io_done, NULL); 2782 CU_ASSERT(rc == 0); 2783 CU_ASSERT(g_io_done == false); 2784 2785 /* First split round */ 2786 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 4); 2787 stub_complete_io(4); 2788 CU_ASSERT(g_io_done == false); 2789 2790 /* Second split round */ 2791 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 2792 stub_complete_io(1); 2793 CU_ASSERT(g_io_done == true); 2794 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 2795 2796 spdk_put_io_channel(io_ch); 2797 spdk_bdev_close(desc); 2798 free_bdev(bdev); 2799 ut_fini_bdev(); 2800 } 2801 2802 static void 2803 bdev_io_split_with_io_wait(void) 2804 { 2805 struct spdk_bdev *bdev; 2806 struct spdk_bdev_desc *desc = NULL; 2807 struct spdk_io_channel *io_ch; 2808 struct spdk_bdev_channel *channel; 2809 struct spdk_bdev_mgmt_channel *mgmt_ch; 2810 struct spdk_bdev_opts bdev_opts = {}; 2811 struct iovec iov[3]; 2812 struct ut_expected_io *expected_io; 2813 int rc; 2814 2815 spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts)); 2816 bdev_opts.bdev_io_pool_size = 2; 2817 bdev_opts.bdev_io_cache_size = 1; 2818 ut_init_bdev(&bdev_opts); 2819 2820 bdev = allocate_bdev("bdev0"); 2821 2822 rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc); 2823 CU_ASSERT(rc == 0); 2824 CU_ASSERT(desc != NULL); 2825 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 2826 io_ch = spdk_bdev_get_io_channel(desc); 2827 CU_ASSERT(io_ch != NULL); 2828 channel = spdk_io_channel_get_ctx(io_ch); 2829 mgmt_ch = channel->shared_resource->mgmt_ch; 2830 2831 bdev->optimal_io_boundary = 16; 2832 bdev->split_on_optimal_io_boundary = true; 2833 2834 rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, NULL); 2835 CU_ASSERT(rc == 0); 2836 2837 /* Now test that a single-vector command is split correctly. 2838 * Offset 14, length 8, payload 0xF000 2839 * Child - Offset 14, length 2, payload 0xF000 2840 * Child - Offset 16, length 6, payload 0xF000 + 2 * 512 2841 * 2842 * Set up the expected values before calling spdk_bdev_read_blocks 2843 */ 2844 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 14, 2, 1); 2845 ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 2 * 512); 2846 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2847 2848 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 16, 6, 1); 2849 ut_expected_io_set_iov(expected_io, 0, (void *)(0xF000 + 2 * 512), 6 * 512); 2850 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2851 2852 /* The following children will be submitted sequentially due to the capacity of 2853 * spdk_bdev_io. 
2854 */ 2855 2856 /* The first child I/O will be queued to wait until an spdk_bdev_io becomes available */ 2857 rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 8, io_done, NULL); 2858 CU_ASSERT(rc == 0); 2859 CU_ASSERT(!TAILQ_EMPTY(&mgmt_ch->io_wait_queue)); 2860 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 2861 2862 /* Completing the first read I/O will submit the first child */ 2863 stub_complete_io(1); 2864 CU_ASSERT(TAILQ_EMPTY(&mgmt_ch->io_wait_queue)); 2865 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 2866 2867 /* Completing the first child will submit the second child */ 2868 stub_complete_io(1); 2869 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 2870 2871 /* Complete the second child I/O. This should result in our callback getting 2872 * invoked since the parent I/O is now complete. 2873 */ 2874 stub_complete_io(1); 2875 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 2876 2877 /* Now set up a more complex, multi-vector command that needs to be split, 2878 * including splitting iovecs. 2879 */ 2880 iov[0].iov_base = (void *)0x10000; 2881 iov[0].iov_len = 512; 2882 iov[1].iov_base = (void *)0x20000; 2883 iov[1].iov_len = 20 * 512; 2884 iov[2].iov_base = (void *)0x30000; 2885 iov[2].iov_len = 11 * 512; 2886 2887 g_io_done = false; 2888 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 14, 2, 2); 2889 ut_expected_io_set_iov(expected_io, 0, (void *)0x10000, 512); 2890 ut_expected_io_set_iov(expected_io, 1, (void *)0x20000, 512); 2891 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2892 2893 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 16, 16, 1); 2894 ut_expected_io_set_iov(expected_io, 0, (void *)(0x20000 + 512), 16 * 512); 2895 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2896 2897 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 32, 14, 2); 2898 ut_expected_io_set_iov(expected_io, 0, (void *)(0x20000 + 17 * 512), 3 * 512); 2899 ut_expected_io_set_iov(expected_io, 1, (void *)0x30000, 11 * 512); 2900 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2901 2902 rc = spdk_bdev_writev_blocks(desc, io_ch, iov, 3, 14, 32, io_done, NULL); 2903 CU_ASSERT(rc == 0); 2904 CU_ASSERT(g_io_done == false); 2905 2906 /* The following children will be submitted sequentially due to the capacity of 2907 * spdk_bdev_io. 2908 */ 2909 2910 /* Completing the first child will submit the second child */ 2911 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 2912 stub_complete_io(1); 2913 CU_ASSERT(g_io_done == false); 2914 2915 /* Completing the second child will submit the third child */ 2916 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 2917 stub_complete_io(1); 2918 CU_ASSERT(g_io_done == false); 2919 2920 /* Completing the third child will result in our callback getting invoked 2921 * since the parent I/O is now complete. 
2922 */ 2923 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 2924 stub_complete_io(1); 2925 CU_ASSERT(g_io_done == true); 2926 2927 CU_ASSERT(TAILQ_EMPTY(&g_bdev_ut_channel->expected_io)); 2928 2929 spdk_put_io_channel(io_ch); 2930 spdk_bdev_close(desc); 2931 free_bdev(bdev); 2932 ut_fini_bdev(); 2933 } 2934 2935 static void 2936 bdev_io_write_unit_split_test(void) 2937 { 2938 struct spdk_bdev *bdev; 2939 struct spdk_bdev_desc *desc = NULL; 2940 struct spdk_io_channel *io_ch; 2941 struct spdk_bdev_opts bdev_opts = {}; 2942 struct iovec iov[SPDK_BDEV_IO_NUM_CHILD_IOV * 4]; 2943 struct ut_expected_io *expected_io; 2944 uint64_t i; 2945 int rc; 2946 2947 spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts)); 2948 bdev_opts.bdev_io_pool_size = 512; 2949 bdev_opts.bdev_io_cache_size = 64; 2950 ut_init_bdev(&bdev_opts); 2951 2952 bdev = allocate_bdev("bdev0"); 2953 2954 rc = spdk_bdev_open_ext(bdev->name, true, bdev_ut_event_cb, NULL, &desc); 2955 CU_ASSERT(rc == 0); 2956 SPDK_CU_ASSERT_FATAL(desc != NULL); 2957 io_ch = spdk_bdev_get_io_channel(desc); 2958 CU_ASSERT(io_ch != NULL); 2959 2960 /* Write I/O 2x larger than write_unit_size should get split into 2 I/Os */ 2961 bdev->write_unit_size = 32; 2962 bdev->split_on_write_unit = true; 2963 g_io_done = false; 2964 2965 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 0, 32, 1); 2966 ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 32 * 512); 2967 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2968 2969 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 32, 32, 1); 2970 ut_expected_io_set_iov(expected_io, 0, (void *)(0xF000 + 32 * 512), 32 * 512); 2971 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2972 2973 rc = spdk_bdev_write_blocks(desc, io_ch, (void *)0xF000, 0, 64, io_done, NULL); 2974 CU_ASSERT(rc == 0); 2975 CU_ASSERT(g_io_done == false); 2976 2977 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 2978 stub_complete_io(2); 2979 CU_ASSERT(g_io_done == true); 2980 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 2981 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS); 2982 2983 /* Same as above but with optimal_io_boundary < write_unit_size - the I/O should be split 2984 * based on write_unit_size, not optimal_io_boundary */ 2985 bdev->split_on_optimal_io_boundary = true; 2986 bdev->optimal_io_boundary = 16; 2987 g_io_done = false; 2988 2989 rc = spdk_bdev_write_blocks(desc, io_ch, (void *)0xF000, 0, 64, io_done, NULL); 2990 CU_ASSERT(rc == 0); 2991 CU_ASSERT(g_io_done == false); 2992 2993 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 2994 stub_complete_io(2); 2995 CU_ASSERT(g_io_done == true); 2996 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 2997 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS); 2998 2999 /* Write I/O should fail if it is smaller than write_unit_size */ 3000 g_io_done = false; 3001 3002 rc = spdk_bdev_write_blocks(desc, io_ch, (void *)0xF000, 0, 31, io_done, NULL); 3003 CU_ASSERT(rc == 0); 3004 CU_ASSERT(g_io_done == false); 3005 3006 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 3007 poll_threads(); 3008 CU_ASSERT(g_io_done == true); 3009 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 3010 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED); 3011 3012 /* Same for I/O not aligned to write_unit_size */ 3013 g_io_done = false; 3014 3015 rc = spdk_bdev_write_blocks(desc, io_ch, (void *)0xF000, 1, 32, io_done, NULL); 3016 CU_ASSERT(rc == 0); 3017 CU_ASSERT(g_io_done == false); 
3018 3019 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 3020 poll_threads(); 3021 CU_ASSERT(g_io_done == true); 3022 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 3023 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED); 3024 3025 /* Write should fail if it needs to be split but there are not enough iovs to submit 3026 * an entire write unit */ 3027 bdev->write_unit_size = SPDK_COUNTOF(iov) / 2; 3028 g_io_done = false; 3029 3030 for (i = 0; i < SPDK_COUNTOF(iov); i++) { 3031 iov[i].iov_base = (void *)(0x1000 + 512 * i); 3032 iov[i].iov_len = 512; 3033 } 3034 3035 rc = spdk_bdev_writev_blocks(desc, io_ch, iov, SPDK_COUNTOF(iov), 0, SPDK_COUNTOF(iov), 3036 io_done, NULL); 3037 CU_ASSERT(rc == 0); 3038 CU_ASSERT(g_io_done == false); 3039 3040 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 3041 poll_threads(); 3042 CU_ASSERT(g_io_done == true); 3043 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 3044 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED); 3045 3046 spdk_put_io_channel(io_ch); 3047 spdk_bdev_close(desc); 3048 free_bdev(bdev); 3049 ut_fini_bdev(); 3050 } 3051 3052 static void 3053 bdev_io_alignment(void) 3054 { 3055 struct spdk_bdev *bdev; 3056 struct spdk_bdev_desc *desc = NULL; 3057 struct spdk_io_channel *io_ch; 3058 struct spdk_bdev_opts bdev_opts = {}; 3059 int rc; 3060 void *buf = NULL; 3061 struct iovec iovs[2]; 3062 int iovcnt; 3063 uint64_t alignment; 3064 3065 spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts)); 3066 bdev_opts.bdev_io_pool_size = 20; 3067 bdev_opts.bdev_io_cache_size = 2; 3068 ut_init_bdev(&bdev_opts); 3069 3070 fn_table.submit_request = stub_submit_request_get_buf; 3071 bdev = allocate_bdev("bdev0"); 3072 3073 rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc); 3074 CU_ASSERT(rc == 0); 3075 CU_ASSERT(desc != NULL); 3076 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 3077 io_ch = spdk_bdev_get_io_channel(desc); 3078 CU_ASSERT(io_ch != NULL); 3079 3080 /* Create aligned buffer */ 3081 rc = posix_memalign(&buf, 4096, 8192); 3082 SPDK_CU_ASSERT_FATAL(rc == 0); 3083 3084 /* Pass aligned single buffer with no alignment required */ 3085 alignment = 1; 3086 bdev->required_alignment = spdk_u32log2(alignment); 3087 3088 rc = spdk_bdev_write_blocks(desc, io_ch, buf, 0, 1, io_done, NULL); 3089 CU_ASSERT(rc == 0); 3090 stub_complete_io(1); 3091 CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt, 3092 alignment)); 3093 3094 rc = spdk_bdev_read_blocks(desc, io_ch, buf, 0, 1, io_done, NULL); 3095 CU_ASSERT(rc == 0); 3096 stub_complete_io(1); 3097 CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt, 3098 alignment)); 3099 3100 /* Pass unaligned single buffer with no alignment required */ 3101 alignment = 1; 3102 bdev->required_alignment = spdk_u32log2(alignment); 3103 3104 rc = spdk_bdev_write_blocks(desc, io_ch, buf + 4, 0, 1, io_done, NULL); 3105 CU_ASSERT(rc == 0); 3106 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0); 3107 CU_ASSERT(g_bdev_io->u.bdev.iovs[0].iov_base == buf + 4); 3108 stub_complete_io(1); 3109 3110 rc = spdk_bdev_read_blocks(desc, io_ch, buf + 4, 0, 1, io_done, NULL); 3111 CU_ASSERT(rc == 0); 3112 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0); 3113 CU_ASSERT(g_bdev_io->u.bdev.iovs[0].iov_base == buf + 4); 3114 stub_complete_io(1); 3115 3116 /* Pass unaligned single buffer with 512 alignment required */ 3117 alignment = 512; 3118 bdev->required_alignment = spdk_u32log2(alignment); 3119 3120 rc = spdk_bdev_write_blocks(desc, io_ch, buf + 
4, 0, 1, io_done, NULL); 3121 CU_ASSERT(rc == 0); 3122 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 1); 3123 CU_ASSERT(g_bdev_io->u.bdev.iovs == &g_bdev_io->internal.bounce_iov); 3124 CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt, 3125 alignment)); 3126 stub_complete_io(1); 3127 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0); 3128 3129 rc = spdk_bdev_read_blocks(desc, io_ch, buf + 4, 0, 1, io_done, NULL); 3130 CU_ASSERT(rc == 0); 3131 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 1); 3132 CU_ASSERT(g_bdev_io->u.bdev.iovs == &g_bdev_io->internal.bounce_iov); 3133 CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt, 3134 alignment)); 3135 stub_complete_io(1); 3136 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0); 3137 3138 /* Pass unaligned single buffer with 4096 alignment required */ 3139 alignment = 4096; 3140 bdev->required_alignment = spdk_u32log2(alignment); 3141 3142 rc = spdk_bdev_write_blocks(desc, io_ch, buf + 8, 0, 1, io_done, NULL); 3143 CU_ASSERT(rc == 0); 3144 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 1); 3145 CU_ASSERT(g_bdev_io->u.bdev.iovs == &g_bdev_io->internal.bounce_iov); 3146 CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt, 3147 alignment)); 3148 stub_complete_io(1); 3149 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0); 3150 3151 rc = spdk_bdev_read_blocks(desc, io_ch, buf + 8, 0, 1, io_done, NULL); 3152 CU_ASSERT(rc == 0); 3153 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 1); 3154 CU_ASSERT(g_bdev_io->u.bdev.iovs == &g_bdev_io->internal.bounce_iov); 3155 CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt, 3156 alignment)); 3157 stub_complete_io(1); 3158 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0); 3159 3160 /* Pass aligned iovs with no alignment required */ 3161 alignment = 1; 3162 bdev->required_alignment = spdk_u32log2(alignment); 3163 3164 iovcnt = 1; 3165 iovs[0].iov_base = buf; 3166 iovs[0].iov_len = 512; 3167 3168 rc = spdk_bdev_writev(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL); 3169 CU_ASSERT(rc == 0); 3170 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0); 3171 stub_complete_io(1); 3172 CU_ASSERT(g_bdev_io->u.bdev.iovs[0].iov_base == iovs[0].iov_base); 3173 3174 rc = spdk_bdev_readv(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL); 3175 CU_ASSERT(rc == 0); 3176 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0); 3177 stub_complete_io(1); 3178 CU_ASSERT(g_bdev_io->u.bdev.iovs[0].iov_base == iovs[0].iov_base); 3179 3180 /* Pass unaligned iovs with no alignment required */ 3181 alignment = 1; 3182 bdev->required_alignment = spdk_u32log2(alignment); 3183 3184 iovcnt = 2; 3185 iovs[0].iov_base = buf + 16; 3186 iovs[0].iov_len = 256; 3187 iovs[1].iov_base = buf + 16 + 256 + 32; 3188 iovs[1].iov_len = 256; 3189 3190 rc = spdk_bdev_writev(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL); 3191 CU_ASSERT(rc == 0); 3192 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0); 3193 stub_complete_io(1); 3194 CU_ASSERT(g_bdev_io->u.bdev.iovs[0].iov_base == iovs[0].iov_base); 3195 3196 rc = spdk_bdev_readv(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL); 3197 CU_ASSERT(rc == 0); 3198 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0); 3199 stub_complete_io(1); 3200 CU_ASSERT(g_bdev_io->u.bdev.iovs[0].iov_base == iovs[0].iov_base); 3201 3202 /* Pass unaligned iov with 2048 alignment required */ 3203 alignment = 2048; 3204 bdev->required_alignment = spdk_u32log2(alignment); 3205 3206 iovcnt = 2; 3207 iovs[0].iov_base = buf + 16; 3208 iovs[0].iov_len = 256; 3209 
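	/* Both halves of this iov pair are deliberately misaligned, so with
	 * 2048-byte alignment required the bdev layer is expected to merge
	 * them into a single aligned bounce buffer and restore the original
	 * iovs on completion (the asserts below check exactly that).
	 */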
iovs[1].iov_base = buf + 16 + 256 + 32; 3210 iovs[1].iov_len = 256; 3211 3212 rc = spdk_bdev_writev(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL); 3213 CU_ASSERT(rc == 0); 3214 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == iovcnt); 3215 CU_ASSERT(g_bdev_io->u.bdev.iovs == &g_bdev_io->internal.bounce_iov); 3216 CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt, 3217 alignment)); 3218 stub_complete_io(1); 3219 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0); 3220 3221 rc = spdk_bdev_readv(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL); 3222 CU_ASSERT(rc == 0); 3223 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == iovcnt); 3224 CU_ASSERT(g_bdev_io->u.bdev.iovs == &g_bdev_io->internal.bounce_iov); 3225 CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt, 3226 alignment)); 3227 stub_complete_io(1); 3228 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0); 3229 3230 /* Pass iov without allocated buffer without alignment required */ 3231 alignment = 1; 3232 bdev->required_alignment = spdk_u32log2(alignment); 3233 3234 iovcnt = 1; 3235 iovs[0].iov_base = NULL; 3236 iovs[0].iov_len = 0; 3237 3238 rc = spdk_bdev_readv(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL); 3239 CU_ASSERT(rc == 0); 3240 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0); 3241 CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt, 3242 alignment)); 3243 stub_complete_io(1); 3244 3245 /* Pass iov without allocated buffer with 1024 alignment required */ 3246 alignment = 1024; 3247 bdev->required_alignment = spdk_u32log2(alignment); 3248 3249 iovcnt = 1; 3250 iovs[0].iov_base = NULL; 3251 iovs[0].iov_len = 0; 3252 3253 rc = spdk_bdev_readv(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL); 3254 CU_ASSERT(rc == 0); 3255 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0); 3256 CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt, 3257 alignment)); 3258 stub_complete_io(1); 3259 3260 spdk_put_io_channel(io_ch); 3261 spdk_bdev_close(desc); 3262 free_bdev(bdev); 3263 fn_table.submit_request = stub_submit_request; 3264 ut_fini_bdev(); 3265 3266 free(buf); 3267 } 3268 3269 static void 3270 bdev_io_alignment_with_boundary(void) 3271 { 3272 struct spdk_bdev *bdev; 3273 struct spdk_bdev_desc *desc = NULL; 3274 struct spdk_io_channel *io_ch; 3275 struct spdk_bdev_opts bdev_opts = {}; 3276 int rc; 3277 void *buf = NULL; 3278 struct iovec iovs[2]; 3279 int iovcnt; 3280 uint64_t alignment; 3281 3282 spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts)); 3283 bdev_opts.bdev_io_pool_size = 20; 3284 bdev_opts.bdev_io_cache_size = 2; 3285 bdev_opts.opts_size = sizeof(bdev_opts); 3286 ut_init_bdev(&bdev_opts); 3287 3288 fn_table.submit_request = stub_submit_request_get_buf; 3289 bdev = allocate_bdev("bdev0"); 3290 3291 rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc); 3292 CU_ASSERT(rc == 0); 3293 CU_ASSERT(desc != NULL); 3294 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 3295 io_ch = spdk_bdev_get_io_channel(desc); 3296 CU_ASSERT(io_ch != NULL); 3297 3298 /* Create aligned buffer */ 3299 rc = posix_memalign(&buf, 4096, 131072); 3300 SPDK_CU_ASSERT_FATAL(rc == 0); 3301 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 3302 3303 /* 512 * 3 with 2 IO boundary, allocate small data buffer from bdev layer */ 3304 alignment = 512; 3305 bdev->required_alignment = spdk_u32log2(alignment); 3306 bdev->optimal_io_boundary = 2; 3307 bdev->split_on_optimal_io_boundary = true; 3308 3309 iovcnt = 1; 3310 iovs[0].iov_base = NULL; 3311 iovs[0].iov_len = 
512 * 3;

	rc = spdk_bdev_readv_blocks(desc, io_ch, iovs, iovcnt, 1, 3, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2);
	stub_complete_io(2);

	/* 8KiB with 16 IO boundary, allocate large data buffer from bdev layer */
	alignment = 512;
	bdev->required_alignment = spdk_u32log2(alignment);
	bdev->optimal_io_boundary = 16;
	bdev->split_on_optimal_io_boundary = true;

	iovcnt = 1;
	iovs[0].iov_base = NULL;
	iovs[0].iov_len = 512 * 16;

	rc = spdk_bdev_readv_blocks(desc, io_ch, iovs, iovcnt, 1, 16, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2);
	stub_complete_io(2);

	/* 512 * 160 with 128 IO boundary, 63.5KiB + 16.5KiB for the two child requests */
	alignment = 512;
	bdev->required_alignment = spdk_u32log2(alignment);
	bdev->optimal_io_boundary = 128;
	bdev->split_on_optimal_io_boundary = true;

	iovcnt = 1;
	iovs[0].iov_base = buf + 16;
	iovs[0].iov_len = 512 * 160;
	rc = spdk_bdev_readv_blocks(desc, io_ch, iovs, iovcnt, 1, 160, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2);
	stub_complete_io(2);

	/* 512 * 3 with 2 IO boundary */
	alignment = 512;
	bdev->required_alignment = spdk_u32log2(alignment);
	bdev->optimal_io_boundary = 2;
	bdev->split_on_optimal_io_boundary = true;

	iovcnt = 2;
	iovs[0].iov_base = buf + 16;
	iovs[0].iov_len = 512;
	iovs[1].iov_base = buf + 16 + 512 + 32;
	iovs[1].iov_len = 1024;

	rc = spdk_bdev_writev_blocks(desc, io_ch, iovs, iovcnt, 1, 3, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2);
	stub_complete_io(2);

	rc = spdk_bdev_readv_blocks(desc, io_ch, iovs, iovcnt, 1, 3, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2);
	stub_complete_io(2);

	/* 512 * 64 with 32 IO boundary */
	bdev->optimal_io_boundary = 32;
	iovcnt = 2;
	iovs[0].iov_base = buf + 16;
	iovs[0].iov_len = 16384;
	iovs[1].iov_base = buf + 16 + 16384 + 32;
	iovs[1].iov_len = 16384;

	rc = spdk_bdev_writev_blocks(desc, io_ch, iovs, iovcnt, 1, 64, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 3);
	stub_complete_io(3);

	rc = spdk_bdev_readv_blocks(desc, io_ch, iovs, iovcnt, 1, 64, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 3);
	stub_complete_io(3);

	/* 512 * 160 with 32 IO boundary */
	iovcnt = 1;
	iovs[0].iov_base = buf + 16;
	iovs[0].iov_len = 16384 + 65536;

	rc = spdk_bdev_writev_blocks(desc, io_ch, iovs, iovcnt, 1, 160, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 6);
	stub_complete_io(6);

	spdk_put_io_channel(io_ch);
	spdk_bdev_close(desc);
	free_bdev(bdev);
	fn_table.submit_request = stub_submit_request;
	ut_fini_bdev();

	free(buf);
}

static void
histogram_status_cb(void *cb_arg, int status)
{
	g_status = status;
}

static void
histogram_data_cb(void *cb_arg, int status, struct spdk_histogram_data *histogram)
{
	g_status = status;
	g_histogram = histogram;
}

static void
histogram_io_count(void *ctx, uint64_t start, uint64_t end, uint64_t count, 3421 uint64_t total, uint64_t so_far) 3422 { 3423 g_count += count; 3424 } 3425 3426 static void 3427 histogram_channel_data_cb(void *cb_arg, int status, struct spdk_histogram_data *histogram) 3428 { 3429 spdk_histogram_data_fn cb_fn = cb_arg; 3430 3431 g_status = status; 3432 3433 if (status == 0) { 3434 spdk_histogram_data_iterate(histogram, cb_fn, NULL); 3435 } 3436 } 3437 3438 static void 3439 bdev_histograms(void) 3440 { 3441 struct spdk_bdev *bdev; 3442 struct spdk_bdev_desc *desc = NULL; 3443 struct spdk_io_channel *ch; 3444 struct spdk_histogram_data *histogram; 3445 uint8_t buf[4096]; 3446 int rc; 3447 3448 ut_init_bdev(NULL); 3449 3450 bdev = allocate_bdev("bdev"); 3451 3452 rc = spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc); 3453 CU_ASSERT(rc == 0); 3454 CU_ASSERT(desc != NULL); 3455 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 3456 3457 ch = spdk_bdev_get_io_channel(desc); 3458 CU_ASSERT(ch != NULL); 3459 3460 /* Enable histogram */ 3461 g_status = -1; 3462 spdk_bdev_histogram_enable(bdev, histogram_status_cb, NULL, true); 3463 poll_threads(); 3464 CU_ASSERT(g_status == 0); 3465 CU_ASSERT(bdev->internal.histogram_enabled == true); 3466 3467 /* Allocate histogram */ 3468 histogram = spdk_histogram_data_alloc(); 3469 SPDK_CU_ASSERT_FATAL(histogram != NULL); 3470 3471 /* Check if histogram is zeroed */ 3472 spdk_bdev_histogram_get(bdev, histogram, histogram_data_cb, NULL); 3473 poll_threads(); 3474 CU_ASSERT(g_status == 0); 3475 SPDK_CU_ASSERT_FATAL(g_histogram != NULL); 3476 3477 g_count = 0; 3478 spdk_histogram_data_iterate(g_histogram, histogram_io_count, NULL); 3479 3480 CU_ASSERT(g_count == 0); 3481 3482 rc = spdk_bdev_write_blocks(desc, ch, buf, 0, 1, io_done, NULL); 3483 CU_ASSERT(rc == 0); 3484 3485 spdk_delay_us(10); 3486 stub_complete_io(1); 3487 poll_threads(); 3488 3489 rc = spdk_bdev_read_blocks(desc, ch, buf, 0, 1, io_done, NULL); 3490 CU_ASSERT(rc == 0); 3491 3492 spdk_delay_us(10); 3493 stub_complete_io(1); 3494 poll_threads(); 3495 3496 /* Check if histogram gathered data from all I/O channels */ 3497 g_histogram = NULL; 3498 spdk_bdev_histogram_get(bdev, histogram, histogram_data_cb, NULL); 3499 poll_threads(); 3500 CU_ASSERT(g_status == 0); 3501 CU_ASSERT(bdev->internal.histogram_enabled == true); 3502 SPDK_CU_ASSERT_FATAL(g_histogram != NULL); 3503 3504 g_count = 0; 3505 spdk_histogram_data_iterate(g_histogram, histogram_io_count, NULL); 3506 CU_ASSERT(g_count == 2); 3507 3508 g_count = 0; 3509 spdk_bdev_channel_get_histogram(ch, histogram_channel_data_cb, histogram_io_count); 3510 CU_ASSERT(g_status == 0); 3511 CU_ASSERT(g_count == 2); 3512 3513 /* Disable histogram */ 3514 spdk_bdev_histogram_enable(bdev, histogram_status_cb, NULL, false); 3515 poll_threads(); 3516 CU_ASSERT(g_status == 0); 3517 CU_ASSERT(bdev->internal.histogram_enabled == false); 3518 3519 /* Try to run histogram commands on disabled bdev */ 3520 spdk_bdev_histogram_get(bdev, histogram, histogram_data_cb, NULL); 3521 poll_threads(); 3522 CU_ASSERT(g_status == -EFAULT); 3523 3524 spdk_bdev_channel_get_histogram(ch, histogram_channel_data_cb, NULL); 3525 CU_ASSERT(g_status == -EFAULT); 3526 3527 spdk_histogram_data_free(histogram); 3528 spdk_put_io_channel(ch); 3529 spdk_bdev_close(desc); 3530 free_bdev(bdev); 3531 ut_fini_bdev(); 3532 } 3533 3534 static void 3535 _bdev_compare(bool emulated) 3536 { 3537 struct spdk_bdev *bdev; 3538 struct spdk_bdev_desc *desc = NULL; 3539 struct spdk_io_channel 
*ioch; 3540 struct ut_expected_io *expected_io; 3541 uint64_t offset, num_blocks; 3542 uint32_t num_completed; 3543 char aa_buf[512]; 3544 char bb_buf[512]; 3545 struct iovec compare_iov; 3546 uint8_t expected_io_type; 3547 int rc; 3548 3549 if (emulated) { 3550 expected_io_type = SPDK_BDEV_IO_TYPE_READ; 3551 } else { 3552 expected_io_type = SPDK_BDEV_IO_TYPE_COMPARE; 3553 } 3554 3555 memset(aa_buf, 0xaa, sizeof(aa_buf)); 3556 memset(bb_buf, 0xbb, sizeof(bb_buf)); 3557 3558 g_io_types_supported[SPDK_BDEV_IO_TYPE_COMPARE] = !emulated; 3559 3560 ut_init_bdev(NULL); 3561 fn_table.submit_request = stub_submit_request_get_buf; 3562 bdev = allocate_bdev("bdev"); 3563 3564 rc = spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc); 3565 CU_ASSERT_EQUAL(rc, 0); 3566 SPDK_CU_ASSERT_FATAL(desc != NULL); 3567 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 3568 ioch = spdk_bdev_get_io_channel(desc); 3569 SPDK_CU_ASSERT_FATAL(ioch != NULL); 3570 3571 fn_table.submit_request = stub_submit_request_get_buf; 3572 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 3573 3574 offset = 50; 3575 num_blocks = 1; 3576 compare_iov.iov_base = aa_buf; 3577 compare_iov.iov_len = sizeof(aa_buf); 3578 3579 /* 1. successful compare */ 3580 expected_io = ut_alloc_expected_io(expected_io_type, offset, num_blocks, 0); 3581 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 3582 3583 g_io_done = false; 3584 g_compare_read_buf = aa_buf; 3585 g_compare_read_buf_len = sizeof(aa_buf); 3586 rc = spdk_bdev_comparev_blocks(desc, ioch, &compare_iov, 1, offset, num_blocks, io_done, NULL); 3587 CU_ASSERT_EQUAL(rc, 0); 3588 num_completed = stub_complete_io(1); 3589 CU_ASSERT_EQUAL(num_completed, 1); 3590 CU_ASSERT(g_io_done == true); 3591 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS); 3592 3593 /* 2. 
miscompare */ 3594 expected_io = ut_alloc_expected_io(expected_io_type, offset, num_blocks, 0); 3595 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 3596 3597 g_io_done = false; 3598 g_compare_read_buf = bb_buf; 3599 g_compare_read_buf_len = sizeof(bb_buf); 3600 rc = spdk_bdev_comparev_blocks(desc, ioch, &compare_iov, 1, offset, num_blocks, io_done, NULL); 3601 CU_ASSERT_EQUAL(rc, 0); 3602 num_completed = stub_complete_io(1); 3603 CU_ASSERT_EQUAL(num_completed, 1); 3604 CU_ASSERT(g_io_done == true); 3605 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_MISCOMPARE); 3606 3607 spdk_put_io_channel(ioch); 3608 spdk_bdev_close(desc); 3609 free_bdev(bdev); 3610 fn_table.submit_request = stub_submit_request; 3611 ut_fini_bdev(); 3612 3613 g_io_types_supported[SPDK_BDEV_IO_TYPE_COMPARE] = true; 3614 3615 g_compare_read_buf = NULL; 3616 } 3617 3618 static void 3619 _bdev_compare_with_md(bool emulated) 3620 { 3621 struct spdk_bdev *bdev; 3622 struct spdk_bdev_desc *desc = NULL; 3623 struct spdk_io_channel *ioch; 3624 struct ut_expected_io *expected_io; 3625 uint64_t offset, num_blocks; 3626 uint32_t num_completed; 3627 char buf[1024 + 16 /* 2 * blocklen + 2 * mdlen */]; 3628 char buf_interleaved_miscompare[1024 + 16 /* 2 * blocklen + 2 * mdlen */]; 3629 char buf_miscompare[1024 /* 2 * blocklen */]; 3630 char md_buf[16]; 3631 char md_buf_miscompare[16]; 3632 struct iovec compare_iov; 3633 uint8_t expected_io_type; 3634 int rc; 3635 3636 if (emulated) { 3637 expected_io_type = SPDK_BDEV_IO_TYPE_READ; 3638 } else { 3639 expected_io_type = SPDK_BDEV_IO_TYPE_COMPARE; 3640 } 3641 3642 memset(buf, 0xaa, sizeof(buf)); 3643 memset(buf_interleaved_miscompare, 0xaa, sizeof(buf_interleaved_miscompare)); 3644 /* make last md different */ 3645 memset(buf_interleaved_miscompare + 1024 + 8, 0xbb, 8); 3646 memset(buf_miscompare, 0xbb, sizeof(buf_miscompare)); 3647 memset(md_buf, 0xaa, 16); 3648 memset(md_buf_miscompare, 0xbb, 16); 3649 3650 g_io_types_supported[SPDK_BDEV_IO_TYPE_COMPARE] = !emulated; 3651 3652 ut_init_bdev(NULL); 3653 fn_table.submit_request = stub_submit_request_get_buf; 3654 bdev = allocate_bdev("bdev"); 3655 3656 rc = spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc); 3657 CU_ASSERT_EQUAL(rc, 0); 3658 SPDK_CU_ASSERT_FATAL(desc != NULL); 3659 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 3660 ioch = spdk_bdev_get_io_channel(desc); 3661 SPDK_CU_ASSERT_FATAL(ioch != NULL); 3662 3663 fn_table.submit_request = stub_submit_request_get_buf; 3664 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 3665 3666 offset = 50; 3667 num_blocks = 2; 3668 3669 /* interleaved md & data */ 3670 bdev->md_interleave = true; 3671 bdev->md_len = 8; 3672 bdev->blocklen = 512 + 8; 3673 compare_iov.iov_base = buf; 3674 compare_iov.iov_len = sizeof(buf); 3675 3676 /* 1. successful compare with md interleaved */ 3677 expected_io = ut_alloc_expected_io(expected_io_type, offset, num_blocks, 0); 3678 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 3679 3680 g_io_done = false; 3681 g_compare_read_buf = buf; 3682 g_compare_read_buf_len = sizeof(buf); 3683 rc = spdk_bdev_comparev_blocks(desc, ioch, &compare_iov, 1, offset, num_blocks, io_done, NULL); 3684 CU_ASSERT_EQUAL(rc, 0); 3685 num_completed = stub_complete_io(1); 3686 CU_ASSERT_EQUAL(num_completed, 1); 3687 CU_ASSERT(g_io_done == true); 3688 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS); 3689 3690 /* 2. 
miscompare with md interleaved */ 3691 expected_io = ut_alloc_expected_io(expected_io_type, offset, num_blocks, 0); 3692 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 3693 3694 g_io_done = false; 3695 g_compare_read_buf = buf_interleaved_miscompare; 3696 g_compare_read_buf_len = sizeof(buf_interleaved_miscompare); 3697 rc = spdk_bdev_comparev_blocks(desc, ioch, &compare_iov, 1, offset, num_blocks, io_done, NULL); 3698 CU_ASSERT_EQUAL(rc, 0); 3699 num_completed = stub_complete_io(1); 3700 CU_ASSERT_EQUAL(num_completed, 1); 3701 CU_ASSERT(g_io_done == true); 3702 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_MISCOMPARE); 3703 3704 /* Separate data & md buffers */ 3705 bdev->md_interleave = false; 3706 bdev->blocklen = 512; 3707 compare_iov.iov_base = buf; 3708 compare_iov.iov_len = 1024; 3709 3710 /* 3. successful compare with md separated */ 3711 expected_io = ut_alloc_expected_io(expected_io_type, offset, num_blocks, 0); 3712 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 3713 3714 g_io_done = false; 3715 g_compare_read_buf = buf; 3716 g_compare_read_buf_len = 1024; 3717 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 3718 g_compare_md_buf = md_buf; 3719 rc = spdk_bdev_comparev_blocks_with_md(desc, ioch, &compare_iov, 1, md_buf, 3720 offset, num_blocks, io_done, NULL); 3721 CU_ASSERT_EQUAL(rc, 0); 3722 num_completed = stub_complete_io(1); 3723 CU_ASSERT_EQUAL(num_completed, 1); 3724 CU_ASSERT(g_io_done == true); 3725 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS); 3726 3727 /* 4. miscompare with md separated where md buf is different */ 3728 expected_io = ut_alloc_expected_io(expected_io_type, offset, num_blocks, 0); 3729 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 3730 3731 g_io_done = false; 3732 g_compare_read_buf = buf; 3733 g_compare_read_buf_len = 1024; 3734 g_compare_md_buf = md_buf_miscompare; 3735 rc = spdk_bdev_comparev_blocks_with_md(desc, ioch, &compare_iov, 1, md_buf, 3736 offset, num_blocks, io_done, NULL); 3737 CU_ASSERT_EQUAL(rc, 0); 3738 num_completed = stub_complete_io(1); 3739 CU_ASSERT_EQUAL(num_completed, 1); 3740 CU_ASSERT(g_io_done == true); 3741 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_MISCOMPARE); 3742 3743 /* 5. 
miscompare with md separated where buf is different */ 3744 expected_io = ut_alloc_expected_io(expected_io_type, offset, num_blocks, 0); 3745 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 3746 3747 g_io_done = false; 3748 g_compare_read_buf = buf_miscompare; 3749 g_compare_read_buf_len = sizeof(buf_miscompare); 3750 g_compare_md_buf = md_buf; 3751 rc = spdk_bdev_comparev_blocks_with_md(desc, ioch, &compare_iov, 1, md_buf, 3752 offset, num_blocks, io_done, NULL); 3753 CU_ASSERT_EQUAL(rc, 0); 3754 num_completed = stub_complete_io(1); 3755 CU_ASSERT_EQUAL(num_completed, 1); 3756 CU_ASSERT(g_io_done == true); 3757 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_MISCOMPARE); 3758 3759 bdev->md_len = 0; 3760 g_compare_md_buf = NULL; 3761 3762 spdk_put_io_channel(ioch); 3763 spdk_bdev_close(desc); 3764 free_bdev(bdev); 3765 fn_table.submit_request = stub_submit_request; 3766 ut_fini_bdev(); 3767 3768 g_io_types_supported[SPDK_BDEV_IO_TYPE_COMPARE] = true; 3769 3770 g_compare_read_buf = NULL; 3771 } 3772 3773 static void 3774 bdev_compare(void) 3775 { 3776 _bdev_compare(false); 3777 _bdev_compare_with_md(false); 3778 } 3779 3780 static void 3781 bdev_compare_emulated(void) 3782 { 3783 _bdev_compare(true); 3784 _bdev_compare_with_md(true); 3785 } 3786 3787 static void 3788 bdev_compare_and_write(void) 3789 { 3790 struct spdk_bdev *bdev; 3791 struct spdk_bdev_desc *desc = NULL; 3792 struct spdk_io_channel *ioch; 3793 struct ut_expected_io *expected_io; 3794 uint64_t offset, num_blocks; 3795 uint32_t num_completed; 3796 char aa_buf[512]; 3797 char bb_buf[512]; 3798 char cc_buf[512]; 3799 char write_buf[512]; 3800 struct iovec compare_iov; 3801 struct iovec write_iov; 3802 int rc; 3803 3804 memset(aa_buf, 0xaa, sizeof(aa_buf)); 3805 memset(bb_buf, 0xbb, sizeof(bb_buf)); 3806 memset(cc_buf, 0xcc, sizeof(cc_buf)); 3807 3808 g_io_types_supported[SPDK_BDEV_IO_TYPE_COMPARE] = false; 3809 3810 ut_init_bdev(NULL); 3811 fn_table.submit_request = stub_submit_request_get_buf; 3812 bdev = allocate_bdev("bdev"); 3813 3814 rc = spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc); 3815 CU_ASSERT_EQUAL(rc, 0); 3816 SPDK_CU_ASSERT_FATAL(desc != NULL); 3817 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 3818 ioch = spdk_bdev_get_io_channel(desc); 3819 SPDK_CU_ASSERT_FATAL(ioch != NULL); 3820 3821 fn_table.submit_request = stub_submit_request_get_buf; 3822 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 3823 3824 offset = 50; 3825 num_blocks = 1; 3826 compare_iov.iov_base = aa_buf; 3827 compare_iov.iov_len = sizeof(aa_buf); 3828 write_iov.iov_base = bb_buf; 3829 write_iov.iov_len = sizeof(bb_buf); 3830 3831 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, offset, num_blocks, 0); 3832 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 3833 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, offset, num_blocks, 0); 3834 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 3835 3836 g_io_done = false; 3837 g_compare_read_buf = aa_buf; 3838 g_compare_read_buf_len = sizeof(aa_buf); 3839 memset(write_buf, 0, sizeof(write_buf)); 3840 g_compare_write_buf = write_buf; 3841 g_compare_write_buf_len = sizeof(write_buf); 3842 rc = spdk_bdev_comparev_and_writev_blocks(desc, ioch, &compare_iov, 1, &write_iov, 1, 3843 offset, num_blocks, io_done, NULL); 3844 /* Trigger range locking */ 3845 poll_threads(); 3846 CU_ASSERT_EQUAL(rc, 0); 3847 num_completed = stub_complete_io(1); 3848 CU_ASSERT_EQUAL(num_completed, 1); 3849 CU_ASSERT(g_io_done == 
false); 3850 num_completed = stub_complete_io(1); 3851 /* Trigger range unlocking */ 3852 poll_threads(); 3853 CU_ASSERT_EQUAL(num_completed, 1); 3854 CU_ASSERT(g_io_done == true); 3855 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS); 3856 CU_ASSERT(memcmp(write_buf, bb_buf, sizeof(write_buf)) == 0); 3857 3858 /* Test miscompare */ 3859 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, offset, num_blocks, 0); 3860 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 3861 3862 g_io_done = false; 3863 g_compare_read_buf = cc_buf; 3864 g_compare_read_buf_len = sizeof(cc_buf); 3865 memset(write_buf, 0, sizeof(write_buf)); 3866 g_compare_write_buf = write_buf; 3867 g_compare_write_buf_len = sizeof(write_buf); 3868 rc = spdk_bdev_comparev_and_writev_blocks(desc, ioch, &compare_iov, 1, &write_iov, 1, 3869 offset, num_blocks, io_done, NULL); 3870 /* Trigger range locking */ 3871 poll_threads(); 3872 CU_ASSERT_EQUAL(rc, 0); 3873 num_completed = stub_complete_io(1); 3874 /* Trigger range unlocking earlier because we expect error here */ 3875 poll_threads(); 3876 CU_ASSERT_EQUAL(num_completed, 1); 3877 CU_ASSERT(g_io_done == true); 3878 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_MISCOMPARE); 3879 num_completed = stub_complete_io(1); 3880 CU_ASSERT_EQUAL(num_completed, 0); 3881 3882 spdk_put_io_channel(ioch); 3883 spdk_bdev_close(desc); 3884 free_bdev(bdev); 3885 fn_table.submit_request = stub_submit_request; 3886 ut_fini_bdev(); 3887 3888 g_io_types_supported[SPDK_BDEV_IO_TYPE_COMPARE] = true; 3889 3890 g_compare_read_buf = NULL; 3891 g_compare_write_buf = NULL; 3892 } 3893 3894 static void 3895 bdev_write_zeroes(void) 3896 { 3897 struct spdk_bdev *bdev; 3898 struct spdk_bdev_desc *desc = NULL; 3899 struct spdk_io_channel *ioch; 3900 struct ut_expected_io *expected_io; 3901 uint64_t offset, num_io_blocks, num_blocks; 3902 uint32_t num_completed, num_requests; 3903 int rc; 3904 3905 ut_init_bdev(NULL); 3906 bdev = allocate_bdev("bdev"); 3907 3908 rc = spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc); 3909 CU_ASSERT_EQUAL(rc, 0); 3910 SPDK_CU_ASSERT_FATAL(desc != NULL); 3911 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 3912 ioch = spdk_bdev_get_io_channel(desc); 3913 SPDK_CU_ASSERT_FATAL(ioch != NULL); 3914 3915 fn_table.submit_request = stub_submit_request; 3916 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 3917 3918 /* First test that if the bdev supports write_zeroes, the request won't be split */ 3919 bdev->md_len = 0; 3920 bdev->blocklen = 4096; 3921 num_blocks = (ZERO_BUFFER_SIZE / bdev->blocklen) * 2; 3922 3923 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, 0, num_blocks, 0); 3924 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 3925 rc = spdk_bdev_write_zeroes_blocks(desc, ioch, 0, num_blocks, io_done, NULL); 3926 CU_ASSERT_EQUAL(rc, 0); 3927 num_completed = stub_complete_io(1); 3928 CU_ASSERT_EQUAL(num_completed, 1); 3929 3930 /* Check that if write zeroes is not supported it'll be replaced by regular writes */ 3931 ut_enable_io_type(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, false); 3932 num_io_blocks = ZERO_BUFFER_SIZE / bdev->blocklen; 3933 num_requests = 2; 3934 num_blocks = (ZERO_BUFFER_SIZE / bdev->blocklen) * num_requests; 3935 3936 for (offset = 0; offset < num_requests; ++offset) { 3937 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 3938 offset * num_io_blocks, num_io_blocks, 0); 3939 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 3940 } 3941 3942 rc = 
spdk_bdev_write_zeroes_blocks(desc, ioch, 0, num_blocks, io_done, NULL);
	CU_ASSERT_EQUAL(rc, 0);
	num_completed = stub_complete_io(num_requests);
	CU_ASSERT_EQUAL(num_completed, num_requests);

	/* Check that the splitting is correct if bdev has interleaved metadata */
	bdev->md_interleave = true;
	bdev->md_len = 64;
	bdev->blocklen = 4096 + 64;
	num_blocks = (ZERO_BUFFER_SIZE / bdev->blocklen) * 2;

	num_requests = offset = 0;
	while (offset < num_blocks) {
		num_io_blocks = spdk_min(ZERO_BUFFER_SIZE / bdev->blocklen, num_blocks - offset);
		expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE,
						   offset, num_io_blocks, 0);
		TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
		offset += num_io_blocks;
		num_requests++;
	}

	rc = spdk_bdev_write_zeroes_blocks(desc, ioch, 0, num_blocks, io_done, NULL);
	CU_ASSERT_EQUAL(rc, 0);
	num_completed = stub_complete_io(num_requests);
	CU_ASSERT_EQUAL(num_completed, num_requests);
	num_completed = stub_complete_io(num_requests);
	/* All requests were already completed above, so none should remain */
	CU_ASSERT_EQUAL(num_completed, 0);

	/* Check the same for a separate metadata buffer */
	bdev->md_interleave = false;
	bdev->md_len = 64;
	bdev->blocklen = 4096;

	num_requests = offset = 0;
	while (offset < num_blocks) {
		num_io_blocks = spdk_min(ZERO_BUFFER_SIZE / (bdev->blocklen + bdev->md_len), num_blocks);
		expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE,
						   offset, num_io_blocks, 0);
		expected_io->md_buf = (char *)g_bdev_mgr.zero_buffer + num_io_blocks * bdev->blocklen;
		TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
		offset += num_io_blocks;
		num_requests++;
	}

	rc = spdk_bdev_write_zeroes_blocks(desc, ioch, 0, num_blocks, io_done, NULL);
	CU_ASSERT_EQUAL(rc, 0);
	num_completed = stub_complete_io(num_requests);
	CU_ASSERT_EQUAL(num_completed, num_requests);

	ut_enable_io_type(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, true);
	spdk_put_io_channel(ioch);
	spdk_bdev_close(desc);
	free_bdev(bdev);
	ut_fini_bdev();
}

static void
bdev_zcopy_write(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc = NULL;
	struct spdk_io_channel *ioch;
	struct ut_expected_io *expected_io;
	uint64_t offset, num_blocks;
	uint32_t num_completed;
	char aa_buf[512];
	struct iovec iov;
	int rc;
	const bool populate = false;
	const bool commit = true;

	memset(aa_buf, 0xaa, sizeof(aa_buf));

	ut_init_bdev(NULL);
	bdev = allocate_bdev("bdev");

	rc = spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc);
	CU_ASSERT_EQUAL(rc, 0);
	SPDK_CU_ASSERT_FATAL(desc != NULL);
	CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc));
	ioch = spdk_bdev_get_io_channel(desc);
	SPDK_CU_ASSERT_FATAL(ioch != NULL);

	g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS;

	offset = 50;
	num_blocks = 1;
	iov.iov_base = NULL;
	iov.iov_len = 0;

	g_zcopy_read_buf = (void *) 0x1122334455667788UL;
	g_zcopy_read_buf_len = (uint32_t) -1;
	/* Do a zcopy start for a write (populate=false) */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_ZCOPY, offset, num_blocks, 0);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
	g_io_done = false;
	g_zcopy_write_buf = aa_buf;
	g_zcopy_write_buf_len = sizeof(aa_buf);
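	/* The stub's zcopy start is expected to hand g_zcopy_write_buf back
	 * through the iov, and io_done later clears the g_zcopy globals; the
	 * asserts that follow rely on that hand-off.
	 */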
	g_zcopy_bdev_io = NULL;
	rc = spdk_bdev_zcopy_start(desc, ioch, &iov, 1, offset, num_blocks, populate, io_done, NULL);
	CU_ASSERT_EQUAL(rc, 0);
	num_completed = stub_complete_io(1);
	CU_ASSERT_EQUAL(num_completed, 1);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS);
	/* Check that the iov has been set up */
	CU_ASSERT(iov.iov_base == g_zcopy_write_buf);
	CU_ASSERT(iov.iov_len == g_zcopy_write_buf_len);
	/* Check that the bdev_io has been saved */
	CU_ASSERT(g_zcopy_bdev_io != NULL);
	/* Now do the zcopy end for a write (commit=true) */
	g_io_done = false;
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_ZCOPY, offset, num_blocks, 0);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
	rc = spdk_bdev_zcopy_end(g_zcopy_bdev_io, commit, io_done, NULL);
	CU_ASSERT_EQUAL(rc, 0);
	num_completed = stub_complete_io(1);
	CU_ASSERT_EQUAL(num_completed, 1);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS);
	/* Check that the g_zcopy globals are reset by io_done */
	CU_ASSERT(g_zcopy_write_buf == NULL);
	CU_ASSERT(g_zcopy_write_buf_len == 0);
	/* Check that io_done has freed the g_zcopy_bdev_io */
	CU_ASSERT(g_zcopy_bdev_io == NULL);

	/* Check that the zcopy read buffer has not been touched, which
	 * ensures that the correct buffers were used.
	 */
	CU_ASSERT(g_zcopy_read_buf == (void *) 0x1122334455667788UL);
	CU_ASSERT(g_zcopy_read_buf_len == (uint32_t) -1);

	spdk_put_io_channel(ioch);
	spdk_bdev_close(desc);
	free_bdev(bdev);
	ut_fini_bdev();
}

static void
bdev_zcopy_read(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc = NULL;
	struct spdk_io_channel *ioch;
	struct ut_expected_io *expected_io;
	uint64_t offset, num_blocks;
	uint32_t num_completed;
	char aa_buf[512];
	struct iovec iov;
	int rc;
	const bool populate = true;
	const bool commit = false;

	memset(aa_buf, 0xaa, sizeof(aa_buf));

	ut_init_bdev(NULL);
	bdev = allocate_bdev("bdev");

	rc = spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc);
	CU_ASSERT_EQUAL(rc, 0);
	SPDK_CU_ASSERT_FATAL(desc != NULL);
	CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc));
	ioch = spdk_bdev_get_io_channel(desc);
	SPDK_CU_ASSERT_FATAL(ioch != NULL);

	g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS;

	offset = 50;
	num_blocks = 1;
	iov.iov_base = NULL;
	iov.iov_len = 0;

	g_zcopy_write_buf = (void *) 0x1122334455667788UL;
	g_zcopy_write_buf_len = (uint32_t) -1;

	/* Do a zcopy start for a read (populate=true) */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_ZCOPY, offset, num_blocks, 0);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
	g_io_done = false;
	g_zcopy_read_buf = aa_buf;
	g_zcopy_read_buf_len = sizeof(aa_buf);
	g_zcopy_bdev_io = NULL;
	rc = spdk_bdev_zcopy_start(desc, ioch, &iov, 1, offset, num_blocks, populate, io_done, NULL);
	CU_ASSERT_EQUAL(rc, 0);
	num_completed = stub_complete_io(1);
	CU_ASSERT_EQUAL(num_completed, 1);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS);
	/* Check that the iov has been set up */
	CU_ASSERT(iov.iov_base == g_zcopy_read_buf);
	CU_ASSERT(iov.iov_len == g_zcopy_read_buf_len);
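	/* For the read case the buffer was already populated at zcopy start
	 * (populate=true), so the end step that follows uses commit=false;
	 * there is nothing to write back to the bdev.
	 */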
	/* Check that the bdev_io has been saved */
	CU_ASSERT(g_zcopy_bdev_io != NULL);

	/* Now do the zcopy end for a read (commit=false) */
	g_io_done = false;
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_ZCOPY, offset, num_blocks, 0);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
	rc = spdk_bdev_zcopy_end(g_zcopy_bdev_io, commit, io_done, NULL);
	CU_ASSERT_EQUAL(rc, 0);
	num_completed = stub_complete_io(1);
	CU_ASSERT_EQUAL(num_completed, 1);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS);
	/* Check that the g_zcopy globals are reset by io_done */
	CU_ASSERT(g_zcopy_read_buf == NULL);
	CU_ASSERT(g_zcopy_read_buf_len == 0);
	/* Check that io_done has freed the g_zcopy_bdev_io */
	CU_ASSERT(g_zcopy_bdev_io == NULL);

	/* Check that the zcopy write buffer has not been touched, which
	 * ensures that the correct buffers were used.
	 */
	CU_ASSERT(g_zcopy_write_buf == (void *) 0x1122334455667788UL);
	CU_ASSERT(g_zcopy_write_buf_len == (uint32_t) -1);

	spdk_put_io_channel(ioch);
	spdk_bdev_close(desc);
	free_bdev(bdev);
	ut_fini_bdev();
}

static void
bdev_open_while_hotremove(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc[2] = {};
	int rc;

	bdev = allocate_bdev("bdev");

	rc = spdk_bdev_open_ext("bdev", false, bdev_ut_event_cb, NULL, &desc[0]);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(desc[0] != NULL);
	CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc[0]));

	spdk_bdev_unregister(bdev, NULL, NULL);
	/* Bdev unregister is handled asynchronously. Poll thread to complete. */
	poll_threads();

	rc = spdk_bdev_open_ext("bdev", false, bdev_ut_event_cb, NULL, &desc[1]);
	CU_ASSERT(rc == -ENODEV);
	SPDK_CU_ASSERT_FATAL(desc[1] == NULL);

	spdk_bdev_close(desc[0]);
	free_bdev(bdev);
}

static void
bdev_close_while_hotremove(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc = NULL;
	int rc = 0;

	bdev = allocate_bdev("bdev");

	rc = spdk_bdev_open_ext("bdev", true, bdev_open_cb1, &desc, &desc);
	CU_ASSERT_EQUAL(rc, 0);
	SPDK_CU_ASSERT_FATAL(desc != NULL);
	CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc));

	/* Simulate hot-unplug by unregistering bdev */
	g_event_type1 = 0xFF;
	g_unregister_arg = NULL;
	g_unregister_rc = -1;
	spdk_bdev_unregister(bdev, bdev_unregister_cb, (void *)0x12345678);
	/* Close device while remove event is in flight */
	spdk_bdev_close(desc);

	/* Ensure that the unregister callback is delayed */
	CU_ASSERT_EQUAL(g_unregister_arg, NULL);
	CU_ASSERT_EQUAL(g_unregister_rc, -1);

	poll_threads();

	/* Event callback shall not be issued because device was closed */
	CU_ASSERT_EQUAL(g_event_type1, 0xFF);
	/* Unregister callback is issued */
	CU_ASSERT_EQUAL(g_unregister_arg, (void *)0x12345678);
	CU_ASSERT_EQUAL(g_unregister_rc, 0);

	free_bdev(bdev);
}

static void
bdev_open_ext(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc1 = NULL;
	struct spdk_bdev_desc *desc2 = NULL;
	int rc = 0;

	bdev = allocate_bdev("bdev");

	rc = spdk_bdev_open_ext("bdev", true, NULL, NULL, &desc1);
	CU_ASSERT_EQUAL(rc, -EINVAL);

	rc = spdk_bdev_open_ext("bdev", true, bdev_open_cb1, &desc1, &desc1);
	CU_ASSERT_EQUAL(rc, 0);

	rc = spdk_bdev_open_ext("bdev", true, bdev_open_cb2, &desc2, &desc2);
	CU_ASSERT_EQUAL(rc, 0);

	g_event_type1 = 0xFF;
	g_event_type2 = 0xFF;

	/* Simulate hot-unplug by unregistering bdev */
	spdk_bdev_unregister(bdev, NULL, NULL);
	poll_threads();

	/* Check if correct events have been triggered in event callback fn */
	CU_ASSERT_EQUAL(g_event_type1, SPDK_BDEV_EVENT_REMOVE);
	CU_ASSERT_EQUAL(g_event_type2, SPDK_BDEV_EVENT_REMOVE);

	free_bdev(bdev);
	poll_threads();
}

static void
bdev_open_ext_unregister(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc1 = NULL;
	struct spdk_bdev_desc *desc2 = NULL;
	struct spdk_bdev_desc *desc3 = NULL;
	struct spdk_bdev_desc *desc4 = NULL;
	int rc = 0;

	bdev = allocate_bdev("bdev");

	rc = spdk_bdev_open_ext("bdev", true, NULL, NULL, &desc1);
	CU_ASSERT_EQUAL(rc, -EINVAL);

	rc = spdk_bdev_open_ext("bdev", true, bdev_open_cb1, &desc1, &desc1);
	CU_ASSERT_EQUAL(rc, 0);

	rc = spdk_bdev_open_ext("bdev", true, bdev_open_cb2, &desc2, &desc2);
	CU_ASSERT_EQUAL(rc, 0);

	rc = spdk_bdev_open_ext("bdev", true, bdev_open_cb3, &desc3, &desc3);
	CU_ASSERT_EQUAL(rc, 0);

	rc = spdk_bdev_open_ext("bdev", true, bdev_open_cb4, &desc4, &desc4);
	CU_ASSERT_EQUAL(rc, 0);

	g_event_type1 = 0xFF;
	g_event_type2 = 0xFF;
	g_event_type3 = 0xFF;
	g_event_type4 = 0xFF;

	g_unregister_arg = NULL;
	g_unregister_rc = -1;

	/* Simulate hot-unplug by unregistering bdev */
	spdk_bdev_unregister(bdev, bdev_unregister_cb, (void *)0x12345678);

	/*
	 * Unregister is handled asynchronously and the event callbacks
	 * (i.e., the bdev_open_cbN above) will be called.
	 * bdev_open_cb3 and bdev_open_cb4 intentionally do not close
	 * desc3 and desc4, so the bdev is not closed yet.
	 */
	poll_threads();

	/* Check if correct events have been triggered in event callback fn */
	CU_ASSERT_EQUAL(g_event_type1, SPDK_BDEV_EVENT_REMOVE);
	CU_ASSERT_EQUAL(g_event_type2, SPDK_BDEV_EVENT_REMOVE);
	CU_ASSERT_EQUAL(g_event_type3, SPDK_BDEV_EVENT_REMOVE);
	CU_ASSERT_EQUAL(g_event_type4, SPDK_BDEV_EVENT_REMOVE);

	/* Check that the unregister callback is delayed */
	CU_ASSERT(g_unregister_arg == NULL);
	CU_ASSERT(g_unregister_rc == -1);

	/*
	 * Explicitly close desc3. As desc4 is still open, the
	 * unregister callback is still delayed.
	 */
	spdk_bdev_close(desc3);
	CU_ASSERT(g_unregister_arg == NULL);
	CU_ASSERT(g_unregister_rc == -1);

	/*
	 * Explicitly close desc4 to trigger the ongoing bdev unregister
	 * operation after the last desc is closed.
	 */
	spdk_bdev_close(desc4);

	/* Poll the thread for the async unregister operation */
	poll_threads();

	/* Check that the unregister callback is executed */
	CU_ASSERT(g_unregister_arg == (void *)0x12345678);
	CU_ASSERT(g_unregister_rc == 0);

	free_bdev(bdev);
	poll_threads();
}

struct timeout_io_cb_arg {
	struct iovec iov;
	uint8_t type;
};

static int
bdev_channel_count_submitted_io(struct spdk_bdev_channel *ch)
{
	struct spdk_bdev_io *bdev_io;
	int n = 0;

	if (!ch) {
		return -1;
	}

	TAILQ_FOREACH(bdev_io, &ch->io_submitted, internal.ch_link) {
		n++;
	}

	return n;
}

static void
bdev_channel_io_timeout_cb(void *cb_arg, struct spdk_bdev_io *bdev_io)
{
	struct timeout_io_cb_arg *ctx = cb_arg;

	ctx->type = bdev_io->type;
	ctx->iov.iov_base = bdev_io->iov.iov_base;
	ctx->iov.iov_len = bdev_io->iov.iov_len;
}

static void
bdev_set_io_timeout(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc = NULL;
	struct spdk_io_channel *io_ch = NULL;
	struct spdk_bdev_channel *bdev_ch = NULL;
	struct timeout_io_cb_arg cb_arg;

	ut_init_bdev(NULL);
	bdev = allocate_bdev("bdev");

	CU_ASSERT(spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc) == 0);
	SPDK_CU_ASSERT_FATAL(desc != NULL);
	CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc));

	io_ch = spdk_bdev_get_io_channel(desc);
	CU_ASSERT(io_ch != NULL);

	bdev_ch = spdk_io_channel_get_ctx(io_ch);
	CU_ASSERT(TAILQ_EMPTY(&bdev_ch->io_submitted));

	/* Part 1:
	 * Check the bdev_ch->io_submitted list to make sure that it links
	 * only the user-submitted I/Os.
	 */
	CU_ASSERT(spdk_bdev_read(desc, io_ch, (void *)0x1000, 0, 4096, io_done, NULL) == 0);
	CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 1);
	CU_ASSERT(spdk_bdev_write(desc, io_ch, (void *)0x2000, 0, 4096, io_done, NULL) == 0);
	CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 2);
	stub_complete_io(1);
	CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 1);
	stub_complete_io(1);
	CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 0);

	/* Split IO */
	bdev->optimal_io_boundary = 16;
	bdev->split_on_optimal_io_boundary = true;

	/* Now test that a single-vector command is split correctly.
	 * Offset 14, length 8, payload 0xF000
	 *  Child - Offset 14, length 2, payload 0xF000
	 *  Child - Offset 16, length 6, payload 0xF000 + 2 * 512
	 *
	 * Set up the expected values before calling spdk_bdev_read_blocks
	 */
	CU_ASSERT(spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 8, io_done, NULL) == 0);
	/* We count all submitted I/Os, including those generated by splitting,
	 * so the parent plus its two children gives a count of 3.
	 */
	CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 3);
	stub_complete_io(1);
	CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 2);
	stub_complete_io(1);
	CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 0);

	/* Also include the reset IO */
	CU_ASSERT(spdk_bdev_reset(desc, io_ch, io_done, NULL) == 0);
	CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 1);
	poll_threads();
	stub_complete_io(1);
	poll_threads();
	CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 0);

	/* Part 2:
	 * Test the desc timeout poller registration.
	 */

	/* Successfully set the timeout */
	CU_ASSERT(spdk_bdev_set_timeout(desc, 30, bdev_channel_io_timeout_cb, &cb_arg) == 0);
	CU_ASSERT(desc->io_timeout_poller != NULL);
	CU_ASSERT(desc->timeout_in_sec == 30);
	CU_ASSERT(desc->cb_fn == bdev_channel_io_timeout_cb);
	CU_ASSERT(desc->cb_arg == &cb_arg);

	/* Change the timeout limit */
	CU_ASSERT(spdk_bdev_set_timeout(desc, 20, bdev_channel_io_timeout_cb, &cb_arg) == 0);
	CU_ASSERT(desc->io_timeout_poller != NULL);
	CU_ASSERT(desc->timeout_in_sec == 20);
	CU_ASSERT(desc->cb_fn == bdev_channel_io_timeout_cb);
	CU_ASSERT(desc->cb_arg == &cb_arg);

	/* Disable the timeout */
	CU_ASSERT(spdk_bdev_set_timeout(desc, 0, NULL, NULL) == 0);
	CU_ASSERT(desc->io_timeout_poller == NULL);

	/* Part 3:
	 * Catch a timed-out I/O and check that it is the submitted one.
	 */
	memset(&cb_arg, 0, sizeof(cb_arg));
	CU_ASSERT(spdk_bdev_set_timeout(desc, 30, bdev_channel_io_timeout_cb, &cb_arg) == 0);
	CU_ASSERT(spdk_bdev_write_blocks(desc, io_ch, (void *)0x1000, 0, 1, io_done, NULL) == 0);

	/* Don't reach the limit */
	spdk_delay_us(15 * spdk_get_ticks_hz());
	poll_threads();
	CU_ASSERT(cb_arg.type == 0);
	CU_ASSERT(cb_arg.iov.iov_base == (void *)0x0);
	CU_ASSERT(cb_arg.iov.iov_len == 0);

	/* 15 + 15 = 30: the limit is reached */
	spdk_delay_us(15 * spdk_get_ticks_hz());
	poll_threads();
	CU_ASSERT(cb_arg.type == SPDK_BDEV_IO_TYPE_WRITE);
	CU_ASSERT(cb_arg.iov.iov_base == (void *)0x1000);
	CU_ASSERT(cb_arg.iov.iov_len == 1 * bdev->blocklen);
	stub_complete_io(1);

	/* Use the same split IO as above and check the IO */
	memset(&cb_arg, 0, sizeof(cb_arg));
	CU_ASSERT(spdk_bdev_write_blocks(desc, io_ch, (void *)0xF000, 14, 8, io_done, NULL) == 0);

	/* The first child completes in time */
	spdk_delay_us(15 * spdk_get_ticks_hz());
	poll_threads();
	stub_complete_io(1);
	CU_ASSERT(cb_arg.type == 0);
	CU_ASSERT(cb_arg.iov.iov_base == (void *)0x0);
	CU_ASSERT(cb_arg.iov.iov_len == 0);

	/* The second child reaches the limit */
	spdk_delay_us(15 * spdk_get_ticks_hz());
	poll_threads();
	CU_ASSERT(cb_arg.type == SPDK_BDEV_IO_TYPE_WRITE);
	CU_ASSERT(cb_arg.iov.iov_base == (void *)0xF000);
	CU_ASSERT(cb_arg.iov.iov_len == 8 * bdev->blocklen);
	stub_complete_io(1);

	/* Also include the reset IO */
	memset(&cb_arg, 0, sizeof(cb_arg));
	CU_ASSERT(spdk_bdev_reset(desc, io_ch, io_done, NULL) == 0);
	spdk_delay_us(30 * spdk_get_ticks_hz());
	poll_threads();
	CU_ASSERT(cb_arg.type == SPDK_BDEV_IO_TYPE_RESET);
	stub_complete_io(1);
	poll_threads();

	spdk_put_io_channel(io_ch);
	spdk_bdev_close(desc);
	free_bdev(bdev);
	ut_fini_bdev();
}
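/*
 * For reference outside of this test, arming and disarming the same I/O
 * watchdog is just a pair of spdk_bdev_set_timeout() calls (a minimal
 * sketch; my_timeout_cb and ctx are hypothetical names with the
 * spdk_bdev_io_timeout_cb signature exercised above):
 *
 *	spdk_bdev_set_timeout(desc, 30, my_timeout_cb, ctx);	arm, 30 second limit
 *	spdk_bdev_set_timeout(desc, 0, NULL, NULL);		disarm
 */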
static void
bdev_set_qd_sampling(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc = NULL;
	struct spdk_io_channel *io_ch = NULL;
	struct spdk_bdev_channel *bdev_ch = NULL;
	struct timeout_io_cb_arg cb_arg;

	ut_init_bdev(NULL);
	bdev = allocate_bdev("bdev");

	CU_ASSERT(spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc) == 0);
	SPDK_CU_ASSERT_FATAL(desc != NULL);
	CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc));

	io_ch = spdk_bdev_get_io_channel(desc);
	CU_ASSERT(io_ch != NULL);

	bdev_ch = spdk_io_channel_get_ctx(io_ch);
	CU_ASSERT(TAILQ_EMPTY(&bdev_ch->io_submitted));

	/* Part 1:
	 * Check the bdev_ch->io_submitted list to make sure that it links
	 * only the user-submitted I/Os.
	 */
	CU_ASSERT(spdk_bdev_read(desc, io_ch, (void *)0x1000, 0, 4096, io_done, NULL) == 0);
	CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 1);
	CU_ASSERT(spdk_bdev_write(desc, io_ch, (void *)0x2000, 0, 4096, io_done, NULL) == 0);
	CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 2);
	stub_complete_io(1);
	CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 1);
	stub_complete_io(1);
	CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 0);

	/* Part 2:
	 * Test the bdev's QD poller registration.
	 */
	/* 1st: successfully set the QD sampling period */
	spdk_bdev_set_qd_sampling_period(bdev, 10);
	CU_ASSERT(bdev->internal.new_period == 10);
	CU_ASSERT(bdev->internal.period == 10);
	CU_ASSERT(bdev->internal.qd_desc != NULL);
	poll_threads();
	CU_ASSERT(bdev->internal.qd_poller != NULL);

	/* 2nd: change the QD sampling period */
	spdk_bdev_set_qd_sampling_period(bdev, 20);
	CU_ASSERT(bdev->internal.new_period == 20);
	CU_ASSERT(bdev->internal.period == 10);
	CU_ASSERT(bdev->internal.qd_desc != NULL);
	poll_threads();
	CU_ASSERT(bdev->internal.qd_poller != NULL);
	CU_ASSERT(bdev->internal.period == bdev->internal.new_period);

	/* 3rd: change the QD sampling period and verify qd_poll_in_progress */
	spdk_delay_us(20);
	poll_thread_times(0, 1);
	CU_ASSERT(bdev->internal.qd_poll_in_progress == true);
	spdk_bdev_set_qd_sampling_period(bdev, 30);
	CU_ASSERT(bdev->internal.new_period == 30);
	CU_ASSERT(bdev->internal.period == 20);
	poll_threads();
	CU_ASSERT(bdev->internal.qd_poll_in_progress == false);
	CU_ASSERT(bdev->internal.period == bdev->internal.new_period);

	/* 4th: disable the QD sampling period */
	spdk_bdev_set_qd_sampling_period(bdev, 0);
	CU_ASSERT(bdev->internal.new_period == 0);
	CU_ASSERT(bdev->internal.period == 30);
	poll_threads();
	CU_ASSERT(bdev->internal.qd_poller == NULL);
	CU_ASSERT(bdev->internal.period == bdev->internal.new_period);
	CU_ASSERT(bdev->internal.qd_desc == NULL);

	/* Part 3:
	 * Check that submitted I/O and resets work properly with QD sampling.
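	 * The sampling period set below is just 1 microsecond, so the QD
	 * poller is expected to run on the very next poll_threads() call.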
4594 */ 4595 memset(&cb_arg, 0, sizeof(cb_arg)); 4596 spdk_bdev_set_qd_sampling_period(bdev, 1); 4597 poll_threads(); 4598 4599 CU_ASSERT(spdk_bdev_write(desc, io_ch, (void *)0x2000, 0, 4096, io_done, NULL) == 0); 4600 CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 1); 4601 4602 /* Also include the reset IO */ 4603 memset(&cb_arg, 0, sizeof(cb_arg)); 4604 CU_ASSERT(spdk_bdev_reset(desc, io_ch, io_done, NULL) == 0); 4605 poll_threads(); 4606 4607 /* Close the desc */ 4608 spdk_put_io_channel(io_ch); 4609 spdk_bdev_close(desc); 4610 4611 /* Complete the submitted IO and reset */ 4612 stub_complete_io(2); 4613 poll_threads(); 4614 4615 free_bdev(bdev); 4616 ut_fini_bdev(); 4617 } 4618 4619 static void 4620 lba_range_overlap(void) 4621 { 4622 struct lba_range r1, r2; 4623 4624 r1.offset = 100; 4625 r1.length = 50; 4626 4627 r2.offset = 0; 4628 r2.length = 1; 4629 CU_ASSERT(!bdev_lba_range_overlapped(&r1, &r2)); 4630 4631 r2.offset = 0; 4632 r2.length = 100; 4633 CU_ASSERT(!bdev_lba_range_overlapped(&r1, &r2)); 4634 4635 r2.offset = 0; 4636 r2.length = 110; 4637 CU_ASSERT(bdev_lba_range_overlapped(&r1, &r2)); 4638 4639 r2.offset = 100; 4640 r2.length = 10; 4641 CU_ASSERT(bdev_lba_range_overlapped(&r1, &r2)); 4642 4643 r2.offset = 110; 4644 r2.length = 20; 4645 CU_ASSERT(bdev_lba_range_overlapped(&r1, &r2)); 4646 4647 r2.offset = 140; 4648 r2.length = 150; 4649 CU_ASSERT(bdev_lba_range_overlapped(&r1, &r2)); 4650 4651 r2.offset = 130; 4652 r2.length = 200; 4653 CU_ASSERT(bdev_lba_range_overlapped(&r1, &r2)); 4654 4655 r2.offset = 150; 4656 r2.length = 100; 4657 CU_ASSERT(!bdev_lba_range_overlapped(&r1, &r2)); 4658 4659 r2.offset = 110; 4660 r2.length = 0; 4661 CU_ASSERT(!bdev_lba_range_overlapped(&r1, &r2)); 4662 } 4663 4664 static bool g_lock_lba_range_done; 4665 static bool g_unlock_lba_range_done; 4666 4667 static void 4668 lock_lba_range_done(void *ctx, int status) 4669 { 4670 g_lock_lba_range_done = true; 4671 } 4672 4673 static void 4674 unlock_lba_range_done(void *ctx, int status) 4675 { 4676 g_unlock_lba_range_done = true; 4677 } 4678 4679 static void 4680 lock_lba_range_check_ranges(void) 4681 { 4682 struct spdk_bdev *bdev; 4683 struct spdk_bdev_desc *desc = NULL; 4684 struct spdk_io_channel *io_ch; 4685 struct spdk_bdev_channel *channel; 4686 struct lba_range *range; 4687 int ctx1; 4688 int rc; 4689 4690 ut_init_bdev(NULL); 4691 bdev = allocate_bdev("bdev0"); 4692 4693 rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc); 4694 CU_ASSERT(rc == 0); 4695 CU_ASSERT(desc != NULL); 4696 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 4697 io_ch = spdk_bdev_get_io_channel(desc); 4698 CU_ASSERT(io_ch != NULL); 4699 channel = spdk_io_channel_get_ctx(io_ch); 4700 4701 g_lock_lba_range_done = false; 4702 rc = bdev_lock_lba_range(desc, io_ch, 20, 10, lock_lba_range_done, &ctx1); 4703 CU_ASSERT(rc == 0); 4704 poll_threads(); 4705 4706 CU_ASSERT(g_lock_lba_range_done == true); 4707 range = TAILQ_FIRST(&channel->locked_ranges); 4708 SPDK_CU_ASSERT_FATAL(range != NULL); 4709 CU_ASSERT(range->offset == 20); 4710 CU_ASSERT(range->length == 10); 4711 CU_ASSERT(range->owner_ch == channel); 4712 4713 /* Unlocks must exactly match a lock. 
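	 * A partial unlock (offset 20, length 1 below) must therefore fail
	 * with -EINVAL and leave the lock in place.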
*/ 4714 g_unlock_lba_range_done = false; 4715 rc = bdev_unlock_lba_range(desc, io_ch, 20, 1, unlock_lba_range_done, &ctx1); 4716 CU_ASSERT(rc == -EINVAL); 4717 CU_ASSERT(g_unlock_lba_range_done == false); 4718 4719 rc = bdev_unlock_lba_range(desc, io_ch, 20, 10, unlock_lba_range_done, &ctx1); 4720 CU_ASSERT(rc == 0); 4721 spdk_delay_us(100); 4722 poll_threads(); 4723 4724 CU_ASSERT(g_unlock_lba_range_done == true); 4725 CU_ASSERT(TAILQ_EMPTY(&channel->locked_ranges)); 4726 4727 spdk_put_io_channel(io_ch); 4728 spdk_bdev_close(desc); 4729 free_bdev(bdev); 4730 ut_fini_bdev(); 4731 } 4732 4733 static void 4734 lock_lba_range_with_io_outstanding(void) 4735 { 4736 struct spdk_bdev *bdev; 4737 struct spdk_bdev_desc *desc = NULL; 4738 struct spdk_io_channel *io_ch; 4739 struct spdk_bdev_channel *channel; 4740 struct lba_range *range; 4741 char buf[4096]; 4742 int ctx1; 4743 int rc; 4744 4745 ut_init_bdev(NULL); 4746 bdev = allocate_bdev("bdev0"); 4747 4748 rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc); 4749 CU_ASSERT(rc == 0); 4750 CU_ASSERT(desc != NULL); 4751 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 4752 io_ch = spdk_bdev_get_io_channel(desc); 4753 CU_ASSERT(io_ch != NULL); 4754 channel = spdk_io_channel_get_ctx(io_ch); 4755 4756 g_io_done = false; 4757 rc = spdk_bdev_read_blocks(desc, io_ch, buf, 20, 1, io_done, &ctx1); 4758 CU_ASSERT(rc == 0); 4759 4760 g_lock_lba_range_done = false; 4761 rc = bdev_lock_lba_range(desc, io_ch, 20, 10, lock_lba_range_done, &ctx1); 4762 CU_ASSERT(rc == 0); 4763 poll_threads(); 4764 4765 /* The lock should immediately become valid, since there are no outstanding 4766 * write I/O. 4767 */ 4768 CU_ASSERT(g_io_done == false); 4769 CU_ASSERT(g_lock_lba_range_done == true); 4770 range = TAILQ_FIRST(&channel->locked_ranges); 4771 SPDK_CU_ASSERT_FATAL(range != NULL); 4772 CU_ASSERT(range->offset == 20); 4773 CU_ASSERT(range->length == 10); 4774 CU_ASSERT(range->owner_ch == channel); 4775 CU_ASSERT(range->locked_ctx == &ctx1); 4776 4777 rc = bdev_unlock_lba_range(desc, io_ch, 20, 10, lock_lba_range_done, &ctx1); 4778 CU_ASSERT(rc == 0); 4779 stub_complete_io(1); 4780 spdk_delay_us(100); 4781 poll_threads(); 4782 4783 CU_ASSERT(TAILQ_EMPTY(&channel->locked_ranges)); 4784 4785 /* Now try again, but with a write I/O. */ 4786 g_io_done = false; 4787 rc = spdk_bdev_write_blocks(desc, io_ch, buf, 20, 1, io_done, &ctx1); 4788 CU_ASSERT(rc == 0); 4789 4790 g_lock_lba_range_done = false; 4791 rc = bdev_lock_lba_range(desc, io_ch, 20, 10, lock_lba_range_done, &ctx1); 4792 CU_ASSERT(rc == 0); 4793 poll_threads(); 4794 4795 /* The lock should not be fully valid yet, since a write I/O is outstanding. 4796 * But note that the range should be on the channel's locked_list, to make sure no 4797 * new write I/O are started. 4798 */ 4799 CU_ASSERT(g_io_done == false); 4800 CU_ASSERT(g_lock_lba_range_done == false); 4801 range = TAILQ_FIRST(&channel->locked_ranges); 4802 SPDK_CU_ASSERT_FATAL(range != NULL); 4803 CU_ASSERT(range->offset == 20); 4804 CU_ASSERT(range->length == 10); 4805 4806 /* Complete the write I/O. This should make the lock valid (checked by confirming 4807 * our callback was invoked). 
4808 */ 4809 stub_complete_io(1); 4810 spdk_delay_us(100); 4811 poll_threads(); 4812 CU_ASSERT(g_io_done == true); 4813 CU_ASSERT(g_lock_lba_range_done == true); 4814 4815 rc = bdev_unlock_lba_range(desc, io_ch, 20, 10, unlock_lba_range_done, &ctx1); 4816 CU_ASSERT(rc == 0); 4817 poll_threads(); 4818 4819 CU_ASSERT(TAILQ_EMPTY(&channel->locked_ranges)); 4820 4821 spdk_put_io_channel(io_ch); 4822 spdk_bdev_close(desc); 4823 free_bdev(bdev); 4824 ut_fini_bdev(); 4825 } 4826 4827 static void 4828 lock_lba_range_overlapped(void) 4829 { 4830 struct spdk_bdev *bdev; 4831 struct spdk_bdev_desc *desc = NULL; 4832 struct spdk_io_channel *io_ch; 4833 struct spdk_bdev_channel *channel; 4834 struct lba_range *range; 4835 int ctx1; 4836 int rc; 4837 4838 ut_init_bdev(NULL); 4839 bdev = allocate_bdev("bdev0"); 4840 4841 rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc); 4842 CU_ASSERT(rc == 0); 4843 CU_ASSERT(desc != NULL); 4844 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 4845 io_ch = spdk_bdev_get_io_channel(desc); 4846 CU_ASSERT(io_ch != NULL); 4847 channel = spdk_io_channel_get_ctx(io_ch); 4848 4849 /* Lock range 20-29. */ 4850 g_lock_lba_range_done = false; 4851 rc = bdev_lock_lba_range(desc, io_ch, 20, 10, lock_lba_range_done, &ctx1); 4852 CU_ASSERT(rc == 0); 4853 poll_threads(); 4854 4855 CU_ASSERT(g_lock_lba_range_done == true); 4856 range = TAILQ_FIRST(&channel->locked_ranges); 4857 SPDK_CU_ASSERT_FATAL(range != NULL); 4858 CU_ASSERT(range->offset == 20); 4859 CU_ASSERT(range->length == 10); 4860 4861 /* Try to lock range 25-39. It should not lock immediately, since it overlaps with 4862 * 20-29. 4863 */ 4864 g_lock_lba_range_done = false; 4865 rc = bdev_lock_lba_range(desc, io_ch, 25, 15, lock_lba_range_done, &ctx1); 4866 CU_ASSERT(rc == 0); 4867 poll_threads(); 4868 4869 CU_ASSERT(g_lock_lba_range_done == false); 4870 range = TAILQ_FIRST(&bdev->internal.pending_locked_ranges); 4871 SPDK_CU_ASSERT_FATAL(range != NULL); 4872 CU_ASSERT(range->offset == 25); 4873 CU_ASSERT(range->length == 15); 4874 4875 /* Unlock 20-29. This should result in range 25-39 now getting locked since it 4876 * no longer overlaps with an active lock. 4877 */ 4878 g_unlock_lba_range_done = false; 4879 rc = bdev_unlock_lba_range(desc, io_ch, 20, 10, unlock_lba_range_done, &ctx1); 4880 CU_ASSERT(rc == 0); 4881 poll_threads(); 4882 4883 CU_ASSERT(g_unlock_lba_range_done == true); 4884 CU_ASSERT(TAILQ_EMPTY(&bdev->internal.pending_locked_ranges)); 4885 range = TAILQ_FIRST(&channel->locked_ranges); 4886 SPDK_CU_ASSERT_FATAL(range != NULL); 4887 CU_ASSERT(range->offset == 25); 4888 CU_ASSERT(range->length == 15); 4889 4890 /* Lock 40-59. This should immediately lock since it does not overlap with the 4891 * currently active 25-39 lock. 4892 */ 4893 g_lock_lba_range_done = false; 4894 rc = bdev_lock_lba_range(desc, io_ch, 40, 20, lock_lba_range_done, &ctx1); 4895 CU_ASSERT(rc == 0); 4896 poll_threads(); 4897 4898 CU_ASSERT(g_lock_lba_range_done == true); 4899 range = TAILQ_FIRST(&bdev->internal.locked_ranges); 4900 SPDK_CU_ASSERT_FATAL(range != NULL); 4901 range = TAILQ_NEXT(range, tailq); 4902 SPDK_CU_ASSERT_FATAL(range != NULL); 4903 CU_ASSERT(range->offset == 40); 4904 CU_ASSERT(range->length == 20); 4905 4906 /* Try to lock 35-44. Note that this overlaps with both 25-39 and 40-59. 
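* It must wait on the pending list until both of those locks are released.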
*/ 4907 g_lock_lba_range_done = false; 4908 rc = bdev_lock_lba_range(desc, io_ch, 35, 10, lock_lba_range_done, &ctx1); 4909 CU_ASSERT(rc == 0); 4910 poll_threads(); 4911 4912 CU_ASSERT(g_lock_lba_range_done == false); 4913 range = TAILQ_FIRST(&bdev->internal.pending_locked_ranges); 4914 SPDK_CU_ASSERT_FATAL(range != NULL); 4915 CU_ASSERT(range->offset == 35); 4916 CU_ASSERT(range->length == 10); 4917 4918 /* Unlock 25-39. Make sure that 35-44 is still in the pending list, since 4919 * the 40-59 lock is still active. 4920 */ 4921 g_unlock_lba_range_done = false; 4922 rc = bdev_unlock_lba_range(desc, io_ch, 25, 15, unlock_lba_range_done, &ctx1); 4923 CU_ASSERT(rc == 0); 4924 poll_threads(); 4925 4926 CU_ASSERT(g_unlock_lba_range_done == true); 4927 CU_ASSERT(g_lock_lba_range_done == false); 4928 range = TAILQ_FIRST(&bdev->internal.pending_locked_ranges); 4929 SPDK_CU_ASSERT_FATAL(range != NULL); 4930 CU_ASSERT(range->offset == 35); 4931 CU_ASSERT(range->length == 10); 4932 4933 /* Unlock 40-59. This should result in 35-44 now getting locked, since there are 4934 * no longer any active overlapping locks. 4935 */ 4936 g_unlock_lba_range_done = false; 4937 rc = bdev_unlock_lba_range(desc, io_ch, 40, 20, unlock_lba_range_done, &ctx1); 4938 CU_ASSERT(rc == 0); 4939 poll_threads(); 4940 4941 CU_ASSERT(g_unlock_lba_range_done == true); 4942 CU_ASSERT(g_lock_lba_range_done == true); 4943 CU_ASSERT(TAILQ_EMPTY(&bdev->internal.pending_locked_ranges)); 4944 range = TAILQ_FIRST(&bdev->internal.locked_ranges); 4945 SPDK_CU_ASSERT_FATAL(range != NULL); 4946 CU_ASSERT(range->offset == 35); 4947 CU_ASSERT(range->length == 10); 4948 4949 /* Finally, unlock 35-44. */ 4950 g_unlock_lba_range_done = false; 4951 rc = bdev_unlock_lba_range(desc, io_ch, 35, 10, unlock_lba_range_done, &ctx1); 4952 CU_ASSERT(rc == 0); 4953 poll_threads(); 4954 4955 CU_ASSERT(g_unlock_lba_range_done == true); 4956 CU_ASSERT(TAILQ_EMPTY(&bdev->internal.locked_ranges)); 4957 4958 spdk_put_io_channel(io_ch); 4959 spdk_bdev_close(desc); 4960 free_bdev(bdev); 4961 ut_fini_bdev(); 4962 } 4963 4964 static void 4965 abort_done(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg) 4966 { 4967 g_abort_done = true; 4968 g_abort_status = bdev_io->internal.status; 4969 spdk_bdev_free_io(bdev_io); 4970 } 4971 4972 static void 4973 bdev_io_abort(void) 4974 { 4975 struct spdk_bdev *bdev; 4976 struct spdk_bdev_desc *desc = NULL; 4977 struct spdk_io_channel *io_ch; 4978 struct spdk_bdev_channel *channel; 4979 struct spdk_bdev_mgmt_channel *mgmt_ch; 4980 struct spdk_bdev_opts bdev_opts = {}; 4981 struct iovec iov[SPDK_BDEV_IO_NUM_CHILD_IOV * 2]; 4982 uint64_t io_ctx1 = 0, io_ctx2 = 0, i; 4983 int rc; 4984 4985 spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts)); 4986 bdev_opts.bdev_io_pool_size = 7; 4987 bdev_opts.bdev_io_cache_size = 2; 4988 ut_init_bdev(&bdev_opts); 4989 4990 bdev = allocate_bdev("bdev0"); 4991 4992 rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc); 4993 CU_ASSERT(rc == 0); 4994 CU_ASSERT(desc != NULL); 4995 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 4996 io_ch = spdk_bdev_get_io_channel(desc); 4997 CU_ASSERT(io_ch != NULL); 4998 channel = spdk_io_channel_get_ctx(io_ch); 4999 mgmt_ch = channel->shared_resource->mgmt_ch; 5000 5001 g_abort_done = false; 5002 5003 ut_enable_io_type(SPDK_BDEV_IO_TYPE_ABORT, false); 5004 5005 rc = spdk_bdev_abort(desc, io_ch, &io_ctx1, abort_done, NULL); 5006 CU_ASSERT(rc == -ENOTSUP); 5007 5008 ut_enable_io_type(SPDK_BDEV_IO_TYPE_ABORT, true); 5009 5010 rc = 
spdk_bdev_abort(desc, io_ch, &io_ctx2, abort_done, NULL); 5011 CU_ASSERT(rc == 0); 5012 CU_ASSERT(g_abort_done == true); 5013 CU_ASSERT(g_abort_status == SPDK_BDEV_IO_STATUS_FAILED); 5014 5015 /* Test the case that the target I/O was successfully aborted. */ 5016 g_io_done = false; 5017 5018 rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, &io_ctx1); 5019 CU_ASSERT(rc == 0); 5020 CU_ASSERT(g_io_done == false); 5021 5022 g_abort_done = false; 5023 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 5024 5025 rc = spdk_bdev_abort(desc, io_ch, &io_ctx1, abort_done, NULL); 5026 CU_ASSERT(rc == 0); 5027 CU_ASSERT(g_io_done == true); 5028 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED); 5029 stub_complete_io(1); 5030 CU_ASSERT(g_abort_done == true); 5031 CU_ASSERT(g_abort_status == SPDK_BDEV_IO_STATUS_SUCCESS); 5032 5033 /* Test the case that the target I/O was not aborted because it completed 5034 * in the middle of execution of the abort. 5035 */ 5036 g_io_done = false; 5037 5038 rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, &io_ctx1); 5039 CU_ASSERT(rc == 0); 5040 CU_ASSERT(g_io_done == false); 5041 5042 g_abort_done = false; 5043 g_io_exp_status = SPDK_BDEV_IO_STATUS_FAILED; 5044 5045 rc = spdk_bdev_abort(desc, io_ch, &io_ctx1, abort_done, NULL); 5046 CU_ASSERT(rc == 0); 5047 CU_ASSERT(g_io_done == false); 5048 5049 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 5050 stub_complete_io(1); 5051 CU_ASSERT(g_io_done == true); 5052 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS); 5053 5054 g_io_exp_status = SPDK_BDEV_IO_STATUS_FAILED; 5055 stub_complete_io(1); 5056 CU_ASSERT(g_abort_done == true); 5057 CU_ASSERT(g_abort_status == SPDK_BDEV_IO_STATUS_SUCCESS); 5058 5059 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 5060 5061 bdev->optimal_io_boundary = 16; 5062 bdev->split_on_optimal_io_boundary = true; 5063 5064 /* Test that a single-vector command which is split is aborted correctly. 5065 * Offset 14, length 8, payload 0xF000 5066 * Child - Offset 14, length 2, payload 0xF000 5067 * Child - Offset 16, length 6, payload 0xF000 + 2 * 512 5068 */ 5069 g_io_done = false; 5070 5071 rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 8, io_done, &io_ctx1); 5072 CU_ASSERT(rc == 0); 5073 CU_ASSERT(g_io_done == false); 5074 5075 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 5076 5077 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 5078 5079 rc = spdk_bdev_abort(desc, io_ch, &io_ctx1, abort_done, NULL); 5080 CU_ASSERT(rc == 0); 5081 CU_ASSERT(g_io_done == true); 5082 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED); 5083 stub_complete_io(2); 5084 CU_ASSERT(g_abort_done == true); 5085 CU_ASSERT(g_abort_status == SPDK_BDEV_IO_STATUS_SUCCESS); 5086 5087 /* Test that a multi-vector command that needs to be split by stripe and then 5088 * needs to be split further is aborted correctly. Abort is requested before the second 5089 * child I/O is submitted. The parent I/O should complete with failure without 5090 * submitting the second child I/O.
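* Only one child I/O is outstanding when the abort is issued, so aborting that single child is enough to fail the whole parent I/O.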
5091 */ 5092 for (i = 0; i < SPDK_BDEV_IO_NUM_CHILD_IOV * 2; i++) { 5093 iov[i].iov_base = (void *)((i + 1) * 0x10000); 5094 iov[i].iov_len = 512; 5095 } 5096 5097 bdev->optimal_io_boundary = SPDK_BDEV_IO_NUM_CHILD_IOV; 5098 g_io_done = false; 5099 rc = spdk_bdev_readv_blocks(desc, io_ch, iov, SPDK_BDEV_IO_NUM_CHILD_IOV * 2, 0, 5100 SPDK_BDEV_IO_NUM_CHILD_IOV * 2, io_done, &io_ctx1); 5101 CU_ASSERT(rc == 0); 5102 CU_ASSERT(g_io_done == false); 5103 5104 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 5105 5106 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 5107 5108 rc = spdk_bdev_abort(desc, io_ch, &io_ctx1, abort_done, NULL); 5109 CU_ASSERT(rc == 0); 5110 CU_ASSERT(g_io_done == true); 5111 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED); 5112 stub_complete_io(1); 5113 CU_ASSERT(g_abort_done == true); 5114 CU_ASSERT(g_abort_status == SPDK_BDEV_IO_STATUS_SUCCESS); 5115 5116 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 5117 5118 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 5119 5120 bdev->optimal_io_boundary = 16; 5121 g_io_done = false; 5122 5123 /* Test that a single-vector command which is split is aborted correctly. 5124 * Differently from the above, the child abort requests will be submitted 5125 * sequentially because the spdk_bdev_io pool cannot supply them all at once. 5126 */ 5127 rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 50, io_done, &io_ctx1); 5128 CU_ASSERT(rc == 0); 5129 CU_ASSERT(g_io_done == false); 5130 5131 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 4); 5132 5133 g_abort_done = false; 5134 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 5135 5136 rc = spdk_bdev_abort(desc, io_ch, &io_ctx1, abort_done, NULL); 5137 CU_ASSERT(rc == 0); 5138 CU_ASSERT(!TAILQ_EMPTY(&mgmt_ch->io_wait_queue)); 5139 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 4); 5140 5141 stub_complete_io(1); 5142 CU_ASSERT(g_io_done == true); 5143 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED); 5144 stub_complete_io(3); 5145 CU_ASSERT(g_abort_done == true); 5146 CU_ASSERT(g_abort_status == SPDK_BDEV_IO_STATUS_SUCCESS); 5147 5148 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 5149 5150 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 5151 5152 spdk_put_io_channel(io_ch); 5153 spdk_bdev_close(desc); 5154 free_bdev(bdev); 5155 ut_fini_bdev(); 5156 } 5157 5158 static void 5159 bdev_unmap(void) 5160 { 5161 struct spdk_bdev *bdev; 5162 struct spdk_bdev_desc *desc = NULL; 5163 struct spdk_io_channel *ioch; 5164 struct spdk_bdev_channel *bdev_ch; 5165 struct ut_expected_io *expected_io; 5166 struct spdk_bdev_opts bdev_opts = {}; 5167 uint32_t i, num_outstanding; 5168 uint64_t offset, num_blocks, max_unmap_blocks, num_children; 5169 int rc; 5170 5171 spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts)); 5172 bdev_opts.bdev_io_pool_size = 512; 5173 bdev_opts.bdev_io_cache_size = 64; 5174 ut_init_bdev(&bdev_opts); 5175 5176 bdev = allocate_bdev("bdev"); 5177 5178 rc = spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc); 5179 CU_ASSERT_EQUAL(rc, 0); 5180 SPDK_CU_ASSERT_FATAL(desc != NULL); 5181 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 5182 ioch = spdk_bdev_get_io_channel(desc); 5183 SPDK_CU_ASSERT_FATAL(ioch != NULL); 5184 bdev_ch = spdk_io_channel_get_ctx(ioch); 5185 CU_ASSERT(TAILQ_EMPTY(&bdev_ch->io_submitted)); 5186 5187 fn_table.submit_request = stub_submit_request; 5188 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 5189 5190 /* Case 1: First test the request won't be split */ 5191 num_blocks = 32; 5192 5193 g_io_done = false; 5194 expected_io =
ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_UNMAP, 0, num_blocks, 0); 5195 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 5196 rc = spdk_bdev_unmap_blocks(desc, ioch, 0, num_blocks, io_done, NULL); 5197 CU_ASSERT_EQUAL(rc, 0); 5198 CU_ASSERT(g_io_done == false); 5199 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 5200 stub_complete_io(1); 5201 CU_ASSERT(g_io_done == true); 5202 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 5203 5204 /* Case 2: Test the split with 2 child requests */ 5205 bdev->max_unmap = 8; 5206 bdev->max_unmap_segments = 2; 5207 max_unmap_blocks = bdev->max_unmap * bdev->max_unmap_segments; 5208 num_blocks = max_unmap_blocks * 2; 5209 offset = 0; 5210 5211 g_io_done = false; 5212 for (i = 0; i < 2; i++) { 5213 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_UNMAP, offset, max_unmap_blocks, 0); 5214 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 5215 offset += max_unmap_blocks; 5216 } 5217 5218 rc = spdk_bdev_unmap_blocks(desc, ioch, 0, num_blocks, io_done, NULL); 5219 CU_ASSERT_EQUAL(rc, 0); 5220 CU_ASSERT(g_io_done == false); 5221 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 5222 stub_complete_io(2); 5223 CU_ASSERT(g_io_done == true); 5224 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 5225 5226 /* Case 3: Test the split with 15 child requests; the first 8 complete before the rest are submitted */ 5227 num_children = 15; 5228 num_blocks = max_unmap_blocks * num_children; 5229 g_io_done = false; 5230 offset = 0; 5231 for (i = 0; i < num_children; i++) { 5232 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_UNMAP, offset, max_unmap_blocks, 0); 5233 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 5234 offset += max_unmap_blocks; 5235 } 5236 5237 rc = spdk_bdev_unmap_blocks(desc, ioch, 0, num_blocks, io_done, NULL); 5238 CU_ASSERT_EQUAL(rc, 0); 5239 CU_ASSERT(g_io_done == false); 5240 5241 while (num_children > 0) { 5242 num_outstanding = spdk_min(num_children, SPDK_BDEV_MAX_CHILDREN_UNMAP_WRITE_ZEROES_REQS); 5243 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == num_outstanding); 5244 stub_complete_io(num_outstanding); 5245 num_children -= num_outstanding; 5246 } 5247 CU_ASSERT(g_io_done == true); 5248 5249 spdk_put_io_channel(ioch); 5250 spdk_bdev_close(desc); 5251 free_bdev(bdev); 5252 ut_fini_bdev(); 5253 } 5254 5255 static void 5256 bdev_write_zeroes_split_test(void) 5257 { 5258 struct spdk_bdev *bdev; 5259 struct spdk_bdev_desc *desc = NULL; 5260 struct spdk_io_channel *ioch; 5261 struct spdk_bdev_channel *bdev_ch; 5262 struct ut_expected_io *expected_io; 5263 struct spdk_bdev_opts bdev_opts = {}; 5264 uint32_t i, num_outstanding; 5265 uint64_t offset, num_blocks, max_write_zeroes_blocks, num_children; 5266 int rc; 5267 5268 spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts)); 5269 bdev_opts.bdev_io_pool_size = 512; 5270 bdev_opts.bdev_io_cache_size = 64; 5271 ut_init_bdev(&bdev_opts); 5272 5273 bdev = allocate_bdev("bdev"); 5274 5275 rc = spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc); 5276 CU_ASSERT_EQUAL(rc, 0); 5277 SPDK_CU_ASSERT_FATAL(desc != NULL); 5278 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 5279 ioch = spdk_bdev_get_io_channel(desc); 5280 SPDK_CU_ASSERT_FATAL(ioch != NULL); 5281 bdev_ch = spdk_io_channel_get_ctx(ioch); 5282 CU_ASSERT(TAILQ_EMPTY(&bdev_ch->io_submitted)); 5283 5284 fn_table.submit_request = stub_submit_request; 5285 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 5286 5287 /* Case 1: First test the request won't be
split */ 5288 num_blocks = 32; 5289 5290 g_io_done = false; 5291 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, 0, num_blocks, 0); 5292 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 5293 rc = spdk_bdev_write_zeroes_blocks(desc, ioch, 0, num_blocks, io_done, NULL); 5294 CU_ASSERT_EQUAL(rc, 0); 5295 CU_ASSERT(g_io_done == false); 5296 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 5297 stub_complete_io(1); 5298 CU_ASSERT(g_io_done == true); 5299 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 5300 5301 /* Case 2: Test the split with 2 child requests */ 5302 max_write_zeroes_blocks = 8; 5303 bdev->max_write_zeroes = max_write_zeroes_blocks; 5304 num_blocks = max_write_zeroes_blocks * 2; 5305 offset = 0; 5306 5307 g_io_done = false; 5308 for (i = 0; i < 2; i++) { 5309 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, offset, max_write_zeroes_blocks, 5310 0); 5311 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 5312 offset += max_write_zeroes_blocks; 5313 } 5314 5315 rc = spdk_bdev_write_zeroes_blocks(desc, ioch, 0, num_blocks, io_done, NULL); 5316 CU_ASSERT_EQUAL(rc, 0); 5317 CU_ASSERT(g_io_done == false); 5318 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 5319 stub_complete_io(2); 5320 CU_ASSERT(g_io_done == true); 5321 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 5322 5323 /* Case 3: Test the split with 15 child requests; the first 8 complete before the rest are submitted */ 5324 num_children = 15; 5325 num_blocks = max_write_zeroes_blocks * num_children; 5326 g_io_done = false; 5327 offset = 0; 5328 for (i = 0; i < num_children; i++) { 5329 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, offset, max_write_zeroes_blocks, 5330 0); 5331 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 5332 offset += max_write_zeroes_blocks; 5333 } 5334 5335 rc = spdk_bdev_write_zeroes_blocks(desc, ioch, 0, num_blocks, io_done, NULL); 5336 CU_ASSERT_EQUAL(rc, 0); 5337 CU_ASSERT(g_io_done == false); 5338 5339 while (num_children > 0) { 5340 num_outstanding = spdk_min(num_children, SPDK_BDEV_MAX_CHILDREN_UNMAP_WRITE_ZEROES_REQS); 5341 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == num_outstanding); 5342 stub_complete_io(num_outstanding); 5343 num_children -= num_outstanding; 5344 } 5345 CU_ASSERT(g_io_done == true); 5346 5347 spdk_put_io_channel(ioch); 5348 spdk_bdev_close(desc); 5349 free_bdev(bdev); 5350 ut_fini_bdev(); 5351 } 5352 5353 static void 5354 bdev_set_options_test(void) 5355 { 5356 struct spdk_bdev_opts bdev_opts = {}; 5357 int rc; 5358 5359 /* Case 1: Do not set opts_size */ 5360 rc = spdk_bdev_set_opts(&bdev_opts); 5361 CU_ASSERT(rc == -1); 5362 5363 spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts)); 5364 bdev_opts.bdev_io_pool_size = 4; 5365 bdev_opts.bdev_io_cache_size = 2; 5366 bdev_opts.small_buf_pool_size = 4; 5367 5368 /* Case 2: Do not set valid small_buf_pool_size and large_buf_pool_size */ 5369 rc = spdk_bdev_set_opts(&bdev_opts); 5370 CU_ASSERT(rc == -1); 5371 5372 /* Case 3: Do not set valid large_buf_pool_size */ 5373 bdev_opts.small_buf_pool_size = BUF_SMALL_POOL_SIZE; 5374 bdev_opts.large_buf_pool_size = BUF_LARGE_POOL_SIZE - 1; 5375 rc = spdk_bdev_set_opts(&bdev_opts); 5376 CU_ASSERT(rc == -1); 5377 5378 /* Case 4: Set a valid large_buf_pool_size */ 5379 bdev_opts.large_buf_pool_size = BUF_LARGE_POOL_SIZE; 5380 rc = spdk_bdev_set_opts(&bdev_opts); 5381 CU_ASSERT(rc == 0); 5382 5383 /* Case 5: Set different valid values for the small and
large buf pool */ 5384 bdev_opts.small_buf_pool_size = BUF_SMALL_POOL_SIZE + 3; 5385 bdev_opts.large_buf_pool_size = BUF_LARGE_POOL_SIZE + 3; 5386 rc = spdk_bdev_set_opts(&bdev_opts); 5387 CU_ASSERT(rc == 0); 5388 } 5389 5390 static uint64_t 5391 get_ns_time(void) 5392 { 5393 int rc; 5394 struct timespec ts; 5395 5396 rc = clock_gettime(CLOCK_MONOTONIC, &ts); 5397 CU_ASSERT(rc == 0); 5398 return ts.tv_sec * 1000 * 1000 * 1000 + ts.tv_nsec; 5399 } 5400 5401 static int 5402 rb_tree_get_height(struct spdk_bdev_name *bdev_name) 5403 { 5404 int h1, h2; 5405 5406 if (bdev_name == NULL) { 5407 return -1; 5408 } else { 5409 h1 = rb_tree_get_height(RB_LEFT(bdev_name, node)); 5410 h2 = rb_tree_get_height(RB_RIGHT(bdev_name, node)); 5411 5412 return spdk_max(h1, h2) + 1; 5413 } 5414 } 5415 5416 static void 5417 bdev_multi_allocation(void) 5418 { 5419 const int max_bdev_num = 1024 * 16; 5420 char name[max_bdev_num][16]; 5421 char noexist_name[] = "invalid_bdev"; 5422 struct spdk_bdev *bdev[max_bdev_num]; 5423 int i, j; 5424 uint64_t last_time; 5425 int bdev_num; 5426 int height; 5427 5428 for (j = 0; j < max_bdev_num; j++) { 5429 snprintf(name[j], sizeof(name[j]), "bdev%d", j); 5430 } 5431 5432 for (i = 0; i < 16; i++) { 5433 last_time = get_ns_time(); 5434 bdev_num = 1024 * (i + 1); 5435 for (j = 0; j < bdev_num; j++) { 5436 bdev[j] = allocate_bdev(name[j]); 5437 height = rb_tree_get_height(&bdev[j]->internal.bdev_name); 5438 CU_ASSERT(height <= (int)(spdk_u32log2(2 * j + 2))); 5439 } 5440 SPDK_NOTICELOG("alloc bdev num %d takes %" PRIu64 " ms\n", bdev_num, 5441 (get_ns_time() - last_time) / 1000 / 1000); 5442 for (j = 0; j < bdev_num; j++) { 5443 CU_ASSERT(spdk_bdev_get_by_name(name[j]) != NULL); 5444 } 5445 CU_ASSERT(spdk_bdev_get_by_name(noexist_name) == NULL); 5446 5447 for (j = 0; j < bdev_num; j++) { 5448 free_bdev(bdev[j]); 5449 } 5450 for (j = 0; j < bdev_num; j++) { 5451 CU_ASSERT(spdk_bdev_get_by_name(name[j]) == NULL); 5452 } 5453 } 5454 } 5455 5456 static struct spdk_memory_domain *g_bdev_memory_domain = (struct spdk_memory_domain *) 0xf00df00d; 5457 5458 static int 5459 test_bdev_get_supported_dma_device_types_op(void *ctx, struct spdk_memory_domain **domains, 5460 int array_size) 5461 { 5462 if (array_size > 0 && domains) { 5463 domains[0] = g_bdev_memory_domain; 5464 } 5465 5466 return 1; 5467 } 5468 5469 static void 5470 bdev_get_memory_domains(void) 5471 { 5472 struct spdk_bdev_fn_table fn_table = { 5473 .get_memory_domains = test_bdev_get_supported_dma_device_types_op 5474 }; 5475 struct spdk_bdev bdev = { .fn_table = &fn_table }; 5476 struct spdk_memory_domain *domains[2] = {}; 5477 int rc; 5478 5479 /* bdev is NULL */ 5480 rc = spdk_bdev_get_memory_domains(NULL, domains, 2); 5481 CU_ASSERT(rc == -EINVAL); 5482 5483 /* domains is NULL */ 5484 rc = spdk_bdev_get_memory_domains(&bdev, NULL, 2); 5485 CU_ASSERT(rc == 1); 5486 5487 /* array size is 0 */ 5488 rc = spdk_bdev_get_memory_domains(&bdev, domains, 0); 5489 CU_ASSERT(rc == 1); 5490 5491 /* get_supported_dma_device_types op is set */ 5492 rc = spdk_bdev_get_memory_domains(&bdev, domains, 2); 5493 CU_ASSERT(rc == 1); 5494 CU_ASSERT(domains[0] == g_bdev_memory_domain); 5495 5496 /* get_supported_dma_device_types op is not set */ 5497 fn_table.get_memory_domains = NULL; 5498 rc = spdk_bdev_get_memory_domains(&bdev, domains, 2); 5499 CU_ASSERT(rc == 0); 5500 } 5501 5502 static void 5503 _bdev_io_ext(struct spdk_bdev_ext_io_opts *ext_io_opts) 5504 { 5505 struct spdk_bdev *bdev; 5506 struct spdk_bdev_desc *desc = NULL; 5507 struct
spdk_io_channel *io_ch; 5508 char io_buf[512]; 5509 struct iovec iov = { .iov_base = io_buf, .iov_len = 512 }; 5510 struct ut_expected_io *expected_io; 5511 int rc; 5512 5513 ut_init_bdev(NULL); 5514 5515 bdev = allocate_bdev("bdev0"); 5516 bdev->md_interleave = false; 5517 bdev->md_len = 8; 5518 5519 rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc); 5520 CU_ASSERT(rc == 0); 5521 SPDK_CU_ASSERT_FATAL(desc != NULL); 5522 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 5523 io_ch = spdk_bdev_get_io_channel(desc); 5524 CU_ASSERT(io_ch != NULL); 5525 5526 /* read */ 5527 g_io_done = false; 5528 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 32, 14, 1); 5529 if (ext_io_opts) { 5530 expected_io->md_buf = ext_io_opts->metadata; 5531 } 5532 ut_expected_io_set_iov(expected_io, 0, iov.iov_base, iov.iov_len); 5533 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 5534 5535 rc = spdk_bdev_readv_blocks_ext(desc, io_ch, &iov, 1, 32, 14, io_done, NULL, ext_io_opts); 5536 5537 CU_ASSERT(rc == 0); 5538 CU_ASSERT(g_io_done == false); 5539 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 5540 stub_complete_io(1); 5541 CU_ASSERT(g_io_done == true); 5542 5543 /* write */ 5544 g_io_done = false; 5545 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 32, 14, 1); 5546 if (ext_io_opts) { 5547 expected_io->md_buf = ext_io_opts->metadata; 5548 } 5549 ut_expected_io_set_iov(expected_io, 0, iov.iov_base, iov.iov_len); 5550 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 5551 5552 rc = spdk_bdev_writev_blocks_ext(desc, io_ch, &iov, 1, 32, 14, io_done, NULL, ext_io_opts); 5553 5554 CU_ASSERT(rc == 0); 5555 CU_ASSERT(g_io_done == false); 5556 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 5557 stub_complete_io(1); 5558 CU_ASSERT(g_io_done == true); 5559 5560 spdk_put_io_channel(io_ch); 5561 spdk_bdev_close(desc); 5562 free_bdev(bdev); 5563 ut_fini_bdev(); 5564 5565 } 5566 5567 static void 5568 bdev_io_ext(void) 5569 { 5570 struct spdk_bdev_ext_io_opts ext_io_opts = { 5571 .metadata = (void *)0xFF000000, 5572 .size = sizeof(ext_io_opts) 5573 }; 5574 5575 _bdev_io_ext(&ext_io_opts); 5576 } 5577 5578 static void 5579 bdev_io_ext_no_opts(void) 5580 { 5581 _bdev_io_ext(NULL); 5582 } 5583 5584 static void 5585 bdev_io_ext_invalid_opts(void) 5586 { 5587 struct spdk_bdev *bdev; 5588 struct spdk_bdev_desc *desc = NULL; 5589 struct spdk_io_channel *io_ch; 5590 char io_buf[512]; 5591 struct iovec iov = { .iov_base = io_buf, .iov_len = 512 }; 5592 struct spdk_bdev_ext_io_opts ext_io_opts = { 5593 .metadata = (void *)0xFF000000, 5594 .size = sizeof(ext_io_opts) 5595 }; 5596 int rc; 5597 5598 ut_init_bdev(NULL); 5599 5600 bdev = allocate_bdev("bdev0"); 5601 bdev->md_interleave = false; 5602 bdev->md_len = 8; 5603 5604 rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc); 5605 CU_ASSERT(rc == 0); 5606 SPDK_CU_ASSERT_FATAL(desc != NULL); 5607 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 5608 io_ch = spdk_bdev_get_io_channel(desc); 5609 CU_ASSERT(io_ch != NULL); 5610 5611 /* Test invalid ext_opts size */ 5612 ext_io_opts.size = 0; 5613 rc = spdk_bdev_readv_blocks_ext(desc, io_ch, &iov, 1, 32, 14, io_done, NULL, &ext_io_opts); 5614 CU_ASSERT(rc == -EINVAL); 5615 rc = spdk_bdev_writev_blocks_ext(desc, io_ch, &iov, 1, 32, 14, io_done, NULL, &ext_io_opts); 5616 CU_ASSERT(rc == -EINVAL); 5617 5618 ext_io_opts.size = sizeof(ext_io_opts) * 2; 5619 rc = spdk_bdev_readv_blocks_ext(desc, io_ch, &iov, 1, 32, 14, io_done, 
NULL, &ext_io_opts); 5620 CU_ASSERT(rc == -EINVAL); 5621 rc = spdk_bdev_writev_blocks_ext(desc, io_ch, &iov, 1, 32, 14, io_done, NULL, &ext_io_opts); 5622 CU_ASSERT(rc == -EINVAL); 5623 5624 ext_io_opts.size = offsetof(struct spdk_bdev_ext_io_opts, metadata) + 5625 sizeof(ext_io_opts.metadata) - 1; 5626 rc = spdk_bdev_readv_blocks_ext(desc, io_ch, &iov, 1, 32, 14, io_done, NULL, &ext_io_opts); 5627 CU_ASSERT(rc == -EINVAL); 5628 rc = spdk_bdev_writev_blocks_ext(desc, io_ch, &iov, 1, 32, 14, io_done, NULL, &ext_io_opts); 5629 CU_ASSERT(rc == -EINVAL); 5630 5631 spdk_put_io_channel(io_ch); 5632 spdk_bdev_close(desc); 5633 free_bdev(bdev); 5634 ut_fini_bdev(); 5635 } 5636 5637 static void 5638 bdev_io_ext_split(void) 5639 { 5640 struct spdk_bdev *bdev; 5641 struct spdk_bdev_desc *desc = NULL; 5642 struct spdk_io_channel *io_ch; 5643 char io_buf[512]; 5644 struct iovec iov = { .iov_base = io_buf, .iov_len = 512 }; 5645 struct ut_expected_io *expected_io; 5646 struct spdk_bdev_ext_io_opts ext_io_opts = { 5647 .metadata = (void *)0xFF000000, 5648 .size = sizeof(ext_io_opts) 5649 }; 5650 int rc; 5651 5652 ut_init_bdev(NULL); 5653 5654 bdev = allocate_bdev("bdev0"); 5655 bdev->md_interleave = false; 5656 bdev->md_len = 8; 5657 5658 rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc); 5659 CU_ASSERT(rc == 0); 5660 SPDK_CU_ASSERT_FATAL(desc != NULL); 5661 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 5662 io_ch = spdk_bdev_get_io_channel(desc); 5663 CU_ASSERT(io_ch != NULL); 5664 5665 /* Check that IO request with ext_opts and metadata is split correctly 5666 * Offset 14, length 8, payload 0xF000 5667 * Child - Offset 14, length 2, payload 0xF000 5668 * Child - Offset 16, length 6, payload 0xF000 + 2 * 512 5669 */ 5670 bdev->optimal_io_boundary = 16; 5671 bdev->split_on_optimal_io_boundary = true; 5672 bdev->md_interleave = false; 5673 bdev->md_len = 8; 5674 5675 iov.iov_base = (void *)0xF000; 5676 iov.iov_len = 4096; 5677 memset(&ext_io_opts, 0, sizeof(ext_io_opts)); 5678 ext_io_opts.metadata = (void *)0xFF000000; 5679 ext_io_opts.size = sizeof(ext_io_opts); 5680 g_io_done = false; 5681 5682 /* read */ 5683 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 14, 2, 1); 5684 expected_io->md_buf = ext_io_opts.metadata; 5685 ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 2 * 512); 5686 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 5687 5688 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 16, 6, 1); 5689 expected_io->md_buf = ext_io_opts.metadata + 2 * 8; 5690 ut_expected_io_set_iov(expected_io, 0, (void *)(0xF000 + 2 * 512), 6 * 512); 5691 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 5692 5693 rc = spdk_bdev_readv_blocks_ext(desc, io_ch, &iov, 1, 14, 8, io_done, NULL, &ext_io_opts); 5694 CU_ASSERT(rc == 0); 5695 CU_ASSERT(g_io_done == false); 5696 5697 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 5698 stub_complete_io(2); 5699 CU_ASSERT(g_io_done == true); 5700 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 5701 5702 /* write */ 5703 g_io_done = false; 5704 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 14, 2, 1); 5705 expected_io->md_buf = ext_io_opts.metadata; 5706 ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 2 * 512); 5707 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 5708 5709 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 16, 6, 1); 5710 expected_io->md_buf = ext_io_opts.metadata + 2 * 8; 5711 
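/* The second write child starts 2 blocks into the I/O, so its metadata pointer is advanced by 2 * md_len (16 bytes), matching the read case above. */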
ut_expected_io_set_iov(expected_io, 0, (void *)(0xF000 + 2 * 512), 6 * 512); 5712 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 5713 5714 rc = spdk_bdev_writev_blocks_ext(desc, io_ch, &iov, 1, 14, 8, io_done, NULL, &ext_io_opts); 5715 CU_ASSERT(rc == 0); 5716 CU_ASSERT(g_io_done == false); 5717 5718 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 5719 stub_complete_io(2); 5720 CU_ASSERT(g_io_done == true); 5721 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 5722 5723 spdk_put_io_channel(io_ch); 5724 spdk_bdev_close(desc); 5725 free_bdev(bdev); 5726 ut_fini_bdev(); 5727 } 5728 5729 static void 5730 bdev_io_ext_bounce_buffer(void) 5731 { 5732 struct spdk_bdev *bdev; 5733 struct spdk_bdev_desc *desc = NULL; 5734 struct spdk_io_channel *io_ch; 5735 char io_buf[512]; 5736 struct iovec iov = { .iov_base = io_buf, .iov_len = 512 }; 5737 struct ut_expected_io *expected_io; 5738 struct spdk_bdev_ext_io_opts ext_io_opts = { 5739 .metadata = (void *)0xFF000000, 5740 .size = sizeof(ext_io_opts) 5741 }; 5742 int rc; 5743 5744 ut_init_bdev(NULL); 5745 5746 bdev = allocate_bdev("bdev0"); 5747 bdev->md_interleave = false; 5748 bdev->md_len = 8; 5749 5750 rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc); 5751 CU_ASSERT(rc == 0); 5752 SPDK_CU_ASSERT_FATAL(desc != NULL); 5753 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 5754 io_ch = spdk_bdev_get_io_channel(desc); 5755 CU_ASSERT(io_ch != NULL); 5756 5757 /* Verify data pull/push. 5758 * The bdev doesn't support memory domains, so bounce buffers from the bdev memory pool will be used. */ 5759 ext_io_opts.memory_domain = (struct spdk_memory_domain *)0xdeadbeef; 5760 5761 /* read */ 5762 g_io_done = false; 5763 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 32, 14, 1); 5764 ut_expected_io_set_iov(expected_io, 0, iov.iov_base, iov.iov_len); 5765 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 5766 5767 rc = spdk_bdev_readv_blocks_ext(desc, io_ch, &iov, 1, 32, 14, io_done, NULL, &ext_io_opts); 5768 5769 CU_ASSERT(rc == 0); 5770 CU_ASSERT(g_io_done == false); 5771 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 5772 stub_complete_io(1); 5773 CU_ASSERT(g_memory_domain_push_data_called == true); 5774 CU_ASSERT(g_io_done == true); 5775 5776 /* write */ 5777 g_io_done = false; 5778 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 32, 14, 1); 5779 ut_expected_io_set_iov(expected_io, 0, iov.iov_base, iov.iov_len); 5780 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 5781 5782 rc = spdk_bdev_writev_blocks_ext(desc, io_ch, &iov, 1, 32, 14, io_done, NULL, &ext_io_opts); 5783 5784 CU_ASSERT(rc == 0); 5785 CU_ASSERT(g_memory_domain_pull_data_called == true); 5786 CU_ASSERT(g_io_done == false); 5787 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 5788 stub_complete_io(1); 5789 CU_ASSERT(g_io_done == true); 5790 5791 spdk_put_io_channel(io_ch); 5792 spdk_bdev_close(desc); 5793 free_bdev(bdev); 5794 ut_fini_bdev(); 5795 } 5796 5797 static void 5798 bdev_register_uuid_alias(void) 5799 { 5800 struct spdk_bdev *bdev, *second; 5801 char uuid[SPDK_UUID_STRING_LEN]; 5802 int rc; 5803 5804 ut_init_bdev(NULL); 5805 bdev = allocate_bdev("bdev0"); 5806 5807 /* Make sure a UUID was generated */ 5808 CU_ASSERT_FALSE(spdk_mem_all_zero(&bdev->uuid, sizeof(bdev->uuid))); 5809 5810 /* Check that a UUID alias was registered */ 5811 spdk_uuid_fmt_lower(uuid, sizeof(uuid), &bdev->uuid); 5812 CU_ASSERT_EQUAL(spdk_bdev_get_by_name(uuid), bdev); 5813 5814 /*
Unregister the bdev */ 5815 spdk_bdev_unregister(bdev, NULL, NULL); 5816 poll_threads(); 5817 CU_ASSERT_PTR_NULL(spdk_bdev_get_by_name(uuid)); 5818 5819 /* Check the same, but this time register the bdev with a non-zero UUID */ 5820 rc = spdk_bdev_register(bdev); 5821 CU_ASSERT_EQUAL(rc, 0); 5822 CU_ASSERT_EQUAL(spdk_bdev_get_by_name(uuid), bdev); 5823 5824 /* Unregister the bdev */ 5825 spdk_bdev_unregister(bdev, NULL, NULL); 5826 poll_threads(); 5827 CU_ASSERT_PTR_NULL(spdk_bdev_get_by_name(uuid)); 5828 5829 /* Register the bdev using its UUID as the name */ 5830 bdev->name = uuid; 5831 rc = spdk_bdev_register(bdev); 5832 CU_ASSERT_EQUAL(rc, 0); 5833 CU_ASSERT_EQUAL(spdk_bdev_get_by_name(uuid), bdev); 5834 5835 /* Unregister the bdev */ 5836 spdk_bdev_unregister(bdev, NULL, NULL); 5837 poll_threads(); 5838 CU_ASSERT_PTR_NULL(spdk_bdev_get_by_name(uuid)); 5839 5840 /* Check that it's not possible to register two bdevs with the same UUID */ 5841 bdev->name = "bdev0"; 5842 second = allocate_bdev("bdev1"); 5843 spdk_uuid_copy(&bdev->uuid, &second->uuid); 5844 rc = spdk_bdev_register(bdev); 5845 CU_ASSERT_EQUAL(rc, -EEXIST); 5846 5847 /* Regenerate the UUID and re-check */ 5848 spdk_uuid_generate(&bdev->uuid); 5849 rc = spdk_bdev_register(bdev); 5850 CU_ASSERT_EQUAL(rc, 0); 5851 5852 /* And check that both bdevs can be retrieved through their UUIDs */ 5853 spdk_uuid_fmt_lower(uuid, sizeof(uuid), &bdev->uuid); 5854 CU_ASSERT_EQUAL(spdk_bdev_get_by_name(uuid), bdev); 5855 spdk_uuid_fmt_lower(uuid, sizeof(uuid), &second->uuid); 5856 CU_ASSERT_EQUAL(spdk_bdev_get_by_name(uuid), second); 5857 5858 free_bdev(second); 5859 free_bdev(bdev); 5860 ut_fini_bdev(); 5861 } 5862 5863 static void 5864 bdev_unregister_by_name(void) 5865 { 5866 struct spdk_bdev *bdev; 5867 int rc; 5868 5869 bdev = allocate_bdev("bdev"); 5870 5871 g_event_type1 = 0xFF; 5872 g_unregister_arg = NULL; 5873 g_unregister_rc = -1; 5874 5875 rc = spdk_bdev_unregister_by_name("bdev1", &bdev_ut_if, bdev_unregister_cb, (void *)0x12345678); 5876 CU_ASSERT(rc == -ENODEV); 5877 5878 rc = spdk_bdev_unregister_by_name("bdev", &vbdev_ut_if, bdev_unregister_cb, (void *)0x12345678); 5879 CU_ASSERT(rc == -ENODEV); 5880 5881 rc = spdk_bdev_unregister_by_name("bdev", &bdev_ut_if, bdev_unregister_cb, (void *)0x12345678); 5882 CU_ASSERT(rc == 0); 5883 5884 /* Check that the unregister callback is delayed */ 5885 CU_ASSERT(g_unregister_arg == NULL); 5886 CU_ASSERT(g_unregister_rc == -1); 5887 5888 poll_threads(); 5889 5890 /* Event callback shall not be issued because the device was closed */ 5891 CU_ASSERT(g_event_type1 == 0xFF); 5892 /* Unregister callback is issued */ 5893 CU_ASSERT(g_unregister_arg == (void *)0x12345678); 5894 CU_ASSERT(g_unregister_rc == 0); 5895 5896 free_bdev(bdev); 5897 } 5898 5899 static int 5900 count_bdevs(void *ctx, struct spdk_bdev *bdev) 5901 { 5902 int *count = ctx; 5903 5904 (*count)++; 5905 5906 return 0; 5907 } 5908 5909 static void 5910 for_each_bdev_test(void) 5911 { 5912 struct spdk_bdev *bdev[8]; 5913 int rc, count; 5914 5915 bdev[0] = allocate_bdev("bdev0"); 5916 bdev[0]->internal.status = SPDK_BDEV_STATUS_REMOVING; 5917 5918 bdev[1] = allocate_bdev("bdev1"); 5919 rc = spdk_bdev_module_claim_bdev(bdev[1], NULL, &bdev_ut_if); 5920 CU_ASSERT(rc == 0); 5921 5922 bdev[2] = allocate_bdev("bdev2"); 5923 5924 bdev[3] = allocate_bdev("bdev3"); 5925 rc = spdk_bdev_module_claim_bdev(bdev[3], NULL, &bdev_ut_if); 5926 CU_ASSERT(rc == 0); 5927 5928 bdev[4] = allocate_bdev("bdev4"); 5929 5930 bdev[5] = allocate_bdev("bdev5"); 5931 rc =
spdk_bdev_module_claim_bdev(bdev[5], NULL, &bdev_ut_if); 5932 CU_ASSERT(rc == 0); 5933 5934 bdev[6] = allocate_bdev("bdev6"); 5935 5936 bdev[7] = allocate_bdev("bdev7"); 5937 5938 count = 0; 5939 rc = spdk_for_each_bdev(&count, count_bdevs); 5940 CU_ASSERT(rc == 0); 5941 CU_ASSERT(count == 7); 5942 5943 count = 0; 5944 rc = spdk_for_each_bdev_leaf(&count, count_bdevs); 5945 CU_ASSERT(rc == 0); 5946 CU_ASSERT(count == 4); 5947 5948 bdev[0]->internal.status = SPDK_BDEV_STATUS_READY; 5949 free_bdev(bdev[0]); 5950 free_bdev(bdev[1]); 5951 free_bdev(bdev[2]); 5952 free_bdev(bdev[3]); 5953 free_bdev(bdev[4]); 5954 free_bdev(bdev[5]); 5955 free_bdev(bdev[6]); 5956 free_bdev(bdev[7]); 5957 } 5958 5959 static void 5960 bdev_seek_test(void) 5961 { 5962 struct spdk_bdev *bdev; 5963 struct spdk_bdev_desc *desc = NULL; 5964 struct spdk_io_channel *io_ch; 5965 int rc; 5966 5967 ut_init_bdev(NULL); 5968 poll_threads(); 5969 5970 bdev = allocate_bdev("bdev0"); 5971 5972 rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc); 5973 CU_ASSERT(rc == 0); 5974 poll_threads(); 5975 SPDK_CU_ASSERT_FATAL(desc != NULL); 5976 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 5977 io_ch = spdk_bdev_get_io_channel(desc); 5978 CU_ASSERT(io_ch != NULL); 5979 5980 /* Seek data not supported */ 5981 ut_enable_io_type(SPDK_BDEV_IO_TYPE_SEEK_DATA, false); 5982 rc = spdk_bdev_seek_data(desc, io_ch, 0, bdev_seek_cb, NULL); 5983 CU_ASSERT(rc == 0); 5984 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 5985 poll_threads(); 5986 CU_ASSERT(g_seek_offset == 0); 5987 5988 /* Seek hole not supported */ 5989 ut_enable_io_type(SPDK_BDEV_IO_TYPE_SEEK_HOLE, false); 5990 rc = spdk_bdev_seek_hole(desc, io_ch, 0, bdev_seek_cb, NULL); 5991 CU_ASSERT(rc == 0); 5992 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 5993 poll_threads(); 5994 CU_ASSERT(g_seek_offset == UINT64_MAX); 5995 5996 /* Seek data supported */ 5997 g_seek_data_offset = 12345; 5998 ut_enable_io_type(SPDK_BDEV_IO_TYPE_SEEK_DATA, true); 5999 rc = spdk_bdev_seek_data(desc, io_ch, 0, bdev_seek_cb, NULL); 6000 CU_ASSERT(rc == 0); 6001 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 6002 stub_complete_io(1); 6003 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 6004 CU_ASSERT(g_seek_offset == 12345); 6005 6006 /* Seek hole supported */ 6007 g_seek_hole_offset = 67890; 6008 ut_enable_io_type(SPDK_BDEV_IO_TYPE_SEEK_HOLE, true); 6009 rc = spdk_bdev_seek_hole(desc, io_ch, 0, bdev_seek_cb, NULL); 6010 CU_ASSERT(rc == 0); 6011 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 6012 stub_complete_io(1); 6013 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 6014 CU_ASSERT(g_seek_offset == 67890); 6015 6016 spdk_put_io_channel(io_ch); 6017 spdk_bdev_close(desc); 6018 free_bdev(bdev); 6019 ut_fini_bdev(); 6020 } 6021 6022 static void 6023 bdev_copy(void) 6024 { 6025 struct spdk_bdev *bdev; 6026 struct spdk_bdev_desc *desc = NULL; 6027 struct spdk_io_channel *ioch; 6028 struct ut_expected_io *expected_io; 6029 uint64_t src_offset, num_blocks; 6030 uint32_t num_completed; 6031 int rc; 6032 6033 ut_init_bdev(NULL); 6034 bdev = allocate_bdev("bdev"); 6035 6036 rc = spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc); 6037 CU_ASSERT_EQUAL(rc, 0); 6038 SPDK_CU_ASSERT_FATAL(desc != NULL); 6039 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 6040 ioch = spdk_bdev_get_io_channel(desc); 6041 SPDK_CU_ASSERT_FATAL(ioch != NULL); 6042 6043 fn_table.submit_request = stub_submit_request; 6044 g_io_exp_status = 
SPDK_BDEV_IO_STATUS_SUCCESS; 6045 6046 /* First test that if the bdev supports copy, the request won't be split */ 6047 bdev->md_len = 0; 6048 bdev->blocklen = 512; 6049 num_blocks = 128; 6050 src_offset = bdev->blockcnt - num_blocks; 6051 6052 expected_io = ut_alloc_expected_copy_io(SPDK_BDEV_IO_TYPE_COPY, 0, src_offset, num_blocks); 6053 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 6054 6055 rc = spdk_bdev_copy_blocks(desc, ioch, 0, src_offset, num_blocks, io_done, NULL); 6056 CU_ASSERT_EQUAL(rc, 0); 6057 num_completed = stub_complete_io(1); 6058 CU_ASSERT_EQUAL(num_completed, 1); 6059 6060 /* Check that if copy is not supported it'll still work */ 6061 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, src_offset, num_blocks, 0); 6062 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 6063 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 0, num_blocks, 0); 6064 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 6065 6066 ut_enable_io_type(SPDK_BDEV_IO_TYPE_COPY, false); 6067 6068 rc = spdk_bdev_copy_blocks(desc, ioch, 0, src_offset, num_blocks, io_done, NULL); 6069 CU_ASSERT_EQUAL(rc, 0); 6070 num_completed = stub_complete_io(1); 6071 CU_ASSERT_EQUAL(num_completed, 1); 6072 num_completed = stub_complete_io(1); 6073 CU_ASSERT_EQUAL(num_completed, 1); 6074 6075 ut_enable_io_type(SPDK_BDEV_IO_TYPE_COPY, true); 6076 spdk_put_io_channel(ioch); 6077 spdk_bdev_close(desc); 6078 free_bdev(bdev); 6079 ut_fini_bdev(); 6080 } 6081 6082 static void 6083 bdev_copy_split_test(void) 6084 { 6085 struct spdk_bdev *bdev; 6086 struct spdk_bdev_desc *desc = NULL; 6087 struct spdk_io_channel *ioch; 6088 struct spdk_bdev_channel *bdev_ch; 6089 struct ut_expected_io *expected_io; 6090 struct spdk_bdev_opts bdev_opts = {}; 6091 uint32_t i, num_outstanding; 6092 uint64_t offset, src_offset, num_blocks, max_copy_blocks, num_children; 6093 int rc; 6094 6095 spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts)); 6096 bdev_opts.bdev_io_pool_size = 512; 6097 bdev_opts.bdev_io_cache_size = 64; 6098 rc = spdk_bdev_set_opts(&bdev_opts); 6099 CU_ASSERT(rc == 0); 6100 6101 ut_init_bdev(NULL); 6102 bdev = allocate_bdev("bdev"); 6103 6104 rc = spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc); 6105 CU_ASSERT_EQUAL(rc, 0); 6106 SPDK_CU_ASSERT_FATAL(desc != NULL); 6107 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 6108 ioch = spdk_bdev_get_io_channel(desc); 6109 SPDK_CU_ASSERT_FATAL(ioch != NULL); 6110 bdev_ch = spdk_io_channel_get_ctx(ioch); 6111 CU_ASSERT(TAILQ_EMPTY(&bdev_ch->io_submitted)); 6112 6113 fn_table.submit_request = stub_submit_request; 6114 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 6115 6116 /* Case 1: First test the request won't be split */ 6117 num_blocks = 32; 6118 src_offset = bdev->blockcnt - num_blocks; 6119 6120 g_io_done = false; 6121 expected_io = ut_alloc_expected_copy_io(SPDK_BDEV_IO_TYPE_COPY, 0, src_offset, num_blocks); 6122 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 6123 rc = spdk_bdev_copy_blocks(desc, ioch, 0, src_offset, num_blocks, io_done, NULL); 6124 CU_ASSERT_EQUAL(rc, 0); 6125 CU_ASSERT(g_io_done == false); 6126 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 6127 stub_complete_io(1); 6128 CU_ASSERT(g_io_done == true); 6129 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 6130 6131 /* Case 2: Test the split with 2 child requests */ 6132 max_copy_blocks = 8; 6133 bdev->max_copy = max_copy_blocks; 6134 num_children = 2; 6135 num_blocks =
max_copy_blocks * num_children; 6136 offset = 0; 6137 src_offset = bdev->blockcnt - num_blocks; 6138 6139 g_io_done = false; 6140 for (i = 0; i < num_children; i++) { 6141 expected_io = ut_alloc_expected_copy_io(SPDK_BDEV_IO_TYPE_COPY, offset, 6142 src_offset + offset, max_copy_blocks); 6143 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 6144 offset += max_copy_blocks; 6145 } 6146 6147 rc = spdk_bdev_copy_blocks(desc, ioch, 0, src_offset, num_blocks, io_done, NULL); 6148 CU_ASSERT_EQUAL(rc, 0); 6149 CU_ASSERT(g_io_done == false); 6150 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == num_children); 6151 stub_complete_io(num_children); 6152 CU_ASSERT(g_io_done == true); 6153 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 6154 6155 /* Case 3: Test the split with 15 child requests; the first 8 complete before the rest are submitted */ 6156 num_children = 15; 6157 num_blocks = max_copy_blocks * num_children; 6158 offset = 0; 6159 src_offset = bdev->blockcnt - num_blocks; 6160 6161 g_io_done = false; 6162 for (i = 0; i < num_children; i++) { 6163 expected_io = ut_alloc_expected_copy_io(SPDK_BDEV_IO_TYPE_COPY, offset, 6164 src_offset + offset, max_copy_blocks); 6165 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 6166 offset += max_copy_blocks; 6167 } 6168 6169 rc = spdk_bdev_copy_blocks(desc, ioch, 0, src_offset, num_blocks, io_done, NULL); 6170 CU_ASSERT_EQUAL(rc, 0); 6171 CU_ASSERT(g_io_done == false); 6172 6173 while (num_children > 0) { 6174 num_outstanding = spdk_min(num_children, SPDK_BDEV_MAX_CHILDREN_COPY_REQS); 6175 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == num_outstanding); 6176 stub_complete_io(num_outstanding); 6177 num_children -= num_outstanding; 6178 } 6179 CU_ASSERT(g_io_done == true); 6180 6181 spdk_put_io_channel(ioch); 6182 spdk_bdev_close(desc); 6183 free_bdev(bdev); 6184 ut_fini_bdev(); 6185 } 6186 6187 static void 6188 examine_claim_v1(struct spdk_bdev *bdev) 6189 { 6190 int rc; 6191 6192 rc = spdk_bdev_module_claim_bdev(bdev, NULL, &vbdev_ut_if); 6193 CU_ASSERT(rc == 0); 6194 } 6195 6196 static void 6197 examine_no_lock_held(struct spdk_bdev *bdev) 6198 { 6199 CU_ASSERT(!spdk_spin_held(&g_bdev_mgr.spinlock)); 6200 CU_ASSERT(!spdk_spin_held(&bdev->internal.spinlock)); 6201 } 6202 6203 struct examine_claim_v2_ctx { 6204 struct ut_examine_ctx examine_ctx; 6205 enum spdk_bdev_claim_type claim_type; 6206 struct spdk_bdev_desc *desc; 6207 }; 6208 6209 static void 6210 examine_claim_v2(struct spdk_bdev *bdev) 6211 { 6212 struct examine_claim_v2_ctx *ctx = bdev->ctxt; 6213 int rc; 6214 6215 rc = spdk_bdev_open_ext(bdev->name, false, bdev_ut_event_cb, NULL, &ctx->desc); 6216 CU_ASSERT(rc == 0); 6217 6218 rc = spdk_bdev_module_claim_bdev_desc(ctx->desc, ctx->claim_type, NULL, &vbdev_ut_if); 6219 CU_ASSERT(rc == 0); 6220 } 6221 6222 static void 6223 examine_locks(void) 6224 { 6225 struct spdk_bdev *bdev; 6226 struct ut_examine_ctx ctx = { 0 }; 6227 struct examine_claim_v2_ctx v2_ctx; 6228 6229 /* Without any claims, one code path is taken */ 6230 ctx.examine_config = examine_no_lock_held; 6231 ctx.examine_disk = examine_no_lock_held; 6232 bdev = allocate_bdev_ctx("bdev0", &ctx); 6233 CU_ASSERT(ctx.examine_config_count == 1); 6234 CU_ASSERT(ctx.examine_disk_count == 1); 6235 CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_NONE); 6236 CU_ASSERT(bdev->internal.claim.v1.module == NULL); 6237 free_bdev(bdev); 6238 6239 /* Exercise another path that is taken when examine_config() takes a v1 claim.
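* Claiming from examine_config() leaves the bdev claimed as SPDK_BDEV_CLAIM_EXCL_WRITE by vbdev_ut_if before examine_disk() runs.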
*/ 6240 memset(&ctx, 0, sizeof(ctx)); 6241 ctx.examine_config = examine_claim_v1; 6242 ctx.examine_disk = examine_no_lock_held; 6243 bdev = allocate_bdev_ctx("bdev0", &ctx); 6244 CU_ASSERT(ctx.examine_config_count == 1); 6245 CU_ASSERT(ctx.examine_disk_count == 1); 6246 CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_EXCL_WRITE); 6247 CU_ASSERT(bdev->internal.claim.v1.module == &vbdev_ut_if); 6248 spdk_bdev_module_release_bdev(bdev); 6249 CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_NONE); 6250 CU_ASSERT(bdev->internal.claim.v1.module == NULL); 6251 free_bdev(bdev); 6252 6253 /* Exercise the final path that comes with v2 claims. */ 6254 memset(&v2_ctx, 0, sizeof(v2_ctx)); 6255 v2_ctx.examine_ctx.examine_config = examine_claim_v2; 6256 v2_ctx.examine_ctx.examine_disk = examine_no_lock_held; 6257 v2_ctx.claim_type = SPDK_BDEV_CLAIM_READ_MANY_WRITE_NONE; 6258 bdev = allocate_bdev_ctx("bdev0", &v2_ctx); 6259 CU_ASSERT(v2_ctx.examine_ctx.examine_config_count == 1); 6260 CU_ASSERT(v2_ctx.examine_ctx.examine_disk_count == 1); 6261 CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_READ_MANY_WRITE_NONE); 6262 spdk_bdev_close(v2_ctx.desc); 6263 CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_NONE); 6264 free_bdev(bdev); 6265 } 6266 6267 #define UT_ASSERT_CLAIM_V2_COUNT(bdev, expect) \ 6268 do { \ 6269 uint32_t len = 0; \ 6270 struct spdk_bdev_module_claim *claim; \ 6271 TAILQ_FOREACH(claim, &bdev->internal.claim.v2.claims, link) { \ 6272 len++; \ 6273 } \ 6274 CU_ASSERT(len == expect); \ 6275 } while (0) 6276 6277 static void 6278 claim_v2_rwo(void) 6279 { 6280 struct spdk_bdev *bdev; 6281 struct spdk_bdev_desc *desc; 6282 struct spdk_bdev_desc *desc2; 6283 struct spdk_bdev_claim_opts opts; 6284 int rc; 6285 6286 bdev = allocate_bdev("bdev0"); 6287 6288 /* Claim without options */ 6289 desc = NULL; 6290 rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc); 6291 CU_ASSERT(rc == 0); 6292 SPDK_CU_ASSERT_FATAL(desc != NULL); 6293 rc = spdk_bdev_module_claim_bdev_desc(desc, SPDK_BDEV_CLAIM_READ_MANY_WRITE_ONE, NULL, 6294 &bdev_ut_if); 6295 CU_ASSERT(rc == 0); 6296 CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_READ_MANY_WRITE_ONE); 6297 CU_ASSERT(desc->claim != NULL); 6298 CU_ASSERT(desc->claim->module == &bdev_ut_if); 6299 CU_ASSERT(strcmp(desc->claim->name, "") == 0); 6300 CU_ASSERT(TAILQ_FIRST(&bdev->internal.claim.v2.claims) == desc->claim); 6301 UT_ASSERT_CLAIM_V2_COUNT(bdev, 1); 6302 6303 /* Release the claim by closing the descriptor */ 6304 spdk_bdev_close(desc); 6305 CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_NONE); 6306 CU_ASSERT(TAILQ_EMPTY(&bdev->internal.open_descs)); 6307 UT_ASSERT_CLAIM_V2_COUNT(bdev, 0); 6308 6309 /* Claim with options */ 6310 spdk_bdev_claim_opts_init(&opts, sizeof(opts)); 6311 snprintf(opts.name, sizeof(opts.name), "%s", "claim with options"); 6312 desc = NULL; 6313 rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc); 6314 CU_ASSERT(rc == 0); 6315 SPDK_CU_ASSERT_FATAL(desc != NULL); 6316 rc = spdk_bdev_module_claim_bdev_desc(desc, SPDK_BDEV_CLAIM_READ_MANY_WRITE_ONE, &opts, 6317 &bdev_ut_if); 6318 CU_ASSERT(rc == 0); 6319 CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_READ_MANY_WRITE_ONE); 6320 CU_ASSERT(desc->claim != NULL); 6321 CU_ASSERT(desc->claim->module == &bdev_ut_if); 6322 CU_ASSERT(strcmp(desc->claim->name, "claim with options") == 0); 6323 memset(&opts, 0, sizeof(opts)); 6324 CU_ASSERT(strcmp(desc->claim->name, "claim with options") == 0); 6325 
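/* The claim keeps its own copy of opts.name, so zeroing the caller's opts above did not change the stored name. */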
CU_ASSERT(TAILQ_FIRST(&bdev->internal.claim.v2.claims) == desc->claim); 6326 UT_ASSERT_CLAIM_V2_COUNT(bdev, 1); 6327 6328 /* The claim blocks new writers. */ 6329 desc2 = NULL; 6330 rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc2); 6331 CU_ASSERT(rc == -EPERM); 6332 CU_ASSERT(desc2 == NULL); 6333 6334 /* New readers are allowed */ 6335 desc2 = NULL; 6336 rc = spdk_bdev_open_ext("bdev0", false, bdev_ut_event_cb, NULL, &desc2); 6337 CU_ASSERT(rc == 0); 6338 CU_ASSERT(desc2 != NULL); 6339 CU_ASSERT(!desc2->write); 6340 6341 /* No new v2 RWO claims are allowed */ 6342 rc = spdk_bdev_module_claim_bdev_desc(desc2, SPDK_BDEV_CLAIM_READ_MANY_WRITE_ONE, NULL, 6343 &bdev_ut_if); 6344 CU_ASSERT(rc == -EPERM); 6345 6346 /* No new v2 ROM claims are allowed */ 6347 CU_ASSERT(!desc2->write); 6348 rc = spdk_bdev_module_claim_bdev_desc(desc2, SPDK_BDEV_CLAIM_READ_MANY_WRITE_NONE, NULL, 6349 &bdev_ut_if); 6350 CU_ASSERT(rc == -EPERM); 6351 CU_ASSERT(!desc2->write); 6352 6353 /* No new v2 RWM claims are allowed */ 6354 spdk_bdev_claim_opts_init(&opts, sizeof(opts)); 6355 opts.shared_claim_key = (uint64_t)&opts; 6356 rc = spdk_bdev_module_claim_bdev_desc(desc2, SPDK_BDEV_CLAIM_READ_MANY_WRITE_SHARED, &opts, 6357 &bdev_ut_if); 6358 CU_ASSERT(rc == -EPERM); 6359 CU_ASSERT(!desc2->write); 6360 6361 /* No new v1 claims are allowed */ 6362 rc = spdk_bdev_module_claim_bdev(bdev, NULL, &bdev_ut_if); 6363 CU_ASSERT(rc == -EPERM); 6364 6365 /* None of the above changed the existing claim */ 6366 CU_ASSERT(TAILQ_FIRST(&bdev->internal.claim.v2.claims) == desc->claim); 6367 UT_ASSERT_CLAIM_V2_COUNT(bdev, 1); 6368 6369 /* Closing the first descriptor now allows a new claim and it is promoted to rw. */ 6370 spdk_bdev_close(desc); 6371 CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_NONE); 6372 UT_ASSERT_CLAIM_V2_COUNT(bdev, 0); 6373 CU_ASSERT(!desc2->write); 6374 rc = spdk_bdev_module_claim_bdev_desc(desc2, SPDK_BDEV_CLAIM_READ_MANY_WRITE_ONE, NULL, 6375 &bdev_ut_if); 6376 CU_ASSERT(rc == 0); 6377 CU_ASSERT(desc2->claim != NULL); 6378 CU_ASSERT(desc2->write); 6379 CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_READ_MANY_WRITE_ONE); 6380 CU_ASSERT(TAILQ_FIRST(&bdev->internal.claim.v2.claims) == desc2->claim); 6381 UT_ASSERT_CLAIM_V2_COUNT(bdev, 1); 6382 spdk_bdev_close(desc2); 6383 CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_NONE); 6384 UT_ASSERT_CLAIM_V2_COUNT(bdev, 0); 6385 6386 /* Cannot claim with a key */ 6387 desc = NULL; 6388 rc = spdk_bdev_open_ext("bdev0", false, bdev_ut_event_cb, NULL, &desc); 6389 CU_ASSERT(rc == 0); 6390 SPDK_CU_ASSERT_FATAL(desc != NULL); 6391 spdk_bdev_claim_opts_init(&opts, sizeof(opts)); 6392 opts.shared_claim_key = (uint64_t)&opts; 6393 rc = spdk_bdev_module_claim_bdev_desc(desc, SPDK_BDEV_CLAIM_READ_MANY_WRITE_ONE, &opts, 6394 &bdev_ut_if); 6395 CU_ASSERT(rc == -EINVAL); 6396 CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_NONE); 6397 UT_ASSERT_CLAIM_V2_COUNT(bdev, 0); 6398 spdk_bdev_close(desc); 6399 6400 /* Clean up */ 6401 free_bdev(bdev); 6402 } 6403 6404 static void 6405 claim_v2_rom(void) 6406 { 6407 struct spdk_bdev *bdev; 6408 struct spdk_bdev_desc *desc; 6409 struct spdk_bdev_desc *desc2; 6410 struct spdk_bdev_claim_opts opts; 6411 int rc; 6412 6413 bdev = allocate_bdev("bdev0"); 6414 6415 /* Claim without options */ 6416 desc = NULL; 6417 rc = spdk_bdev_open_ext("bdev0", false, bdev_ut_event_cb, NULL, &desc); 6418 CU_ASSERT(rc == 0); 6419 SPDK_CU_ASSERT_FATAL(desc != NULL); 6420 rc = spdk_bdev_module_claim_bdev_desc(desc, 
SPDK_BDEV_CLAIM_READ_MANY_WRITE_NONE, NULL, 6421 &bdev_ut_if); 6422 CU_ASSERT(rc == 0); 6423 CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_READ_MANY_WRITE_NONE); 6424 CU_ASSERT(desc->claim != NULL); 6425 CU_ASSERT(desc->claim->module == &bdev_ut_if); 6426 CU_ASSERT(strcmp(desc->claim->name, "") == 0); 6427 CU_ASSERT(TAILQ_FIRST(&bdev->internal.claim.v2.claims) == desc->claim); 6428 UT_ASSERT_CLAIM_V2_COUNT(bdev, 1); 6429 6430 /* Release the claim by closing the descriptor */ 6431 spdk_bdev_close(desc); 6432 CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_NONE); 6433 CU_ASSERT(TAILQ_EMPTY(&bdev->internal.open_descs)); 6434 CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_NONE); 6435 UT_ASSERT_CLAIM_V2_COUNT(bdev, 0); 6436 6437 /* Claim with options */ 6438 spdk_bdev_claim_opts_init(&opts, sizeof(opts)); 6439 snprintf(opts.name, sizeof(opts.name), "%s", "claim with options"); 6440 desc = NULL; 6441 rc = spdk_bdev_open_ext("bdev0", false, bdev_ut_event_cb, NULL, &desc); 6442 CU_ASSERT(rc == 0); 6443 SPDK_CU_ASSERT_FATAL(desc != NULL); 6444 rc = spdk_bdev_module_claim_bdev_desc(desc, SPDK_BDEV_CLAIM_READ_MANY_WRITE_NONE, &opts, 6445 &bdev_ut_if); 6446 CU_ASSERT(rc == 0); 6447 CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_READ_MANY_WRITE_NONE); 6448 SPDK_CU_ASSERT_FATAL(desc->claim != NULL); 6449 CU_ASSERT(desc->claim->module == &bdev_ut_if); 6450 CU_ASSERT(strcmp(desc->claim->name, "claim with options") == 0); 6451 memset(&opts, 0, sizeof(opts)); 6452 CU_ASSERT(strcmp(desc->claim->name, "claim with options") == 0); 6453 CU_ASSERT(TAILQ_FIRST(&bdev->internal.claim.v2.claims) == desc->claim); 6454 UT_ASSERT_CLAIM_V2_COUNT(bdev, 1); 6455 6456 /* The claim blocks new writers. */ 6457 desc2 = NULL; 6458 rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc2); 6459 CU_ASSERT(rc == -EPERM); 6460 CU_ASSERT(desc2 == NULL); 6461 6462 /* New readers are allowed */ 6463 desc2 = NULL; 6464 rc = spdk_bdev_open_ext("bdev0", false, bdev_ut_event_cb, NULL, &desc2); 6465 CU_ASSERT(rc == 0); 6466 CU_ASSERT(desc2 != NULL); 6467 CU_ASSERT(!desc2->write); 6468 6469 /* No new v2 RWO claims are allowed */ 6470 rc = spdk_bdev_module_claim_bdev_desc(desc2, SPDK_BDEV_CLAIM_READ_MANY_WRITE_ONE, NULL, 6471 &bdev_ut_if); 6472 CU_ASSERT(rc == -EPERM); 6473 6474 /* No new v2 RWM claims are allowed */ 6475 spdk_bdev_claim_opts_init(&opts, sizeof(opts)); 6476 opts.shared_claim_key = (uint64_t)&opts; 6477 rc = spdk_bdev_module_claim_bdev_desc(desc2, SPDK_BDEV_CLAIM_READ_MANY_WRITE_SHARED, &opts, 6478 &bdev_ut_if); 6479 CU_ASSERT(rc == -EPERM); 6480 CU_ASSERT(!desc2->write); 6481 6482 /* No new v1 claims are allowed */ 6483 rc = spdk_bdev_module_claim_bdev(bdev, NULL, &bdev_ut_if); 6484 CU_ASSERT(rc == -EPERM); 6485 6486 /* None of the above messed up the existing claim */ 6487 CU_ASSERT(TAILQ_FIRST(&bdev->internal.claim.v2.claims) == desc->claim); 6488 UT_ASSERT_CLAIM_V2_COUNT(bdev, 1); 6489 6490 /* New v2 ROM claims are allowed and the descriptor stays read-only. 
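* Multiple READ_MANY_WRITE_NONE claims can coexist on one bdev; each claim gets its own entry on the v2 claims list.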
static void
claim_v2_rwm(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc;
	struct spdk_bdev_desc *desc2;
	struct spdk_bdev_claim_opts opts;
	char good_key, bad_key;
	int rc;

	bdev = allocate_bdev("bdev0");

	/* Claim without options should fail */
	desc = NULL;
	rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(desc != NULL);
	rc = spdk_bdev_module_claim_bdev_desc(desc, SPDK_BDEV_CLAIM_READ_MANY_WRITE_SHARED, NULL,
					      &bdev_ut_if);
	CU_ASSERT(rc == -EINVAL);
	CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_NONE);
	UT_ASSERT_CLAIM_V2_COUNT(bdev, 0);
	CU_ASSERT(desc->claim == NULL);

	/* Claim with options */
	spdk_bdev_claim_opts_init(&opts, sizeof(opts));
	snprintf(opts.name, sizeof(opts.name), "%s", "claim with options");
	opts.shared_claim_key = (uint64_t)&good_key;
	rc = spdk_bdev_module_claim_bdev_desc(desc, SPDK_BDEV_CLAIM_READ_MANY_WRITE_SHARED, &opts,
					      &bdev_ut_if);
	CU_ASSERT(rc == 0);
	CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_READ_MANY_WRITE_SHARED);
	SPDK_CU_ASSERT_FATAL(desc->claim != NULL);
	CU_ASSERT(desc->claim->module == &bdev_ut_if);
	CU_ASSERT(strcmp(desc->claim->name, "claim with options") == 0);
	memset(&opts, 0, sizeof(opts));
	CU_ASSERT(strcmp(desc->claim->name, "claim with options") == 0);
	CU_ASSERT(TAILQ_FIRST(&bdev->internal.claim.v2.claims) == desc->claim);
	UT_ASSERT_CLAIM_V2_COUNT(bdev, 1);

	/* The claim blocks new writers. */
	desc2 = NULL;
	rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc2);
	CU_ASSERT(rc == -EPERM);
	CU_ASSERT(desc2 == NULL);

	/* New readers are allowed */
	desc2 = NULL;
	rc = spdk_bdev_open_ext("bdev0", false, bdev_ut_event_cb, NULL, &desc2);
	CU_ASSERT(rc == 0);
	CU_ASSERT(desc2 != NULL);
	CU_ASSERT(!desc2->write);

	/* No new v2 RWO claims are allowed */
	rc = spdk_bdev_module_claim_bdev_desc(desc2, SPDK_BDEV_CLAIM_READ_MANY_WRITE_ONE, NULL,
					      &bdev_ut_if);
	CU_ASSERT(rc == -EPERM);

	/* No new v2 ROM claims are allowed and the descriptor stays read-only. */
	CU_ASSERT(!desc2->write);
	rc = spdk_bdev_module_claim_bdev_desc(desc2, SPDK_BDEV_CLAIM_READ_MANY_WRITE_NONE, NULL,
					      &bdev_ut_if);
	CU_ASSERT(rc == -EPERM);
	CU_ASSERT(!desc2->write);

	/* No new v1 claims are allowed */
	rc = spdk_bdev_module_claim_bdev(bdev, NULL, &bdev_ut_if);
	CU_ASSERT(rc == -EPERM);

	/* No new v2 RWM claims are allowed if the key does not match */
	spdk_bdev_claim_opts_init(&opts, sizeof(opts));
	opts.shared_claim_key = (uint64_t)&bad_key;
	rc = spdk_bdev_module_claim_bdev_desc(desc2, SPDK_BDEV_CLAIM_READ_MANY_WRITE_SHARED, &opts,
					      &bdev_ut_if);
	CU_ASSERT(rc == -EPERM);
	CU_ASSERT(!desc2->write);

	/* None of the above changed the existing claim */
	CU_ASSERT(TAILQ_FIRST(&bdev->internal.claim.v2.claims) == desc->claim);
	UT_ASSERT_CLAIM_V2_COUNT(bdev, 1);

	/* New v2 RWM claims are allowed and the descriptor is promoted if the key matches. */
	spdk_bdev_claim_opts_init(&opts, sizeof(opts));
	opts.shared_claim_key = (uint64_t)&good_key;
	CU_ASSERT(!desc2->write);
	rc = spdk_bdev_module_claim_bdev_desc(desc2, SPDK_BDEV_CLAIM_READ_MANY_WRITE_SHARED, &opts,
					      &bdev_ut_if);
	CU_ASSERT(rc == 0);
	CU_ASSERT(desc2->write);
	CU_ASSERT(TAILQ_NEXT(desc->claim, link) == desc2->claim);
	UT_ASSERT_CLAIM_V2_COUNT(bdev, 2);

	/* Claim remains when closing the first descriptor */
	spdk_bdev_close(desc);
	CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_READ_MANY_WRITE_SHARED);
	CU_ASSERT(!TAILQ_EMPTY(&bdev->internal.open_descs));
	CU_ASSERT(TAILQ_FIRST(&bdev->internal.claim.v2.claims) == desc2->claim);
	UT_ASSERT_CLAIM_V2_COUNT(bdev, 1);

	/* Claim removed when closing the other descriptor */
	spdk_bdev_close(desc2);
	CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_NONE);
	CU_ASSERT(TAILQ_EMPTY(&bdev->internal.open_descs));

	/* Cannot claim without a key */
	desc = NULL;
	rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(desc != NULL);
	spdk_bdev_claim_opts_init(&opts, sizeof(opts));
	rc = spdk_bdev_module_claim_bdev_desc(desc, SPDK_BDEV_CLAIM_READ_MANY_WRITE_SHARED, &opts,
					      &bdev_ut_if);
	CU_ASSERT(rc == -EINVAL);
	spdk_bdev_close(desc);
	CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_NONE);
	CU_ASSERT(TAILQ_EMPTY(&bdev->internal.open_descs));

	/* Clean up */
	free_bdev(bdev);
}
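
/*
 * While another read-write descriptor is open, no v2 claim can be taken: RWO and RWM
 * claims fail with -EPERM, and ROM claims fail with -EINVAL because the claiming
 * descriptor itself is read-write.
 */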
static void
claim_v2_existing_writer(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc;
	struct spdk_bdev_desc *desc2;
	struct spdk_bdev_claim_opts opts;
	enum spdk_bdev_claim_type type;
	enum spdk_bdev_claim_type types[] = {
		SPDK_BDEV_CLAIM_READ_MANY_WRITE_ONE,
		SPDK_BDEV_CLAIM_READ_MANY_WRITE_SHARED,
		SPDK_BDEV_CLAIM_READ_MANY_WRITE_NONE
	};
	size_t i;
	int rc;

	bdev = allocate_bdev("bdev0");

	desc = NULL;
	rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(desc != NULL);
	desc2 = NULL;
	rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc2);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(desc2 != NULL);

	for (i = 0; i < SPDK_COUNTOF(types); i++) {
		type = types[i];
		spdk_bdev_claim_opts_init(&opts, sizeof(opts));
		if (type == SPDK_BDEV_CLAIM_READ_MANY_WRITE_SHARED) {
			opts.shared_claim_key = (uint64_t)&opts;
		}
		rc = spdk_bdev_module_claim_bdev_desc(desc, type, &opts, &bdev_ut_if);
		if (type == SPDK_BDEV_CLAIM_READ_MANY_WRITE_NONE) {
			CU_ASSERT(rc == -EINVAL);
		} else {
			CU_ASSERT(rc == -EPERM);
		}
		CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_NONE);
		rc = spdk_bdev_module_claim_bdev_desc(desc2, type, &opts, &bdev_ut_if);
		if (type == SPDK_BDEV_CLAIM_READ_MANY_WRITE_NONE) {
			CU_ASSERT(rc == -EINVAL);
		} else {
			CU_ASSERT(rc == -EPERM);
		}
		CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_NONE);
	}

	spdk_bdev_close(desc);
	spdk_bdev_close(desc2);

	/* Clean up */
	free_bdev(bdev);
}
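
/*
 * A v1 claim (SPDK_BDEV_CLAIM_EXCL_WRITE, taken with spdk_bdev_module_claim_bdev())
 * blocks every type of v2 claim until the module releases the bdev.
 */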
static void
claim_v2_existing_v1(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc;
	struct spdk_bdev_claim_opts opts;
	enum spdk_bdev_claim_type type;
	enum spdk_bdev_claim_type types[] = {
		SPDK_BDEV_CLAIM_READ_MANY_WRITE_ONE,
		SPDK_BDEV_CLAIM_READ_MANY_WRITE_SHARED,
		SPDK_BDEV_CLAIM_READ_MANY_WRITE_NONE
	};
	size_t i;
	int rc;

	bdev = allocate_bdev("bdev0");

	rc = spdk_bdev_module_claim_bdev(bdev, NULL, &bdev_ut_if);
	CU_ASSERT(rc == 0);
	CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_EXCL_WRITE);

	desc = NULL;
	rc = spdk_bdev_open_ext("bdev0", false, bdev_ut_event_cb, NULL, &desc);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(desc != NULL);

	for (i = 0; i < SPDK_COUNTOF(types); i++) {
		type = types[i];
		spdk_bdev_claim_opts_init(&opts, sizeof(opts));
		if (type == SPDK_BDEV_CLAIM_READ_MANY_WRITE_SHARED) {
			opts.shared_claim_key = (uint64_t)&opts;
		}
		rc = spdk_bdev_module_claim_bdev_desc(desc, type, &opts, &bdev_ut_if);
		CU_ASSERT(rc == -EPERM);
		CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_EXCL_WRITE);
	}

	spdk_bdev_module_release_bdev(bdev);
	spdk_bdev_close(desc);

	/* Clean up */
	free_bdev(bdev);
}
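
/*
 * Conversely, any held v2 claim blocks a v1 claim with -EPERM; once the v2 claim is
 * released by closing its descriptor, the v1 claim succeeds.
 */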
static void
claim_v1_existing_v2(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc;
	struct spdk_bdev_claim_opts opts;
	enum spdk_bdev_claim_type type;
	enum spdk_bdev_claim_type types[] = {
		SPDK_BDEV_CLAIM_READ_MANY_WRITE_ONE,
		SPDK_BDEV_CLAIM_READ_MANY_WRITE_SHARED,
		SPDK_BDEV_CLAIM_READ_MANY_WRITE_NONE
	};
	size_t i;
	int rc;

	bdev = allocate_bdev("bdev0");

	for (i = 0; i < SPDK_COUNTOF(types); i++) {
		type = types[i];

		desc = NULL;
		rc = spdk_bdev_open_ext("bdev0", false, bdev_ut_event_cb, NULL, &desc);
		CU_ASSERT(rc == 0);
		SPDK_CU_ASSERT_FATAL(desc != NULL);

		/* Get a v2 claim */
		spdk_bdev_claim_opts_init(&opts, sizeof(opts));
		if (type == SPDK_BDEV_CLAIM_READ_MANY_WRITE_SHARED) {
			opts.shared_claim_key = (uint64_t)&opts;
		}
		rc = spdk_bdev_module_claim_bdev_desc(desc, type, &opts, &bdev_ut_if);
		CU_ASSERT(rc == 0);

		/* Fail to get a v1 claim */
		rc = spdk_bdev_module_claim_bdev(bdev, NULL, &bdev_ut_if);
		CU_ASSERT(rc == -EPERM);

		spdk_bdev_close(desc);

		/* Now v1 succeeds */
		rc = spdk_bdev_module_claim_bdev(bdev, NULL, &bdev_ut_if);
		CU_ASSERT(rc == 0);
		spdk_bdev_module_release_bdev(bdev);
	}

	/* Clean up */
	free_bdev(bdev);
}

static void ut_examine_claimed_config0(struct spdk_bdev *bdev);
static void ut_examine_claimed_disk0(struct spdk_bdev *bdev);
static void ut_examine_claimed_config1(struct spdk_bdev *bdev);
static void ut_examine_claimed_disk1(struct spdk_bdev *bdev);

#define UT_MAX_EXAMINE_MODS 2
struct spdk_bdev_module examine_claimed_mods[UT_MAX_EXAMINE_MODS] = {
	{
		.name = "vbdev_ut_examine0",
		.module_init = vbdev_ut_module_init,
		.module_fini = vbdev_ut_module_fini,
		.examine_config = ut_examine_claimed_config0,
		.examine_disk = ut_examine_claimed_disk0,
	},
	{
		.name = "vbdev_ut_examine1",
		.module_init = vbdev_ut_module_init,
		.module_fini = vbdev_ut_module_fini,
		.examine_config = ut_examine_claimed_config1,
		.examine_disk = ut_examine_claimed_disk1,
	}
};

SPDK_BDEV_MODULE_REGISTER(bdev_ut_claimed0, &examine_claimed_mods[0])
SPDK_BDEV_MODULE_REGISTER(bdev_ut_claimed1, &examine_claimed_mods[1])
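
/*
 * Per-module context for the examine_claimed test below. Each examine module counts its
 * examine_config/examine_disk invocations and, when claim_type is set, takes a claim of
 * that type from its examine_config callback, expecting expect_claim_err in return.
 */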
struct ut_examine_claimed_ctx {
	uint32_t examine_config_count;
	uint32_t examine_disk_count;

	/* Claim type to take, with these options */
	enum spdk_bdev_claim_type claim_type;
	struct spdk_bdev_claim_opts claim_opts;

	/* Expected return value from spdk_bdev_module_claim_bdev_desc() */
	int expect_claim_err;

	/* Descriptor used for a claim */
	struct spdk_bdev_desc *desc;
} examine_claimed_ctx[UT_MAX_EXAMINE_MODS];

bool ut_testing_examine_claimed;

static void
reset_examine_claimed_ctx(void)
{
	struct ut_examine_claimed_ctx *ctx;
	uint32_t i;

	for (i = 0; i < SPDK_COUNTOF(examine_claimed_ctx); i++) {
		ctx = &examine_claimed_ctx[i];
		if (ctx->desc != NULL) {
			spdk_bdev_close(ctx->desc);
		}
		memset(ctx, 0, sizeof(*ctx));
		spdk_bdev_claim_opts_init(&ctx->claim_opts, sizeof(ctx->claim_opts));
	}
}

static void
examine_claimed_config(struct spdk_bdev *bdev, uint32_t modnum)
{
	SPDK_CU_ASSERT_FATAL(modnum < UT_MAX_EXAMINE_MODS);
	struct spdk_bdev_module *module = &examine_claimed_mods[modnum];
	struct ut_examine_claimed_ctx *ctx = &examine_claimed_ctx[modnum];
	int rc;

	if (!ut_testing_examine_claimed) {
		spdk_bdev_module_examine_done(module);
		return;
	}

	ctx->examine_config_count++;

	if (ctx->claim_type != SPDK_BDEV_CLAIM_NONE) {
		rc = spdk_bdev_open_ext(bdev->name, false, bdev_ut_event_cb, &ctx->claim_opts,
					&ctx->desc);
		CU_ASSERT(rc == 0);

		rc = spdk_bdev_module_claim_bdev_desc(ctx->desc, ctx->claim_type, NULL, module);
		CU_ASSERT(rc == ctx->expect_claim_err);
	}
	spdk_bdev_module_examine_done(module);
}

static void
ut_examine_claimed_config0(struct spdk_bdev *bdev)
{
	examine_claimed_config(bdev, 0);
}

static void
ut_examine_claimed_config1(struct spdk_bdev *bdev)
{
	examine_claimed_config(bdev, 1);
}

static void
examine_claimed_disk(struct spdk_bdev *bdev, uint32_t modnum)
{
	SPDK_CU_ASSERT_FATAL(modnum < UT_MAX_EXAMINE_MODS);
	struct spdk_bdev_module *module = &examine_claimed_mods[modnum];
	struct ut_examine_claimed_ctx *ctx = &examine_claimed_ctx[modnum];

	if (!ut_testing_examine_claimed) {
		spdk_bdev_module_examine_done(module);
		return;
	}

	ctx->examine_disk_count++;

	spdk_bdev_module_examine_done(module);
}

static void
ut_examine_claimed_disk0(struct spdk_bdev *bdev)
{
	examine_claimed_disk(bdev, 0);
}

static void
ut_examine_claimed_disk1(struct spdk_bdev *bdev)
{
	examine_claimed_disk(bdev, 1);
}
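
/*
 * Exercise the examine hooks above in three scenarios: one module claiming, two modules
 * with compatible ROM claims, and two modules with conflicting claim types.
 */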
static void
examine_claimed(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_module *mod = examine_claimed_mods;
	struct ut_examine_claimed_ctx *ctx = examine_claimed_ctx;

	ut_testing_examine_claimed = true;
	reset_examine_claimed_ctx();

	/*
	 * With one module claiming, both modules' examine_config should be called, but only the
	 * claiming module's examine_disk should be called.
	 */
	ctx[0].claim_type = SPDK_BDEV_CLAIM_READ_MANY_WRITE_NONE;
	bdev = allocate_bdev("bdev0");
	CU_ASSERT(ctx[0].examine_config_count == 1);
	CU_ASSERT(ctx[0].examine_disk_count == 1);
	SPDK_CU_ASSERT_FATAL(ctx[0].desc != NULL);
	CU_ASSERT(ctx[0].desc->claim->module == &mod[0]);
	CU_ASSERT(ctx[1].examine_config_count == 1);
	CU_ASSERT(ctx[1].examine_disk_count == 0);
	CU_ASSERT(ctx[1].desc == NULL);
	reset_examine_claimed_ctx();
	free_bdev(bdev);

	/*
	 * With two modules claiming, both modules' examine_config and examine_disk should be
	 * called.
	 */
	ctx[0].claim_type = SPDK_BDEV_CLAIM_READ_MANY_WRITE_NONE;
	ctx[1].claim_type = SPDK_BDEV_CLAIM_READ_MANY_WRITE_NONE;
	bdev = allocate_bdev("bdev0");
	CU_ASSERT(ctx[0].examine_config_count == 1);
	CU_ASSERT(ctx[0].examine_disk_count == 1);
	SPDK_CU_ASSERT_FATAL(ctx[0].desc != NULL);
	CU_ASSERT(ctx[0].desc->claim->module == &mod[0]);
	CU_ASSERT(ctx[1].examine_config_count == 1);
	CU_ASSERT(ctx[1].examine_disk_count == 1);
	SPDK_CU_ASSERT_FATAL(ctx[1].desc != NULL);
	CU_ASSERT(ctx[1].desc->claim->module == &mod[1]);
	reset_examine_claimed_ctx();
	free_bdev(bdev);

	/*
	 * If two vbdev modules try to claim with conflicting claim types, the module that was added
	 * last wins. The winner gets the claim and is the only one that has its examine_disk
	 * callback invoked.
	 */
	ctx[0].claim_type = SPDK_BDEV_CLAIM_READ_MANY_WRITE_NONE;
	ctx[0].expect_claim_err = -EPERM;
	ctx[1].claim_type = SPDK_BDEV_CLAIM_READ_MANY_WRITE_ONE;
	bdev = allocate_bdev("bdev0");
	CU_ASSERT(ctx[0].examine_config_count == 1);
	CU_ASSERT(ctx[0].examine_disk_count == 0);
	CU_ASSERT(ctx[1].examine_config_count == 1);
	CU_ASSERT(ctx[1].examine_disk_count == 1);
	SPDK_CU_ASSERT_FATAL(ctx[1].desc != NULL);
	CU_ASSERT(ctx[1].desc->claim->module == &mod[1]);
	CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_READ_MANY_WRITE_ONE);
	reset_examine_claimed_ctx();
	free_bdev(bdev);

	ut_testing_examine_claimed = false;
}
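
/*
 * Register all tests and run them on a single thread provided by the ut_multithread
 * harness (see allocate_threads()/set_thread() below).
 */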
int
main(int argc, char **argv)
{
	CU_pSuite suite = NULL;
	unsigned int num_failures;

	CU_set_error_action(CUEA_ABORT);
	CU_initialize_registry();

	suite = CU_add_suite("bdev", null_init, null_clean);

	CU_ADD_TEST(suite, bytes_to_blocks_test);
	CU_ADD_TEST(suite, num_blocks_test);
	CU_ADD_TEST(suite, io_valid_test);
	CU_ADD_TEST(suite, open_write_test);
	CU_ADD_TEST(suite, claim_test);
	CU_ADD_TEST(suite, alias_add_del_test);
	CU_ADD_TEST(suite, get_device_stat_test);
	CU_ADD_TEST(suite, bdev_io_types_test);
	CU_ADD_TEST(suite, bdev_io_wait_test);
	CU_ADD_TEST(suite, bdev_io_spans_split_test);
	CU_ADD_TEST(suite, bdev_io_boundary_split_test);
	CU_ADD_TEST(suite, bdev_io_max_size_and_segment_split_test);
	CU_ADD_TEST(suite, bdev_io_mix_split_test);
	CU_ADD_TEST(suite, bdev_io_split_with_io_wait);
	CU_ADD_TEST(suite, bdev_io_write_unit_split_test);
	CU_ADD_TEST(suite, bdev_io_alignment_with_boundary);
	CU_ADD_TEST(suite, bdev_io_alignment);
	CU_ADD_TEST(suite, bdev_histograms);
	CU_ADD_TEST(suite, bdev_write_zeroes);
	CU_ADD_TEST(suite, bdev_compare_and_write);
	CU_ADD_TEST(suite, bdev_compare);
	CU_ADD_TEST(suite, bdev_compare_emulated);
	CU_ADD_TEST(suite, bdev_zcopy_write);
	CU_ADD_TEST(suite, bdev_zcopy_read);
	CU_ADD_TEST(suite, bdev_open_while_hotremove);
	CU_ADD_TEST(suite, bdev_close_while_hotremove);
	CU_ADD_TEST(suite, bdev_open_ext);
	CU_ADD_TEST(suite, bdev_open_ext_unregister);
	CU_ADD_TEST(suite, bdev_set_io_timeout);
	CU_ADD_TEST(suite, bdev_set_qd_sampling);
	CU_ADD_TEST(suite, lba_range_overlap);
	CU_ADD_TEST(suite, lock_lba_range_check_ranges);
	CU_ADD_TEST(suite, lock_lba_range_with_io_outstanding);
	CU_ADD_TEST(suite, lock_lba_range_overlapped);
	CU_ADD_TEST(suite, bdev_io_abort);
	CU_ADD_TEST(suite, bdev_unmap);
	CU_ADD_TEST(suite, bdev_write_zeroes_split_test);
	CU_ADD_TEST(suite, bdev_set_options_test);
	CU_ADD_TEST(suite, bdev_multi_allocation);
	CU_ADD_TEST(suite, bdev_get_memory_domains);
	CU_ADD_TEST(suite, bdev_io_ext);
	CU_ADD_TEST(suite, bdev_io_ext_no_opts);
	CU_ADD_TEST(suite, bdev_io_ext_invalid_opts);
	CU_ADD_TEST(suite, bdev_io_ext_split);
	CU_ADD_TEST(suite, bdev_io_ext_bounce_buffer);
	CU_ADD_TEST(suite, bdev_register_uuid_alias);
	CU_ADD_TEST(suite, bdev_unregister_by_name);
	CU_ADD_TEST(suite, for_each_bdev_test);
	CU_ADD_TEST(suite, bdev_seek_test);
	CU_ADD_TEST(suite, bdev_copy);
	CU_ADD_TEST(suite, bdev_copy_split_test);
	CU_ADD_TEST(suite, examine_locks);
	CU_ADD_TEST(suite, claim_v2_rwo);
	CU_ADD_TEST(suite, claim_v2_rom);
	CU_ADD_TEST(suite, claim_v2_rwm);
	CU_ADD_TEST(suite, claim_v2_existing_writer);
	CU_ADD_TEST(suite, claim_v2_existing_v1);
	CU_ADD_TEST(suite, claim_v1_existing_v2);
	CU_ADD_TEST(suite, examine_claimed);

	allocate_cores(1);
	allocate_threads(1);
	set_thread(0);

	CU_basic_set_mode(CU_BRM_VERBOSE);
	CU_basic_run_tests();
	num_failures = CU_get_number_of_failures();
	CU_cleanup_registry();

	free_threads();
	free_cores();

	return num_failures;
}