1 /* SPDX-License-Identifier: BSD-3-Clause 2 * Copyright (C) 2017 Intel Corporation. All rights reserved. 3 * Copyright (c) 2019 Mellanox Technologies LTD. All rights reserved. 4 * Copyright (c) 2021-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 5 */ 6 7 #include "spdk_internal/cunit.h" 8 9 #include "common/lib/ut_multithread.c" 10 #include "unit/lib/json_mock.c" 11 12 #include "spdk/config.h" 13 /* HACK: disable VTune integration so the unit test doesn't need VTune headers and libs to build */ 14 #undef SPDK_CONFIG_VTUNE 15 16 #include "bdev/bdev.c" 17 18 #include "common/lib/bdev/common_stubs.h" 19 20 static bool g_memory_domain_pull_data_called; 21 static bool g_memory_domain_push_data_called; 22 static int g_accel_io_device; 23 24 DEFINE_RETURN_MOCK(spdk_memory_domain_pull_data, int); 25 int 26 spdk_memory_domain_pull_data(struct spdk_memory_domain *src_domain, void *src_domain_ctx, 27 struct iovec *src_iov, uint32_t src_iov_cnt, struct iovec *dst_iov, uint32_t dst_iov_cnt, 28 spdk_memory_domain_data_cpl_cb cpl_cb, void *cpl_cb_arg) 29 { 30 g_memory_domain_pull_data_called = true; 31 HANDLE_RETURN_MOCK(spdk_memory_domain_pull_data); 32 cpl_cb(cpl_cb_arg, 0); 33 return 0; 34 } 35 36 DEFINE_RETURN_MOCK(spdk_memory_domain_push_data, int); 37 int 38 spdk_memory_domain_push_data(struct spdk_memory_domain *dst_domain, void *dst_domain_ctx, 39 struct iovec *dst_iov, uint32_t dst_iovcnt, struct iovec *src_iov, uint32_t src_iovcnt, 40 spdk_memory_domain_data_cpl_cb cpl_cb, void *cpl_cb_arg) 41 { 42 g_memory_domain_push_data_called = true; 43 HANDLE_RETURN_MOCK(spdk_memory_domain_push_data); 44 cpl_cb(cpl_cb_arg, 0); 45 return 0; 46 } 47 48 struct spdk_io_channel * 49 spdk_accel_get_io_channel(void) 50 { 51 return spdk_get_io_channel(&g_accel_io_device); 52 } 53 54 int g_status; 55 int g_count; 56 enum spdk_bdev_event_type g_event_type1; 57 enum spdk_bdev_event_type g_event_type2; 58 enum spdk_bdev_event_type g_event_type3; 59 enum spdk_bdev_event_type g_event_type4; 60 struct spdk_histogram_data *g_histogram; 61 void *g_unregister_arg; 62 int g_unregister_rc; 63 64 void 65 spdk_scsi_nvme_translate(const struct spdk_bdev_io *bdev_io, 66 int *sc, int *sk, int *asc, int *ascq) 67 { 68 } 69 70 static int 71 ut_accel_ch_create_cb(void *io_device, void *ctx) 72 { 73 return 0; 74 } 75 76 static void 77 ut_accel_ch_destroy_cb(void *io_device, void *ctx) 78 { 79 } 80 81 static int 82 ut_bdev_setup(void) 83 { 84 spdk_io_device_register(&g_accel_io_device, ut_accel_ch_create_cb, 85 ut_accel_ch_destroy_cb, 0, NULL); 86 return 0; 87 } 88 89 static int 90 ut_bdev_teardown(void) 91 { 92 spdk_io_device_unregister(&g_accel_io_device, NULL); 93 94 return 0; 95 } 96 97 static int 98 stub_destruct(void *ctx) 99 { 100 return 0; 101 } 102 103 struct ut_expected_io { 104 uint8_t type; 105 uint64_t offset; 106 uint64_t src_offset; 107 uint64_t length; 108 int iovcnt; 109 struct iovec iov[SPDK_BDEV_IO_NUM_CHILD_IOV]; 110 void *md_buf; 111 TAILQ_ENTRY(ut_expected_io) link; 112 }; 113 114 struct bdev_ut_io { 115 TAILQ_ENTRY(bdev_ut_io) link; 116 }; 117 118 struct bdev_ut_channel { 119 TAILQ_HEAD(, bdev_ut_io) outstanding_io; 120 uint32_t outstanding_io_count; 121 TAILQ_HEAD(, ut_expected_io) expected_io; 122 }; 123 124 static bool g_io_done; 125 static struct spdk_bdev_io *g_bdev_io; 126 static enum spdk_bdev_io_status g_io_status; 127 static enum spdk_bdev_io_status g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 128 static uint32_t g_bdev_ut_io_device; 129 static struct bdev_ut_channel *g_bdev_ut_channel; 130 static void *g_compare_read_buf; 131 static uint32_t g_compare_read_buf_len; 132 static void *g_compare_write_buf; 133 static uint32_t g_compare_write_buf_len; 134 static void *g_compare_md_buf; 135 static bool g_abort_done; 136 static enum spdk_bdev_io_status g_abort_status; 137 static void *g_zcopy_read_buf; 138 static uint32_t g_zcopy_read_buf_len; 139 static void *g_zcopy_write_buf; 140 static uint32_t g_zcopy_write_buf_len; 141 static struct spdk_bdev_io *g_zcopy_bdev_io; 142 static uint64_t g_seek_data_offset; 143 static uint64_t g_seek_hole_offset; 144 static uint64_t g_seek_offset; 145 146 static struct ut_expected_io * 147 ut_alloc_expected_io(uint8_t type, uint64_t offset, uint64_t length, int iovcnt) 148 { 149 struct ut_expected_io *expected_io; 150 151 expected_io = calloc(1, sizeof(*expected_io)); 152 SPDK_CU_ASSERT_FATAL(expected_io != NULL); 153 154 expected_io->type = type; 155 expected_io->offset = offset; 156 expected_io->length = length; 157 expected_io->iovcnt = iovcnt; 158 159 return expected_io; 160 } 161 162 static struct ut_expected_io * 163 ut_alloc_expected_copy_io(uint8_t type, uint64_t offset, uint64_t src_offset, uint64_t length) 164 { 165 struct ut_expected_io *expected_io; 166 167 expected_io = calloc(1, sizeof(*expected_io)); 168 SPDK_CU_ASSERT_FATAL(expected_io != NULL); 169 170 expected_io->type = type; 171 expected_io->offset = offset; 172 expected_io->src_offset = src_offset; 173 expected_io->length = length; 174 175 return expected_io; 176 } 177 178 static void 179 ut_expected_io_set_iov(struct ut_expected_io *expected_io, int pos, void *base, size_t len) 180 { 181 expected_io->iov[pos].iov_base = base; 182 expected_io->iov[pos].iov_len = len; 183 } 184 185 static void 186 stub_submit_request(struct spdk_io_channel *_ch, struct spdk_bdev_io *bdev_io) 187 { 188 struct bdev_ut_channel *ch = spdk_io_channel_get_ctx(_ch); 189 struct ut_expected_io *expected_io; 190 struct iovec *iov, *expected_iov; 191 struct spdk_bdev_io *bio_to_abort; 192 struct bdev_ut_io *bio; 193 int i; 194 195 g_bdev_io = bdev_io; 196 197 if (g_compare_read_buf && bdev_io->type == SPDK_BDEV_IO_TYPE_READ) { 198 uint32_t len = bdev_io->u.bdev.iovs[0].iov_len; 199 200 CU_ASSERT(bdev_io->u.bdev.iovcnt == 1); 201 CU_ASSERT(g_compare_read_buf_len == len); 202 memcpy(bdev_io->u.bdev.iovs[0].iov_base, g_compare_read_buf, len); 203 if (bdev_io->bdev->md_len && bdev_io->u.bdev.md_buf && g_compare_md_buf) { 204 memcpy(bdev_io->u.bdev.md_buf, g_compare_md_buf, 205 bdev_io->bdev->md_len * bdev_io->u.bdev.num_blocks); 206 } 207 } 208 209 if (g_compare_write_buf && bdev_io->type == SPDK_BDEV_IO_TYPE_WRITE) { 210 uint32_t len = bdev_io->u.bdev.iovs[0].iov_len; 211 212 CU_ASSERT(bdev_io->u.bdev.iovcnt == 1); 213 CU_ASSERT(g_compare_write_buf_len == len); 214 memcpy(g_compare_write_buf, bdev_io->u.bdev.iovs[0].iov_base, len); 215 } 216 217 if (g_compare_read_buf && bdev_io->type == SPDK_BDEV_IO_TYPE_COMPARE) { 218 uint32_t len = bdev_io->u.bdev.iovs[0].iov_len; 219 220 CU_ASSERT(bdev_io->u.bdev.iovcnt == 1); 221 CU_ASSERT(g_compare_read_buf_len == len); 222 if (memcmp(bdev_io->u.bdev.iovs[0].iov_base, g_compare_read_buf, len)) { 223 g_io_exp_status = SPDK_BDEV_IO_STATUS_MISCOMPARE; 224 } 225 if (bdev_io->u.bdev.md_buf && 226 memcmp(bdev_io->u.bdev.md_buf, g_compare_md_buf, 227 bdev_io->bdev->md_len * bdev_io->u.bdev.num_blocks)) { 228 g_io_exp_status = SPDK_BDEV_IO_STATUS_MISCOMPARE; 229 } 230 } 231 232 if (bdev_io->type == SPDK_BDEV_IO_TYPE_ABORT) { 233 if (g_io_exp_status == SPDK_BDEV_IO_STATUS_SUCCESS) { 234 TAILQ_FOREACH(bio, &ch->outstanding_io, link) { 235 bio_to_abort = spdk_bdev_io_from_ctx(bio); 236 if (bio_to_abort == bdev_io->u.abort.bio_to_abort) { 237 TAILQ_REMOVE(&ch->outstanding_io, bio, link); 238 ch->outstanding_io_count--; 239 spdk_bdev_io_complete(bio_to_abort, SPDK_BDEV_IO_STATUS_FAILED); 240 break; 241 } 242 } 243 } 244 } 245 246 if (bdev_io->type == SPDK_BDEV_IO_TYPE_ZCOPY) { 247 if (bdev_io->u.bdev.zcopy.start) { 248 g_zcopy_bdev_io = bdev_io; 249 if (bdev_io->u.bdev.zcopy.populate) { 250 /* Start of a read */ 251 CU_ASSERT(g_zcopy_read_buf != NULL); 252 CU_ASSERT(g_zcopy_read_buf_len > 0); 253 bdev_io->u.bdev.iovs[0].iov_base = g_zcopy_read_buf; 254 bdev_io->u.bdev.iovs[0].iov_len = g_zcopy_read_buf_len; 255 bdev_io->u.bdev.iovcnt = 1; 256 } else { 257 /* Start of a write */ 258 CU_ASSERT(g_zcopy_write_buf != NULL); 259 CU_ASSERT(g_zcopy_write_buf_len > 0); 260 bdev_io->u.bdev.iovs[0].iov_base = g_zcopy_write_buf; 261 bdev_io->u.bdev.iovs[0].iov_len = g_zcopy_write_buf_len; 262 bdev_io->u.bdev.iovcnt = 1; 263 } 264 } else { 265 if (bdev_io->u.bdev.zcopy.commit) { 266 /* End of write */ 267 CU_ASSERT(bdev_io->u.bdev.iovs[0].iov_base == g_zcopy_write_buf); 268 CU_ASSERT(bdev_io->u.bdev.iovs[0].iov_len == g_zcopy_write_buf_len); 269 CU_ASSERT(bdev_io->u.bdev.iovcnt == 1); 270 g_zcopy_write_buf = NULL; 271 g_zcopy_write_buf_len = 0; 272 } else { 273 /* End of read */ 274 CU_ASSERT(bdev_io->u.bdev.iovs[0].iov_base == g_zcopy_read_buf); 275 CU_ASSERT(bdev_io->u.bdev.iovs[0].iov_len == g_zcopy_read_buf_len); 276 CU_ASSERT(bdev_io->u.bdev.iovcnt == 1); 277 g_zcopy_read_buf = NULL; 278 g_zcopy_read_buf_len = 0; 279 } 280 } 281 } 282 283 if (bdev_io->type == SPDK_BDEV_IO_TYPE_SEEK_DATA) { 284 bdev_io->u.bdev.seek.offset = g_seek_data_offset; 285 } 286 287 if (bdev_io->type == SPDK_BDEV_IO_TYPE_SEEK_HOLE) { 288 bdev_io->u.bdev.seek.offset = g_seek_hole_offset; 289 } 290 291 TAILQ_INSERT_TAIL(&ch->outstanding_io, (struct bdev_ut_io *)bdev_io->driver_ctx, link); 292 ch->outstanding_io_count++; 293 294 expected_io = TAILQ_FIRST(&ch->expected_io); 295 if (expected_io == NULL) { 296 return; 297 } 298 TAILQ_REMOVE(&ch->expected_io, expected_io, link); 299 300 if (expected_io->type != SPDK_BDEV_IO_TYPE_INVALID) { 301 CU_ASSERT(bdev_io->type == expected_io->type); 302 } 303 304 if (expected_io->md_buf != NULL) { 305 CU_ASSERT(expected_io->md_buf == bdev_io->u.bdev.md_buf); 306 } 307 308 if (expected_io->length == 0) { 309 free(expected_io); 310 return; 311 } 312 313 CU_ASSERT(expected_io->offset == bdev_io->u.bdev.offset_blocks); 314 CU_ASSERT(expected_io->length == bdev_io->u.bdev.num_blocks); 315 if (expected_io->type == SPDK_BDEV_IO_TYPE_COPY) { 316 CU_ASSERT(expected_io->src_offset == bdev_io->u.bdev.copy.src_offset_blocks); 317 } 318 319 if (expected_io->iovcnt == 0) { 320 free(expected_io); 321 /* UNMAP, WRITE_ZEROES, FLUSH and COPY don't have iovs, so we can just return now. */ 322 return; 323 } 324 325 CU_ASSERT(expected_io->iovcnt == bdev_io->u.bdev.iovcnt); 326 for (i = 0; i < expected_io->iovcnt; i++) { 327 expected_iov = &expected_io->iov[i]; 328 if (bdev_io->internal.f.has_bounce_buf == false) { 329 iov = &bdev_io->u.bdev.iovs[i]; 330 } else { 331 iov = bdev_io->internal.bounce_buf.orig_iovs; 332 } 333 CU_ASSERT(iov->iov_len == expected_iov->iov_len); 334 CU_ASSERT(iov->iov_base == expected_iov->iov_base); 335 } 336 337 free(expected_io); 338 } 339 340 static void 341 stub_submit_request_get_buf_cb(struct spdk_io_channel *_ch, 342 struct spdk_bdev_io *bdev_io, bool success) 343 { 344 CU_ASSERT(success == true); 345 346 stub_submit_request(_ch, bdev_io); 347 } 348 349 static void 350 stub_submit_request_get_buf(struct spdk_io_channel *_ch, struct spdk_bdev_io *bdev_io) 351 { 352 spdk_bdev_io_get_buf(bdev_io, stub_submit_request_get_buf_cb, 353 bdev_io->u.bdev.num_blocks * bdev_io->bdev->blocklen); 354 } 355 356 static uint32_t 357 stub_complete_io(uint32_t num_to_complete) 358 { 359 struct bdev_ut_channel *ch = g_bdev_ut_channel; 360 struct bdev_ut_io *bio; 361 struct spdk_bdev_io *bdev_io; 362 static enum spdk_bdev_io_status io_status; 363 uint32_t num_completed = 0; 364 365 while (num_completed < num_to_complete) { 366 if (TAILQ_EMPTY(&ch->outstanding_io)) { 367 break; 368 } 369 bio = TAILQ_FIRST(&ch->outstanding_io); 370 TAILQ_REMOVE(&ch->outstanding_io, bio, link); 371 bdev_io = spdk_bdev_io_from_ctx(bio); 372 ch->outstanding_io_count--; 373 io_status = g_io_exp_status == SPDK_BDEV_IO_STATUS_SUCCESS ? SPDK_BDEV_IO_STATUS_SUCCESS : 374 g_io_exp_status; 375 spdk_bdev_io_complete(bdev_io, io_status); 376 num_completed++; 377 } 378 379 return num_completed; 380 } 381 382 static struct spdk_io_channel * 383 bdev_ut_get_io_channel(void *ctx) 384 { 385 return spdk_get_io_channel(&g_bdev_ut_io_device); 386 } 387 388 static bool g_io_types_supported[SPDK_BDEV_NUM_IO_TYPES] = { 389 [SPDK_BDEV_IO_TYPE_READ] = true, 390 [SPDK_BDEV_IO_TYPE_WRITE] = true, 391 [SPDK_BDEV_IO_TYPE_COMPARE] = true, 392 [SPDK_BDEV_IO_TYPE_UNMAP] = true, 393 [SPDK_BDEV_IO_TYPE_FLUSH] = true, 394 [SPDK_BDEV_IO_TYPE_RESET] = true, 395 [SPDK_BDEV_IO_TYPE_NVME_ADMIN] = true, 396 [SPDK_BDEV_IO_TYPE_NVME_IO] = true, 397 [SPDK_BDEV_IO_TYPE_NVME_IO_MD] = true, 398 [SPDK_BDEV_IO_TYPE_WRITE_ZEROES] = true, 399 [SPDK_BDEV_IO_TYPE_ZCOPY] = true, 400 [SPDK_BDEV_IO_TYPE_ABORT] = true, 401 [SPDK_BDEV_IO_TYPE_SEEK_HOLE] = true, 402 [SPDK_BDEV_IO_TYPE_SEEK_DATA] = true, 403 [SPDK_BDEV_IO_TYPE_COPY] = true, 404 }; 405 406 static void 407 ut_enable_io_type(enum spdk_bdev_io_type io_type, bool enable) 408 { 409 g_io_types_supported[io_type] = enable; 410 } 411 412 static bool 413 stub_io_type_supported(void *_bdev, enum spdk_bdev_io_type io_type) 414 { 415 return g_io_types_supported[io_type]; 416 } 417 418 static struct spdk_bdev_fn_table fn_table = { 419 .destruct = stub_destruct, 420 .submit_request = stub_submit_request, 421 .get_io_channel = bdev_ut_get_io_channel, 422 .io_type_supported = stub_io_type_supported, 423 }; 424 425 static int 426 bdev_ut_create_ch(void *io_device, void *ctx_buf) 427 { 428 struct bdev_ut_channel *ch = ctx_buf; 429 430 CU_ASSERT(g_bdev_ut_channel == NULL); 431 g_bdev_ut_channel = ch; 432 433 TAILQ_INIT(&ch->outstanding_io); 434 ch->outstanding_io_count = 0; 435 TAILQ_INIT(&ch->expected_io); 436 return 0; 437 } 438 439 static void 440 bdev_ut_destroy_ch(void *io_device, void *ctx_buf) 441 { 442 CU_ASSERT(g_bdev_ut_channel != NULL); 443 g_bdev_ut_channel = NULL; 444 } 445 446 struct spdk_bdev_module bdev_ut_if; 447 448 static int 449 bdev_ut_module_init(void) 450 { 451 spdk_io_device_register(&g_bdev_ut_io_device, bdev_ut_create_ch, bdev_ut_destroy_ch, 452 sizeof(struct bdev_ut_channel), NULL); 453 spdk_bdev_module_init_done(&bdev_ut_if); 454 return 0; 455 } 456 457 static void 458 bdev_ut_module_fini(void) 459 { 460 spdk_io_device_unregister(&g_bdev_ut_io_device, NULL); 461 } 462 463 struct spdk_bdev_module bdev_ut_if = { 464 .name = "bdev_ut", 465 .module_init = bdev_ut_module_init, 466 .module_fini = bdev_ut_module_fini, 467 .async_init = true, 468 }; 469 470 static void vbdev_ut_examine_config(struct spdk_bdev *bdev); 471 static void vbdev_ut_examine_disk(struct spdk_bdev *bdev); 472 473 static int 474 vbdev_ut_module_init(void) 475 { 476 return 0; 477 } 478 479 static void 480 vbdev_ut_module_fini(void) 481 { 482 } 483 484 static int 485 vbdev_ut_get_ctx_size(void) 486 { 487 return sizeof(struct bdev_ut_io); 488 } 489 490 struct spdk_bdev_module vbdev_ut_if = { 491 .name = "vbdev_ut", 492 .module_init = vbdev_ut_module_init, 493 .module_fini = vbdev_ut_module_fini, 494 .examine_config = vbdev_ut_examine_config, 495 .examine_disk = vbdev_ut_examine_disk, 496 .get_ctx_size = vbdev_ut_get_ctx_size, 497 }; 498 499 SPDK_BDEV_MODULE_REGISTER(bdev_ut, &bdev_ut_if) 500 SPDK_BDEV_MODULE_REGISTER(vbdev_ut, &vbdev_ut_if) 501 502 struct ut_examine_ctx { 503 void (*examine_config)(struct spdk_bdev *bdev); 504 void (*examine_disk)(struct spdk_bdev *bdev); 505 uint32_t examine_config_count; 506 uint32_t examine_disk_count; 507 }; 508 509 static void 510 vbdev_ut_examine_config(struct spdk_bdev *bdev) 511 { 512 struct ut_examine_ctx *ctx = bdev->ctxt; 513 514 if (ctx != NULL) { 515 ctx->examine_config_count++; 516 if (ctx->examine_config != NULL) { 517 ctx->examine_config(bdev); 518 } 519 } 520 521 spdk_bdev_module_examine_done(&vbdev_ut_if); 522 } 523 524 static void 525 vbdev_ut_examine_disk(struct spdk_bdev *bdev) 526 { 527 struct ut_examine_ctx *ctx = bdev->ctxt; 528 529 if (ctx != NULL) { 530 ctx->examine_disk_count++; 531 if (ctx->examine_disk != NULL) { 532 ctx->examine_disk(bdev); 533 } 534 } 535 536 spdk_bdev_module_examine_done(&vbdev_ut_if); 537 } 538 539 static void 540 bdev_init_cb(void *arg, int rc) 541 { 542 CU_ASSERT(rc == 0); 543 } 544 545 static void 546 bdev_fini_cb(void *arg) 547 { 548 } 549 550 static void 551 ut_init_bdev(struct spdk_bdev_opts *opts) 552 { 553 int rc; 554 555 if (opts != NULL) { 556 rc = spdk_bdev_set_opts(opts); 557 CU_ASSERT(rc == 0); 558 } 559 rc = spdk_iobuf_initialize(); 560 CU_ASSERT(rc == 0); 561 spdk_bdev_initialize(bdev_init_cb, NULL); 562 poll_threads(); 563 } 564 565 static void 566 ut_fini_bdev(void) 567 { 568 spdk_bdev_finish(bdev_fini_cb, NULL); 569 spdk_iobuf_finish(bdev_fini_cb, NULL); 570 poll_threads(); 571 } 572 573 static struct spdk_bdev * 574 allocate_bdev_ctx(char *name, void *ctx) 575 { 576 struct spdk_bdev *bdev; 577 int rc; 578 579 bdev = calloc(1, sizeof(*bdev)); 580 SPDK_CU_ASSERT_FATAL(bdev != NULL); 581 582 bdev->ctxt = ctx; 583 bdev->name = name; 584 bdev->fn_table = &fn_table; 585 bdev->module = &bdev_ut_if; 586 bdev->blockcnt = 1024; 587 bdev->blocklen = 512; 588 589 spdk_uuid_generate(&bdev->uuid); 590 591 rc = spdk_bdev_register(bdev); 592 poll_threads(); 593 CU_ASSERT(rc == 0); 594 595 return bdev; 596 } 597 598 static struct spdk_bdev * 599 allocate_bdev(char *name) 600 { 601 return allocate_bdev_ctx(name, NULL); 602 } 603 604 static struct spdk_bdev * 605 allocate_vbdev(char *name) 606 { 607 struct spdk_bdev *bdev; 608 int rc; 609 610 bdev = calloc(1, sizeof(*bdev)); 611 SPDK_CU_ASSERT_FATAL(bdev != NULL); 612 613 bdev->name = name; 614 bdev->fn_table = &fn_table; 615 bdev->module = &vbdev_ut_if; 616 bdev->blockcnt = 1024; 617 bdev->blocklen = 512; 618 619 rc = spdk_bdev_register(bdev); 620 poll_threads(); 621 CU_ASSERT(rc == 0); 622 623 return bdev; 624 } 625 626 static void 627 free_bdev(struct spdk_bdev *bdev) 628 { 629 spdk_bdev_unregister(bdev, NULL, NULL); 630 poll_threads(); 631 memset(bdev, 0xFF, sizeof(*bdev)); 632 free(bdev); 633 } 634 635 static void 636 free_vbdev(struct spdk_bdev *bdev) 637 { 638 spdk_bdev_unregister(bdev, NULL, NULL); 639 poll_threads(); 640 memset(bdev, 0xFF, sizeof(*bdev)); 641 free(bdev); 642 } 643 644 static void 645 get_device_stat_cb(struct spdk_bdev *bdev, struct spdk_bdev_io_stat *stat, void *cb_arg, int rc) 646 { 647 const char *bdev_name; 648 649 CU_ASSERT(bdev != NULL); 650 CU_ASSERT(rc == 0); 651 bdev_name = spdk_bdev_get_name(bdev); 652 CU_ASSERT_STRING_EQUAL(bdev_name, "bdev0"); 653 654 free(stat); 655 656 *(bool *)cb_arg = true; 657 } 658 659 static void 660 bdev_unregister_cb(void *cb_arg, int rc) 661 { 662 g_unregister_arg = cb_arg; 663 g_unregister_rc = rc; 664 } 665 666 static void 667 bdev_ut_event_cb(enum spdk_bdev_event_type type, struct spdk_bdev *bdev, void *event_ctx) 668 { 669 } 670 671 static void 672 bdev_open_cb1(enum spdk_bdev_event_type type, struct spdk_bdev *bdev, void *event_ctx) 673 { 674 struct spdk_bdev_desc *desc = *(struct spdk_bdev_desc **)event_ctx; 675 676 g_event_type1 = type; 677 if (SPDK_BDEV_EVENT_REMOVE == type) { 678 spdk_bdev_close(desc); 679 } 680 } 681 682 static void 683 bdev_open_cb2(enum spdk_bdev_event_type type, struct spdk_bdev *bdev, void *event_ctx) 684 { 685 struct spdk_bdev_desc *desc = *(struct spdk_bdev_desc **)event_ctx; 686 687 g_event_type2 = type; 688 if (SPDK_BDEV_EVENT_REMOVE == type) { 689 spdk_bdev_close(desc); 690 } 691 } 692 693 static void 694 bdev_open_cb3(enum spdk_bdev_event_type type, struct spdk_bdev *bdev, void *event_ctx) 695 { 696 g_event_type3 = type; 697 } 698 699 static void 700 bdev_open_cb4(enum spdk_bdev_event_type type, struct spdk_bdev *bdev, void *event_ctx) 701 { 702 g_event_type4 = type; 703 } 704 705 static void 706 bdev_seek_cb(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg) 707 { 708 g_seek_offset = spdk_bdev_io_get_seek_offset(bdev_io); 709 spdk_bdev_free_io(bdev_io); 710 } 711 712 static void 713 get_device_stat_test(void) 714 { 715 struct spdk_bdev *bdev; 716 struct spdk_bdev_io_stat *stat; 717 bool done; 718 719 bdev = allocate_bdev("bdev0"); 720 stat = calloc(1, sizeof(struct spdk_bdev_io_stat)); 721 if (stat == NULL) { 722 free_bdev(bdev); 723 return; 724 } 725 726 done = false; 727 spdk_bdev_get_device_stat(bdev, stat, SPDK_BDEV_RESET_STAT_NONE, get_device_stat_cb, &done); 728 while (!done) { poll_threads(); } 729 730 free_bdev(bdev); 731 } 732 733 static void 734 open_write_test(void) 735 { 736 struct spdk_bdev *bdev[9]; 737 struct spdk_bdev_desc *desc[9] = {}; 738 int rc; 739 740 ut_init_bdev(NULL); 741 742 /* 743 * Create a tree of bdevs to test various open w/ write cases. 744 * 745 * bdev0 through bdev3 are physical block devices, such as NVMe 746 * namespaces or Ceph block devices. 747 * 748 * bdev4 is a virtual bdev with multiple base bdevs. This models 749 * caching or RAID use cases. 750 * 751 * bdev5 through bdev7 are all virtual bdevs with the same base 752 * bdev (except bdev7). This models partitioning or logical volume 753 * use cases. 754 * 755 * bdev7 is a virtual bdev with multiple base bdevs. One of base bdevs 756 * (bdev2) is shared with other virtual bdevs: bdev5 and bdev6. This 757 * models caching, RAID, partitioning or logical volumes use cases. 758 * 759 * bdev8 is a virtual bdev with multiple base bdevs, but these 760 * base bdevs are themselves virtual bdevs. 761 * 762 * bdev8 763 * | 764 * +----------+ 765 * | | 766 * bdev4 bdev5 bdev6 bdev7 767 * | | | | 768 * +---+---+ +---+ + +---+---+ 769 * | | \ | / \ 770 * bdev0 bdev1 bdev2 bdev3 771 */ 772 773 bdev[0] = allocate_bdev("bdev0"); 774 rc = spdk_bdev_module_claim_bdev(bdev[0], NULL, &bdev_ut_if); 775 CU_ASSERT(rc == 0); 776 777 bdev[1] = allocate_bdev("bdev1"); 778 rc = spdk_bdev_module_claim_bdev(bdev[1], NULL, &bdev_ut_if); 779 CU_ASSERT(rc == 0); 780 781 bdev[2] = allocate_bdev("bdev2"); 782 rc = spdk_bdev_module_claim_bdev(bdev[2], NULL, &bdev_ut_if); 783 CU_ASSERT(rc == 0); 784 785 bdev[3] = allocate_bdev("bdev3"); 786 rc = spdk_bdev_module_claim_bdev(bdev[3], NULL, &bdev_ut_if); 787 CU_ASSERT(rc == 0); 788 789 bdev[4] = allocate_vbdev("bdev4"); 790 rc = spdk_bdev_module_claim_bdev(bdev[4], NULL, &bdev_ut_if); 791 CU_ASSERT(rc == 0); 792 793 bdev[5] = allocate_vbdev("bdev5"); 794 rc = spdk_bdev_module_claim_bdev(bdev[5], NULL, &bdev_ut_if); 795 CU_ASSERT(rc == 0); 796 797 bdev[6] = allocate_vbdev("bdev6"); 798 799 bdev[7] = allocate_vbdev("bdev7"); 800 801 bdev[8] = allocate_vbdev("bdev8"); 802 803 /* Open bdev0 read-only. This should succeed. */ 804 rc = spdk_bdev_open_ext("bdev0", false, bdev_ut_event_cb, NULL, &desc[0]); 805 CU_ASSERT(rc == 0); 806 SPDK_CU_ASSERT_FATAL(desc[0] != NULL); 807 CU_ASSERT(bdev[0] == spdk_bdev_desc_get_bdev(desc[0])); 808 spdk_bdev_close(desc[0]); 809 810 /* 811 * Open bdev1 read/write. This should fail since bdev1 has been claimed 812 * by a vbdev module. 813 */ 814 rc = spdk_bdev_open_ext("bdev1", true, bdev_ut_event_cb, NULL, &desc[1]); 815 CU_ASSERT(rc == -EPERM); 816 817 /* 818 * Open bdev4 read/write. This should fail since bdev3 has been claimed 819 * by a vbdev module. 820 */ 821 rc = spdk_bdev_open_ext("bdev4", true, bdev_ut_event_cb, NULL, &desc[4]); 822 CU_ASSERT(rc == -EPERM); 823 824 /* Open bdev4 read-only. This should succeed. */ 825 rc = spdk_bdev_open_ext("bdev4", false, bdev_ut_event_cb, NULL, &desc[4]); 826 CU_ASSERT(rc == 0); 827 SPDK_CU_ASSERT_FATAL(desc[4] != NULL); 828 CU_ASSERT(bdev[4] == spdk_bdev_desc_get_bdev(desc[4])); 829 spdk_bdev_close(desc[4]); 830 831 /* 832 * Open bdev8 read/write. This should succeed since it is a leaf 833 * bdev. 834 */ 835 rc = spdk_bdev_open_ext("bdev8", true, bdev_ut_event_cb, NULL, &desc[8]); 836 CU_ASSERT(rc == 0); 837 SPDK_CU_ASSERT_FATAL(desc[8] != NULL); 838 CU_ASSERT(bdev[8] == spdk_bdev_desc_get_bdev(desc[8])); 839 spdk_bdev_close(desc[8]); 840 841 /* 842 * Open bdev5 read/write. This should fail since bdev4 has been claimed 843 * by a vbdev module. 844 */ 845 rc = spdk_bdev_open_ext("bdev5", true, bdev_ut_event_cb, NULL, &desc[5]); 846 CU_ASSERT(rc == -EPERM); 847 848 /* Open bdev4 read-only. This should succeed. */ 849 rc = spdk_bdev_open_ext("bdev5", false, bdev_ut_event_cb, NULL, &desc[5]); 850 CU_ASSERT(rc == 0); 851 SPDK_CU_ASSERT_FATAL(desc[5] != NULL); 852 CU_ASSERT(bdev[5] == spdk_bdev_desc_get_bdev(desc[5])); 853 spdk_bdev_close(desc[5]); 854 855 free_vbdev(bdev[8]); 856 857 free_vbdev(bdev[5]); 858 free_vbdev(bdev[6]); 859 free_vbdev(bdev[7]); 860 861 free_vbdev(bdev[4]); 862 863 free_bdev(bdev[0]); 864 free_bdev(bdev[1]); 865 free_bdev(bdev[2]); 866 free_bdev(bdev[3]); 867 868 ut_fini_bdev(); 869 } 870 871 static void 872 claim_test(void) 873 { 874 struct spdk_bdev *bdev; 875 struct spdk_bdev_desc *desc, *open_desc; 876 int rc; 877 uint32_t count; 878 879 ut_init_bdev(NULL); 880 881 /* 882 * A vbdev that uses a read-only bdev may need it to remain read-only. 883 * To do so, it opens the bdev read-only, then claims it without 884 * passing a spdk_bdev_desc. 885 */ 886 bdev = allocate_bdev("bdev0"); 887 rc = spdk_bdev_open_ext("bdev0", false, bdev_ut_event_cb, NULL, &desc); 888 CU_ASSERT(rc == 0); 889 CU_ASSERT(desc->write == false); 890 891 rc = spdk_bdev_module_claim_bdev(bdev, NULL, &bdev_ut_if); 892 CU_ASSERT(rc == 0); 893 CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_EXCL_WRITE); 894 CU_ASSERT(bdev->internal.claim.v1.module == &bdev_ut_if); 895 896 /* There should be only one open descriptor and it should still be ro */ 897 count = 0; 898 TAILQ_FOREACH(open_desc, &bdev->internal.open_descs, link) { 899 CU_ASSERT(open_desc == desc); 900 CU_ASSERT(!open_desc->write); 901 count++; 902 } 903 CU_ASSERT(count == 1); 904 905 /* A read-only bdev is upgraded to read-write if desc is passed. */ 906 spdk_bdev_module_release_bdev(bdev); 907 rc = spdk_bdev_module_claim_bdev(bdev, desc, &bdev_ut_if); 908 CU_ASSERT(rc == 0); 909 CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_EXCL_WRITE); 910 CU_ASSERT(bdev->internal.claim.v1.module == &bdev_ut_if); 911 912 /* There should be only one open descriptor and it should be rw */ 913 count = 0; 914 TAILQ_FOREACH(open_desc, &bdev->internal.open_descs, link) { 915 CU_ASSERT(open_desc == desc); 916 CU_ASSERT(open_desc->write); 917 count++; 918 } 919 CU_ASSERT(count == 1); 920 921 spdk_bdev_close(desc); 922 free_bdev(bdev); 923 ut_fini_bdev(); 924 } 925 926 static void 927 bytes_to_blocks_test(void) 928 { 929 struct spdk_bdev_desc desc = {0}; 930 struct spdk_bdev bdev = {0}; 931 uint64_t offset_blocks, num_blocks; 932 933 934 desc.bdev = &bdev; 935 memset(&bdev, 0, sizeof(bdev)); 936 937 bdev.blocklen = 512; 938 939 /* All parameters valid */ 940 offset_blocks = 0; 941 num_blocks = 0; 942 CU_ASSERT(bdev_bytes_to_blocks(&desc, 512, &offset_blocks, 1024, &num_blocks) == 0); 943 CU_ASSERT(offset_blocks == 1); 944 CU_ASSERT(num_blocks == 2); 945 946 /* Offset not a block multiple */ 947 CU_ASSERT(bdev_bytes_to_blocks(&desc, 3, &offset_blocks, 512, &num_blocks) != 0); 948 949 /* Length not a block multiple */ 950 CU_ASSERT(bdev_bytes_to_blocks(&desc, 512, &offset_blocks, 3, &num_blocks) != 0); 951 952 /* In case blocklen not the power of two */ 953 bdev.blocklen = 100; 954 CU_ASSERT(bdev_bytes_to_blocks(&desc, 100, &offset_blocks, 200, &num_blocks) == 0); 955 CU_ASSERT(offset_blocks == 1); 956 CU_ASSERT(num_blocks == 2); 957 958 /* Offset not a block multiple */ 959 CU_ASSERT(bdev_bytes_to_blocks(&desc, 3, &offset_blocks, 100, &num_blocks) != 0); 960 961 /* Length not a block multiple */ 962 CU_ASSERT(bdev_bytes_to_blocks(&desc, 100, &offset_blocks, 3, &num_blocks) != 0); 963 } 964 965 static void 966 num_blocks_test(void) 967 { 968 struct spdk_bdev *bdev; 969 struct spdk_bdev_desc *desc = NULL; 970 int rc; 971 972 ut_init_bdev(NULL); 973 bdev = allocate_bdev("num_blocks"); 974 975 spdk_bdev_notify_blockcnt_change(bdev, 50); 976 977 /* Growing block number */ 978 CU_ASSERT(spdk_bdev_notify_blockcnt_change(bdev, 70) == 0); 979 /* Shrinking block number */ 980 CU_ASSERT(spdk_bdev_notify_blockcnt_change(bdev, 30) == 0); 981 982 rc = spdk_bdev_open_ext("num_blocks", false, bdev_open_cb1, &desc, &desc); 983 CU_ASSERT(rc == 0); 984 SPDK_CU_ASSERT_FATAL(desc != NULL); 985 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 986 987 /* Growing block number */ 988 CU_ASSERT(spdk_bdev_notify_blockcnt_change(bdev, 80) == 0); 989 /* Shrinking block number */ 990 CU_ASSERT(spdk_bdev_notify_blockcnt_change(bdev, 20) != 0); 991 992 g_event_type1 = 0xFF; 993 /* Growing block number */ 994 CU_ASSERT(spdk_bdev_notify_blockcnt_change(bdev, 90) == 0); 995 996 poll_threads(); 997 CU_ASSERT_EQUAL(g_event_type1, SPDK_BDEV_EVENT_RESIZE); 998 999 g_event_type1 = 0xFF; 1000 /* Growing block number and closing */ 1001 CU_ASSERT(spdk_bdev_notify_blockcnt_change(bdev, 100) == 0); 1002 1003 spdk_bdev_close(desc); 1004 free_bdev(bdev); 1005 ut_fini_bdev(); 1006 1007 poll_threads(); 1008 1009 /* Callback is not called for closed device */ 1010 CU_ASSERT_EQUAL(g_event_type1, 0xFF); 1011 } 1012 1013 static void 1014 io_valid_test(void) 1015 { 1016 struct spdk_bdev bdev; 1017 1018 memset(&bdev, 0, sizeof(bdev)); 1019 1020 bdev.blocklen = 512; 1021 spdk_spin_init(&bdev.internal.spinlock); 1022 1023 spdk_bdev_notify_blockcnt_change(&bdev, 100); 1024 1025 /* All parameters valid */ 1026 CU_ASSERT(bdev_io_valid_blocks(&bdev, 1, 2) == true); 1027 1028 /* Last valid block */ 1029 CU_ASSERT(bdev_io_valid_blocks(&bdev, 99, 1) == true); 1030 1031 /* Offset past end of bdev */ 1032 CU_ASSERT(bdev_io_valid_blocks(&bdev, 100, 1) == false); 1033 1034 /* Offset + length past end of bdev */ 1035 CU_ASSERT(bdev_io_valid_blocks(&bdev, 99, 2) == false); 1036 1037 /* Offset near end of uint64_t range (2^64 - 1) */ 1038 CU_ASSERT(bdev_io_valid_blocks(&bdev, 18446744073709551615ULL, 1) == false); 1039 1040 spdk_spin_destroy(&bdev.internal.spinlock); 1041 } 1042 1043 static void 1044 alias_add_del_test(void) 1045 { 1046 struct spdk_bdev *bdev[3]; 1047 int rc; 1048 1049 ut_init_bdev(NULL); 1050 1051 /* Creating and registering bdevs */ 1052 bdev[0] = allocate_bdev("bdev0"); 1053 SPDK_CU_ASSERT_FATAL(bdev[0] != 0); 1054 1055 bdev[1] = allocate_bdev("bdev1"); 1056 SPDK_CU_ASSERT_FATAL(bdev[1] != 0); 1057 1058 bdev[2] = allocate_bdev("bdev2"); 1059 SPDK_CU_ASSERT_FATAL(bdev[2] != 0); 1060 1061 poll_threads(); 1062 1063 /* 1064 * Trying adding an alias identical to name. 1065 * Alias is identical to name, so it can not be added to aliases list 1066 */ 1067 rc = spdk_bdev_alias_add(bdev[0], bdev[0]->name); 1068 CU_ASSERT(rc == -EEXIST); 1069 1070 /* 1071 * Trying to add empty alias, 1072 * this one should fail 1073 */ 1074 rc = spdk_bdev_alias_add(bdev[0], NULL); 1075 CU_ASSERT(rc == -EINVAL); 1076 1077 /* Trying adding same alias to two different registered bdevs */ 1078 1079 /* Alias is used first time, so this one should pass */ 1080 rc = spdk_bdev_alias_add(bdev[0], "proper alias 0"); 1081 CU_ASSERT(rc == 0); 1082 1083 /* Alias was added to another bdev, so this one should fail */ 1084 rc = spdk_bdev_alias_add(bdev[1], "proper alias 0"); 1085 CU_ASSERT(rc == -EEXIST); 1086 1087 /* Alias is used first time, so this one should pass */ 1088 rc = spdk_bdev_alias_add(bdev[1], "proper alias 1"); 1089 CU_ASSERT(rc == 0); 1090 1091 /* Trying removing an alias from registered bdevs */ 1092 1093 /* Alias is not on a bdev aliases list, so this one should fail */ 1094 rc = spdk_bdev_alias_del(bdev[0], "not existing"); 1095 CU_ASSERT(rc == -ENOENT); 1096 1097 /* Alias is present on a bdev aliases list, so this one should pass */ 1098 rc = spdk_bdev_alias_del(bdev[0], "proper alias 0"); 1099 CU_ASSERT(rc == 0); 1100 1101 /* Alias is present on a bdev aliases list, so this one should pass */ 1102 rc = spdk_bdev_alias_del(bdev[1], "proper alias 1"); 1103 CU_ASSERT(rc == 0); 1104 1105 /* Trying to remove name instead of alias, so this one should fail, name cannot be changed or removed */ 1106 rc = spdk_bdev_alias_del(bdev[0], bdev[0]->name); 1107 CU_ASSERT(rc != 0); 1108 1109 /* Trying to del all alias from empty alias list */ 1110 spdk_bdev_alias_del_all(bdev[2]); 1111 SPDK_CU_ASSERT_FATAL(TAILQ_EMPTY(&bdev[2]->aliases)); 1112 1113 /* Trying to del all alias from non-empty alias list */ 1114 rc = spdk_bdev_alias_add(bdev[2], "alias0"); 1115 CU_ASSERT(rc == 0); 1116 rc = spdk_bdev_alias_add(bdev[2], "alias1"); 1117 CU_ASSERT(rc == 0); 1118 spdk_bdev_alias_del_all(bdev[2]); 1119 CU_ASSERT(TAILQ_EMPTY(&bdev[2]->aliases)); 1120 1121 /* Unregister and free bdevs */ 1122 spdk_bdev_unregister(bdev[0], NULL, NULL); 1123 spdk_bdev_unregister(bdev[1], NULL, NULL); 1124 spdk_bdev_unregister(bdev[2], NULL, NULL); 1125 1126 poll_threads(); 1127 1128 free(bdev[0]); 1129 free(bdev[1]); 1130 free(bdev[2]); 1131 1132 ut_fini_bdev(); 1133 } 1134 1135 static void 1136 io_done(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg) 1137 { 1138 g_io_done = true; 1139 g_io_status = bdev_io->internal.status; 1140 if ((bdev_io->type == SPDK_BDEV_IO_TYPE_ZCOPY) && 1141 (bdev_io->u.bdev.zcopy.start)) { 1142 g_zcopy_bdev_io = bdev_io; 1143 } else { 1144 spdk_bdev_free_io(bdev_io); 1145 g_zcopy_bdev_io = NULL; 1146 } 1147 } 1148 1149 struct bdev_ut_io_wait_entry { 1150 struct spdk_bdev_io_wait_entry entry; 1151 struct spdk_io_channel *io_ch; 1152 struct spdk_bdev_desc *desc; 1153 bool submitted; 1154 }; 1155 1156 static void 1157 io_wait_cb(void *arg) 1158 { 1159 struct bdev_ut_io_wait_entry *entry = arg; 1160 int rc; 1161 1162 rc = spdk_bdev_read_blocks(entry->desc, entry->io_ch, NULL, 0, 1, io_done, NULL); 1163 CU_ASSERT(rc == 0); 1164 entry->submitted = true; 1165 } 1166 1167 static void 1168 bdev_io_types_test(void) 1169 { 1170 struct spdk_bdev *bdev; 1171 struct spdk_bdev_desc *desc = NULL; 1172 struct spdk_io_channel *io_ch; 1173 struct spdk_bdev_opts bdev_opts = {}; 1174 int rc; 1175 1176 spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts)); 1177 bdev_opts.bdev_io_pool_size = 4; 1178 bdev_opts.bdev_io_cache_size = 2; 1179 ut_init_bdev(&bdev_opts); 1180 1181 bdev = allocate_bdev("bdev0"); 1182 1183 rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc); 1184 CU_ASSERT(rc == 0); 1185 poll_threads(); 1186 SPDK_CU_ASSERT_FATAL(desc != NULL); 1187 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 1188 io_ch = spdk_bdev_get_io_channel(desc); 1189 CU_ASSERT(io_ch != NULL); 1190 1191 /* WRITE and WRITE ZEROES are not supported */ 1192 ut_enable_io_type(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, false); 1193 ut_enable_io_type(SPDK_BDEV_IO_TYPE_WRITE, false); 1194 rc = spdk_bdev_write_zeroes_blocks(desc, io_ch, 0, 128, io_done, NULL); 1195 CU_ASSERT(rc == -ENOTSUP); 1196 ut_enable_io_type(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, true); 1197 ut_enable_io_type(SPDK_BDEV_IO_TYPE_WRITE, true); 1198 1199 /* NVME_IO, NVME_IO_MD and NVME_ADMIN are not supported */ 1200 ut_enable_io_type(SPDK_BDEV_IO_TYPE_NVME_IO, false); 1201 ut_enable_io_type(SPDK_BDEV_IO_TYPE_NVME_IO_MD, false); 1202 ut_enable_io_type(SPDK_BDEV_IO_TYPE_NVME_ADMIN, false); 1203 rc = spdk_bdev_nvme_io_passthru(desc, io_ch, NULL, NULL, 0, NULL, NULL); 1204 CU_ASSERT(rc == -ENOTSUP); 1205 rc = spdk_bdev_nvme_io_passthru_md(desc, io_ch, NULL, NULL, 0, NULL, 0, NULL, NULL); 1206 CU_ASSERT(rc == -ENOTSUP); 1207 rc = spdk_bdev_nvme_admin_passthru(desc, io_ch, NULL, NULL, 0, NULL, NULL); 1208 CU_ASSERT(rc == -ENOTSUP); 1209 ut_enable_io_type(SPDK_BDEV_IO_TYPE_NVME_IO, true); 1210 ut_enable_io_type(SPDK_BDEV_IO_TYPE_NVME_IO_MD, true); 1211 ut_enable_io_type(SPDK_BDEV_IO_TYPE_NVME_ADMIN, true); 1212 1213 spdk_put_io_channel(io_ch); 1214 spdk_bdev_close(desc); 1215 free_bdev(bdev); 1216 ut_fini_bdev(); 1217 } 1218 1219 static void 1220 bdev_io_wait_test(void) 1221 { 1222 struct spdk_bdev *bdev; 1223 struct spdk_bdev_desc *desc = NULL; 1224 struct spdk_io_channel *io_ch; 1225 struct spdk_bdev_opts bdev_opts = {}; 1226 struct bdev_ut_io_wait_entry io_wait_entry; 1227 struct bdev_ut_io_wait_entry io_wait_entry2; 1228 int rc; 1229 1230 spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts)); 1231 bdev_opts.bdev_io_pool_size = 4; 1232 bdev_opts.bdev_io_cache_size = 2; 1233 ut_init_bdev(&bdev_opts); 1234 1235 bdev = allocate_bdev("bdev0"); 1236 1237 rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc); 1238 CU_ASSERT(rc == 0); 1239 poll_threads(); 1240 SPDK_CU_ASSERT_FATAL(desc != NULL); 1241 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 1242 io_ch = spdk_bdev_get_io_channel(desc); 1243 CU_ASSERT(io_ch != NULL); 1244 1245 rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, NULL); 1246 CU_ASSERT(rc == 0); 1247 rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, NULL); 1248 CU_ASSERT(rc == 0); 1249 rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, NULL); 1250 CU_ASSERT(rc == 0); 1251 rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, NULL); 1252 CU_ASSERT(rc == 0); 1253 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 4); 1254 1255 rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, NULL); 1256 CU_ASSERT(rc == -ENOMEM); 1257 1258 io_wait_entry.entry.bdev = bdev; 1259 io_wait_entry.entry.cb_fn = io_wait_cb; 1260 io_wait_entry.entry.cb_arg = &io_wait_entry; 1261 io_wait_entry.io_ch = io_ch; 1262 io_wait_entry.desc = desc; 1263 io_wait_entry.submitted = false; 1264 /* Cannot use the same io_wait_entry for two different calls. */ 1265 memcpy(&io_wait_entry2, &io_wait_entry, sizeof(io_wait_entry)); 1266 io_wait_entry2.entry.cb_arg = &io_wait_entry2; 1267 1268 /* Queue two I/O waits. */ 1269 rc = spdk_bdev_queue_io_wait(bdev, io_ch, &io_wait_entry.entry); 1270 CU_ASSERT(rc == 0); 1271 CU_ASSERT(io_wait_entry.submitted == false); 1272 rc = spdk_bdev_queue_io_wait(bdev, io_ch, &io_wait_entry2.entry); 1273 CU_ASSERT(rc == 0); 1274 CU_ASSERT(io_wait_entry2.submitted == false); 1275 1276 stub_complete_io(1); 1277 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 4); 1278 CU_ASSERT(io_wait_entry.submitted == true); 1279 CU_ASSERT(io_wait_entry2.submitted == false); 1280 1281 stub_complete_io(1); 1282 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 4); 1283 CU_ASSERT(io_wait_entry2.submitted == true); 1284 1285 stub_complete_io(4); 1286 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 1287 1288 spdk_put_io_channel(io_ch); 1289 spdk_bdev_close(desc); 1290 free_bdev(bdev); 1291 ut_fini_bdev(); 1292 } 1293 1294 static void 1295 bdev_io_spans_split_test(void) 1296 { 1297 struct spdk_bdev bdev; 1298 struct spdk_bdev_io bdev_io; 1299 struct iovec iov[SPDK_BDEV_IO_NUM_CHILD_IOV]; 1300 1301 memset(&bdev, 0, sizeof(bdev)); 1302 bdev_io.u.bdev.iovs = iov; 1303 1304 bdev_io.type = SPDK_BDEV_IO_TYPE_READ; 1305 bdev.optimal_io_boundary = 0; 1306 bdev.max_segment_size = 0; 1307 bdev.max_num_segments = 0; 1308 bdev_io.bdev = &bdev; 1309 1310 /* bdev has no optimal_io_boundary and max_size set - so this should return false. */ 1311 CU_ASSERT(bdev_io_should_split(&bdev_io) == false); 1312 1313 bdev.split_on_optimal_io_boundary = true; 1314 bdev.optimal_io_boundary = 32; 1315 bdev_io.type = SPDK_BDEV_IO_TYPE_RESET; 1316 1317 /* RESETs are not based on LBAs - so this should return false. */ 1318 CU_ASSERT(bdev_io_should_split(&bdev_io) == false); 1319 1320 bdev_io.type = SPDK_BDEV_IO_TYPE_READ; 1321 bdev_io.u.bdev.offset_blocks = 0; 1322 bdev_io.u.bdev.num_blocks = 32; 1323 1324 /* This I/O run right up to, but does not cross, the boundary - so this should return false. */ 1325 CU_ASSERT(bdev_io_should_split(&bdev_io) == false); 1326 1327 bdev_io.u.bdev.num_blocks = 33; 1328 1329 /* This I/O spans a boundary. */ 1330 CU_ASSERT(bdev_io_should_split(&bdev_io) == true); 1331 1332 bdev_io.u.bdev.num_blocks = 32; 1333 bdev.max_segment_size = 512 * 32; 1334 bdev.max_num_segments = 1; 1335 bdev_io.u.bdev.iovcnt = 1; 1336 iov[0].iov_len = 512; 1337 1338 /* Does not cross and exceed max_size or max_segs */ 1339 CU_ASSERT(bdev_io_should_split(&bdev_io) == false); 1340 1341 bdev.split_on_optimal_io_boundary = false; 1342 bdev.max_segment_size = 512; 1343 bdev.max_num_segments = 1; 1344 bdev_io.u.bdev.iovcnt = 2; 1345 1346 /* Exceed max_segs */ 1347 CU_ASSERT(bdev_io_should_split(&bdev_io) == true); 1348 1349 bdev.max_num_segments = 2; 1350 iov[0].iov_len = 513; 1351 iov[1].iov_len = 512; 1352 1353 /* Exceed max_sizes */ 1354 CU_ASSERT(bdev_io_should_split(&bdev_io) == true); 1355 1356 bdev.max_segment_size = 0; 1357 bdev.write_unit_size = 32; 1358 bdev.split_on_write_unit = true; 1359 bdev_io.type = SPDK_BDEV_IO_TYPE_WRITE; 1360 1361 /* This I/O is one write unit */ 1362 CU_ASSERT(bdev_io_should_split(&bdev_io) == false); 1363 1364 bdev_io.u.bdev.num_blocks = 32 * 2; 1365 1366 /* This I/O is more than one write unit */ 1367 CU_ASSERT(bdev_io_should_split(&bdev_io) == true); 1368 1369 bdev_io.u.bdev.offset_blocks = 1; 1370 bdev_io.u.bdev.num_blocks = 32; 1371 1372 /* This I/O is not aligned to write unit size */ 1373 CU_ASSERT(bdev_io_should_split(&bdev_io) == true); 1374 } 1375 1376 static void 1377 bdev_io_boundary_split_test(void) 1378 { 1379 struct spdk_bdev *bdev; 1380 struct spdk_bdev_desc *desc = NULL; 1381 struct spdk_io_channel *io_ch; 1382 struct spdk_bdev_opts bdev_opts = {}; 1383 struct iovec iov[SPDK_BDEV_IO_NUM_CHILD_IOV * 2]; 1384 struct ut_expected_io *expected_io; 1385 void *md_buf = (void *)0xFF000000; 1386 uint64_t i; 1387 int rc; 1388 1389 spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts)); 1390 bdev_opts.bdev_io_pool_size = 512; 1391 bdev_opts.bdev_io_cache_size = 64; 1392 ut_init_bdev(&bdev_opts); 1393 1394 bdev = allocate_bdev("bdev0"); 1395 1396 rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc); 1397 CU_ASSERT(rc == 0); 1398 SPDK_CU_ASSERT_FATAL(desc != NULL); 1399 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 1400 io_ch = spdk_bdev_get_io_channel(desc); 1401 CU_ASSERT(io_ch != NULL); 1402 1403 bdev->optimal_io_boundary = 16; 1404 bdev->split_on_optimal_io_boundary = false; 1405 1406 g_io_done = false; 1407 1408 /* First test that the I/O does not get split if split_on_optimal_io_boundary == false. */ 1409 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 14, 8, 1); 1410 ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 8 * 512); 1411 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1412 1413 rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 8, io_done, NULL); 1414 CU_ASSERT(rc == 0); 1415 CU_ASSERT(g_io_done == false); 1416 1417 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 1418 stub_complete_io(1); 1419 CU_ASSERT(g_io_done == true); 1420 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 1421 1422 bdev->split_on_optimal_io_boundary = true; 1423 bdev->md_interleave = false; 1424 bdev->md_len = 8; 1425 1426 /* Now test that a single-vector command is split correctly. 1427 * Offset 14, length 8, payload 0xF000 1428 * Child - Offset 14, length 2, payload 0xF000 1429 * Child - Offset 16, length 6, payload 0xF000 + 2 * 512 1430 * 1431 * Set up the expected values before calling spdk_bdev_read_blocks 1432 */ 1433 g_io_done = false; 1434 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 14, 2, 1); 1435 expected_io->md_buf = md_buf; 1436 ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 2 * 512); 1437 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1438 1439 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 16, 6, 1); 1440 expected_io->md_buf = md_buf + 2 * 8; 1441 ut_expected_io_set_iov(expected_io, 0, (void *)(0xF000 + 2 * 512), 6 * 512); 1442 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1443 1444 /* spdk_bdev_read_blocks will submit the first child immediately. */ 1445 rc = spdk_bdev_read_blocks_with_md(desc, io_ch, (void *)0xF000, md_buf, 1446 14, 8, io_done, NULL); 1447 CU_ASSERT(rc == 0); 1448 CU_ASSERT(g_io_done == false); 1449 1450 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 1451 stub_complete_io(2); 1452 CU_ASSERT(g_io_done == true); 1453 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 1454 1455 /* Now set up a more complex, multi-vector command that needs to be split, 1456 * including splitting iovecs. 1457 */ 1458 iov[0].iov_base = (void *)0x10000; 1459 iov[0].iov_len = 512; 1460 iov[1].iov_base = (void *)0x20000; 1461 iov[1].iov_len = 20 * 512; 1462 iov[2].iov_base = (void *)0x30000; 1463 iov[2].iov_len = 11 * 512; 1464 1465 g_io_done = false; 1466 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 14, 2, 2); 1467 expected_io->md_buf = md_buf; 1468 ut_expected_io_set_iov(expected_io, 0, (void *)0x10000, 512); 1469 ut_expected_io_set_iov(expected_io, 1, (void *)0x20000, 512); 1470 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1471 1472 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 16, 16, 1); 1473 expected_io->md_buf = md_buf + 2 * 8; 1474 ut_expected_io_set_iov(expected_io, 0, (void *)(0x20000 + 512), 16 * 512); 1475 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1476 1477 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 32, 14, 2); 1478 expected_io->md_buf = md_buf + 18 * 8; 1479 ut_expected_io_set_iov(expected_io, 0, (void *)(0x20000 + 17 * 512), 3 * 512); 1480 ut_expected_io_set_iov(expected_io, 1, (void *)0x30000, 11 * 512); 1481 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1482 1483 rc = spdk_bdev_writev_blocks_with_md(desc, io_ch, iov, 3, md_buf, 1484 14, 32, io_done, NULL); 1485 CU_ASSERT(rc == 0); 1486 CU_ASSERT(g_io_done == false); 1487 1488 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 3); 1489 stub_complete_io(3); 1490 CU_ASSERT(g_io_done == true); 1491 1492 /* Test multi vector command that needs to be split by strip and then needs to be 1493 * split further due to the capacity of child iovs. 1494 */ 1495 for (i = 0; i < SPDK_BDEV_IO_NUM_CHILD_IOV * 2; i++) { 1496 iov[i].iov_base = (void *)((i + 1) * 0x10000); 1497 iov[i].iov_len = 512; 1498 } 1499 1500 bdev->optimal_io_boundary = SPDK_BDEV_IO_NUM_CHILD_IOV; 1501 g_io_done = false; 1502 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0, SPDK_BDEV_IO_NUM_CHILD_IOV, 1503 SPDK_BDEV_IO_NUM_CHILD_IOV); 1504 expected_io->md_buf = md_buf; 1505 for (i = 0; i < SPDK_BDEV_IO_NUM_CHILD_IOV; i++) { 1506 ut_expected_io_set_iov(expected_io, i, (void *)((i + 1) * 0x10000), 512); 1507 } 1508 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1509 1510 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, SPDK_BDEV_IO_NUM_CHILD_IOV, 1511 SPDK_BDEV_IO_NUM_CHILD_IOV, SPDK_BDEV_IO_NUM_CHILD_IOV); 1512 expected_io->md_buf = md_buf + SPDK_BDEV_IO_NUM_CHILD_IOV * 8; 1513 for (i = 0; i < SPDK_BDEV_IO_NUM_CHILD_IOV; i++) { 1514 ut_expected_io_set_iov(expected_io, i, 1515 (void *)((i + 1 + SPDK_BDEV_IO_NUM_CHILD_IOV) * 0x10000), 512); 1516 } 1517 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1518 1519 rc = spdk_bdev_readv_blocks_with_md(desc, io_ch, iov, SPDK_BDEV_IO_NUM_CHILD_IOV * 2, md_buf, 1520 0, SPDK_BDEV_IO_NUM_CHILD_IOV * 2, io_done, NULL); 1521 CU_ASSERT(rc == 0); 1522 CU_ASSERT(g_io_done == false); 1523 1524 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 1525 stub_complete_io(1); 1526 CU_ASSERT(g_io_done == false); 1527 1528 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 1529 stub_complete_io(1); 1530 CU_ASSERT(g_io_done == true); 1531 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 1532 1533 /* Test multi vector command that needs to be split by strip and then needs to be 1534 * split further due to the capacity of child iovs. In this case, the length of 1535 * the rest of iovec array with an I/O boundary is the multiple of block size. 1536 */ 1537 1538 /* Fill iovec array for exactly one boundary. The iovec cnt for this boundary 1539 * is SPDK_BDEV_IO_NUM_CHILD_IOV + 1, which exceeds the capacity of child iovs. 1540 */ 1541 for (i = 0; i < SPDK_BDEV_IO_NUM_CHILD_IOV - 2; i++) { 1542 iov[i].iov_base = (void *)((i + 1) * 0x10000); 1543 iov[i].iov_len = 512; 1544 } 1545 for (i = SPDK_BDEV_IO_NUM_CHILD_IOV - 2; i < SPDK_BDEV_IO_NUM_CHILD_IOV; i++) { 1546 iov[i].iov_base = (void *)((i + 1) * 0x10000); 1547 iov[i].iov_len = 256; 1548 } 1549 iov[SPDK_BDEV_IO_NUM_CHILD_IOV].iov_base = (void *)((SPDK_BDEV_IO_NUM_CHILD_IOV + 1) * 0x10000); 1550 iov[SPDK_BDEV_IO_NUM_CHILD_IOV].iov_len = 512; 1551 1552 /* Add an extra iovec to trigger split */ 1553 iov[SPDK_BDEV_IO_NUM_CHILD_IOV + 1].iov_base = (void *)((SPDK_BDEV_IO_NUM_CHILD_IOV + 2) * 0x10000); 1554 iov[SPDK_BDEV_IO_NUM_CHILD_IOV + 1].iov_len = 512; 1555 1556 bdev->optimal_io_boundary = SPDK_BDEV_IO_NUM_CHILD_IOV; 1557 g_io_done = false; 1558 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0, 1559 SPDK_BDEV_IO_NUM_CHILD_IOV - 1, SPDK_BDEV_IO_NUM_CHILD_IOV); 1560 expected_io->md_buf = md_buf; 1561 for (i = 0; i < SPDK_BDEV_IO_NUM_CHILD_IOV - 2; i++) { 1562 ut_expected_io_set_iov(expected_io, i, 1563 (void *)((i + 1) * 0x10000), 512); 1564 } 1565 for (i = SPDK_BDEV_IO_NUM_CHILD_IOV - 2; i < SPDK_BDEV_IO_NUM_CHILD_IOV; i++) { 1566 ut_expected_io_set_iov(expected_io, i, 1567 (void *)((i + 1) * 0x10000), 256); 1568 } 1569 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1570 1571 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, SPDK_BDEV_IO_NUM_CHILD_IOV - 1, 1572 1, 1); 1573 expected_io->md_buf = md_buf + (SPDK_BDEV_IO_NUM_CHILD_IOV - 1) * 8; 1574 ut_expected_io_set_iov(expected_io, 0, 1575 (void *)((SPDK_BDEV_IO_NUM_CHILD_IOV + 1) * 0x10000), 512); 1576 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1577 1578 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, SPDK_BDEV_IO_NUM_CHILD_IOV, 1579 1, 1); 1580 expected_io->md_buf = md_buf + SPDK_BDEV_IO_NUM_CHILD_IOV * 8; 1581 ut_expected_io_set_iov(expected_io, 0, 1582 (void *)((SPDK_BDEV_IO_NUM_CHILD_IOV + 2) * 0x10000), 512); 1583 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1584 1585 rc = spdk_bdev_readv_blocks_with_md(desc, io_ch, iov, SPDK_BDEV_IO_NUM_CHILD_IOV + 2, md_buf, 1586 0, SPDK_BDEV_IO_NUM_CHILD_IOV + 1, io_done, NULL); 1587 CU_ASSERT(rc == 0); 1588 CU_ASSERT(g_io_done == false); 1589 1590 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 1591 stub_complete_io(1); 1592 CU_ASSERT(g_io_done == false); 1593 1594 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 1595 stub_complete_io(2); 1596 CU_ASSERT(g_io_done == true); 1597 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 1598 1599 /* Test multi vector command that needs to be split by strip and then needs to be 1600 * split further due to the capacity of child iovs, the child request offset should 1601 * be rewind to last aligned offset and go success without error. 1602 */ 1603 for (i = 0; i < SPDK_BDEV_IO_NUM_CHILD_IOV - 1; i++) { 1604 iov[i].iov_base = (void *)((i + 1) * 0x10000); 1605 iov[i].iov_len = 512; 1606 } 1607 iov[SPDK_BDEV_IO_NUM_CHILD_IOV - 1].iov_base = (void *)(SPDK_BDEV_IO_NUM_CHILD_IOV * 0x10000); 1608 iov[SPDK_BDEV_IO_NUM_CHILD_IOV - 1].iov_len = 256; 1609 1610 iov[SPDK_BDEV_IO_NUM_CHILD_IOV].iov_base = (void *)((SPDK_BDEV_IO_NUM_CHILD_IOV + 1) * 0x10000); 1611 iov[SPDK_BDEV_IO_NUM_CHILD_IOV].iov_len = 256; 1612 1613 iov[SPDK_BDEV_IO_NUM_CHILD_IOV + 1].iov_base = (void *)((SPDK_BDEV_IO_NUM_CHILD_IOV + 2) * 0x10000); 1614 iov[SPDK_BDEV_IO_NUM_CHILD_IOV + 1].iov_len = 512; 1615 1616 bdev->optimal_io_boundary = SPDK_BDEV_IO_NUM_CHILD_IOV; 1617 g_io_done = false; 1618 g_io_status = 0; 1619 /* The first expected io should be start from offset 0 to SPDK_BDEV_IO_NUM_CHILD_IOV - 1 */ 1620 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0, 1621 SPDK_BDEV_IO_NUM_CHILD_IOV - 1, SPDK_BDEV_IO_NUM_CHILD_IOV - 1); 1622 expected_io->md_buf = md_buf; 1623 for (i = 0; i < SPDK_BDEV_IO_NUM_CHILD_IOV - 1; i++) { 1624 ut_expected_io_set_iov(expected_io, i, 1625 (void *)((i + 1) * 0x10000), 512); 1626 } 1627 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1628 /* The second expected io should be start from offset SPDK_BDEV_IO_NUM_CHILD_IOV - 1 to SPDK_BDEV_IO_NUM_CHILD_IOV */ 1629 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, SPDK_BDEV_IO_NUM_CHILD_IOV - 1, 1630 1, 2); 1631 expected_io->md_buf = md_buf + (SPDK_BDEV_IO_NUM_CHILD_IOV - 1) * 8; 1632 ut_expected_io_set_iov(expected_io, 0, 1633 (void *)(SPDK_BDEV_IO_NUM_CHILD_IOV * 0x10000), 256); 1634 ut_expected_io_set_iov(expected_io, 1, 1635 (void *)((SPDK_BDEV_IO_NUM_CHILD_IOV + 1) * 0x10000), 256); 1636 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1637 /* The third expected io should be start from offset SPDK_BDEV_IO_NUM_CHILD_IOV to SPDK_BDEV_IO_NUM_CHILD_IOV + 1 */ 1638 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, SPDK_BDEV_IO_NUM_CHILD_IOV, 1639 1, 1); 1640 expected_io->md_buf = md_buf + SPDK_BDEV_IO_NUM_CHILD_IOV * 8; 1641 ut_expected_io_set_iov(expected_io, 0, 1642 (void *)((SPDK_BDEV_IO_NUM_CHILD_IOV + 2) * 0x10000), 512); 1643 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1644 1645 rc = spdk_bdev_readv_blocks_with_md(desc, io_ch, iov, SPDK_BDEV_IO_NUM_CHILD_IOV * 2, md_buf, 1646 0, SPDK_BDEV_IO_NUM_CHILD_IOV + 1, io_done, NULL); 1647 CU_ASSERT(rc == 0); 1648 CU_ASSERT(g_io_done == false); 1649 1650 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 1651 stub_complete_io(1); 1652 CU_ASSERT(g_io_done == false); 1653 1654 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 1655 stub_complete_io(2); 1656 CU_ASSERT(g_io_done == true); 1657 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 1658 1659 /* Test multi vector command that needs to be split due to the IO boundary and 1660 * the capacity of child iovs. Especially test the case when the command is 1661 * split due to the capacity of child iovs, the tail address is not aligned with 1662 * block size and is rewinded to the aligned address. 1663 * 1664 * The iovecs used in read request is complex but is based on the data 1665 * collected in the real issue. We change the base addresses but keep the lengths 1666 * not to loose the credibility of the test. 1667 */ 1668 bdev->optimal_io_boundary = 128; 1669 g_io_done = false; 1670 g_io_status = 0; 1671 1672 for (i = 0; i < 31; i++) { 1673 iov[i].iov_base = (void *)(0xFEED0000000 + (i << 20)); 1674 iov[i].iov_len = 1024; 1675 } 1676 iov[31].iov_base = (void *)0xFEED1F00000; 1677 iov[31].iov_len = 32768; 1678 iov[32].iov_base = (void *)0xFEED2000000; 1679 iov[32].iov_len = 160; 1680 iov[33].iov_base = (void *)0xFEED2100000; 1681 iov[33].iov_len = 4096; 1682 iov[34].iov_base = (void *)0xFEED2200000; 1683 iov[34].iov_len = 4096; 1684 iov[35].iov_base = (void *)0xFEED2300000; 1685 iov[35].iov_len = 4096; 1686 iov[36].iov_base = (void *)0xFEED2400000; 1687 iov[36].iov_len = 4096; 1688 iov[37].iov_base = (void *)0xFEED2500000; 1689 iov[37].iov_len = 4096; 1690 iov[38].iov_base = (void *)0xFEED2600000; 1691 iov[38].iov_len = 4096; 1692 iov[39].iov_base = (void *)0xFEED2700000; 1693 iov[39].iov_len = 4096; 1694 iov[40].iov_base = (void *)0xFEED2800000; 1695 iov[40].iov_len = 4096; 1696 iov[41].iov_base = (void *)0xFEED2900000; 1697 iov[41].iov_len = 4096; 1698 iov[42].iov_base = (void *)0xFEED2A00000; 1699 iov[42].iov_len = 4096; 1700 iov[43].iov_base = (void *)0xFEED2B00000; 1701 iov[43].iov_len = 12288; 1702 iov[44].iov_base = (void *)0xFEED2C00000; 1703 iov[44].iov_len = 8192; 1704 iov[45].iov_base = (void *)0xFEED2F00000; 1705 iov[45].iov_len = 4096; 1706 iov[46].iov_base = (void *)0xFEED3000000; 1707 iov[46].iov_len = 4096; 1708 iov[47].iov_base = (void *)0xFEED3100000; 1709 iov[47].iov_len = 4096; 1710 iov[48].iov_base = (void *)0xFEED3200000; 1711 iov[48].iov_len = 24576; 1712 iov[49].iov_base = (void *)0xFEED3300000; 1713 iov[49].iov_len = 16384; 1714 iov[50].iov_base = (void *)0xFEED3400000; 1715 iov[50].iov_len = 12288; 1716 iov[51].iov_base = (void *)0xFEED3500000; 1717 iov[51].iov_len = 4096; 1718 iov[52].iov_base = (void *)0xFEED3600000; 1719 iov[52].iov_len = 4096; 1720 iov[53].iov_base = (void *)0xFEED3700000; 1721 iov[53].iov_len = 4096; 1722 iov[54].iov_base = (void *)0xFEED3800000; 1723 iov[54].iov_len = 28672; 1724 iov[55].iov_base = (void *)0xFEED3900000; 1725 iov[55].iov_len = 20480; 1726 iov[56].iov_base = (void *)0xFEED3A00000; 1727 iov[56].iov_len = 4096; 1728 iov[57].iov_base = (void *)0xFEED3B00000; 1729 iov[57].iov_len = 12288; 1730 iov[58].iov_base = (void *)0xFEED3C00000; 1731 iov[58].iov_len = 4096; 1732 iov[59].iov_base = (void *)0xFEED3D00000; 1733 iov[59].iov_len = 4096; 1734 iov[60].iov_base = (void *)0xFEED3E00000; 1735 iov[60].iov_len = 352; 1736 1737 /* The 1st child IO must be from iov[0] to iov[31] split by the capacity 1738 * of child iovs, 1739 */ 1740 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0, 126, 32); 1741 expected_io->md_buf = md_buf; 1742 for (i = 0; i < 32; i++) { 1743 ut_expected_io_set_iov(expected_io, i, iov[i].iov_base, iov[i].iov_len); 1744 } 1745 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1746 1747 /* The 2nd child IO must be from iov[32] to the first 864 bytes of iov[33] 1748 * split by the IO boundary requirement. 1749 */ 1750 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 126, 2, 2); 1751 expected_io->md_buf = md_buf + 126 * 8; 1752 ut_expected_io_set_iov(expected_io, 0, iov[32].iov_base, iov[32].iov_len); 1753 ut_expected_io_set_iov(expected_io, 1, iov[33].iov_base, 864); 1754 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1755 1756 /* The 3rd child IO must be from the remaining 3232 bytes of iov[33] to 1757 * the first 864 bytes of iov[46] split by the IO boundary requirement. 1758 */ 1759 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 128, 128, 14); 1760 expected_io->md_buf = md_buf + 128 * 8; 1761 ut_expected_io_set_iov(expected_io, 0, (void *)((uintptr_t)iov[33].iov_base + 864), 1762 iov[33].iov_len - 864); 1763 ut_expected_io_set_iov(expected_io, 1, iov[34].iov_base, iov[34].iov_len); 1764 ut_expected_io_set_iov(expected_io, 2, iov[35].iov_base, iov[35].iov_len); 1765 ut_expected_io_set_iov(expected_io, 3, iov[36].iov_base, iov[36].iov_len); 1766 ut_expected_io_set_iov(expected_io, 4, iov[37].iov_base, iov[37].iov_len); 1767 ut_expected_io_set_iov(expected_io, 5, iov[38].iov_base, iov[38].iov_len); 1768 ut_expected_io_set_iov(expected_io, 6, iov[39].iov_base, iov[39].iov_len); 1769 ut_expected_io_set_iov(expected_io, 7, iov[40].iov_base, iov[40].iov_len); 1770 ut_expected_io_set_iov(expected_io, 8, iov[41].iov_base, iov[41].iov_len); 1771 ut_expected_io_set_iov(expected_io, 9, iov[42].iov_base, iov[42].iov_len); 1772 ut_expected_io_set_iov(expected_io, 10, iov[43].iov_base, iov[43].iov_len); 1773 ut_expected_io_set_iov(expected_io, 11, iov[44].iov_base, iov[44].iov_len); 1774 ut_expected_io_set_iov(expected_io, 12, iov[45].iov_base, iov[45].iov_len); 1775 ut_expected_io_set_iov(expected_io, 13, iov[46].iov_base, 864); 1776 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1777 1778 /* The 4th child IO must be from the remaining 3232 bytes of iov[46] to the 1779 * first 864 bytes of iov[52] split by the IO boundary requirement. 1780 */ 1781 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 256, 128, 7); 1782 expected_io->md_buf = md_buf + 256 * 8; 1783 ut_expected_io_set_iov(expected_io, 0, (void *)((uintptr_t)iov[46].iov_base + 864), 1784 iov[46].iov_len - 864); 1785 ut_expected_io_set_iov(expected_io, 1, iov[47].iov_base, iov[47].iov_len); 1786 ut_expected_io_set_iov(expected_io, 2, iov[48].iov_base, iov[48].iov_len); 1787 ut_expected_io_set_iov(expected_io, 3, iov[49].iov_base, iov[49].iov_len); 1788 ut_expected_io_set_iov(expected_io, 4, iov[50].iov_base, iov[50].iov_len); 1789 ut_expected_io_set_iov(expected_io, 5, iov[51].iov_base, iov[51].iov_len); 1790 ut_expected_io_set_iov(expected_io, 6, iov[52].iov_base, 864); 1791 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1792 1793 /* The 5th child IO must be from the remaining 3232 bytes of iov[52] to 1794 * the first 4096 bytes of iov[57] split by the IO boundary requirement. 1795 */ 1796 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 384, 128, 6); 1797 expected_io->md_buf = md_buf + 384 * 8; 1798 ut_expected_io_set_iov(expected_io, 0, (void *)((uintptr_t)iov[52].iov_base + 864), 1799 iov[52].iov_len - 864); 1800 ut_expected_io_set_iov(expected_io, 1, iov[53].iov_base, iov[53].iov_len); 1801 ut_expected_io_set_iov(expected_io, 2, iov[54].iov_base, iov[54].iov_len); 1802 ut_expected_io_set_iov(expected_io, 3, iov[55].iov_base, iov[55].iov_len); 1803 ut_expected_io_set_iov(expected_io, 4, iov[56].iov_base, iov[56].iov_len); 1804 ut_expected_io_set_iov(expected_io, 5, iov[57].iov_base, 4960); 1805 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1806 1807 /* The 6th child IO must be from the remaining 7328 bytes of iov[57] 1808 * to the first 3936 bytes of iov[58] split by the capacity of child iovs. 1809 */ 1810 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 512, 30, 3); 1811 expected_io->md_buf = md_buf + 512 * 8; 1812 ut_expected_io_set_iov(expected_io, 0, (void *)((uintptr_t)iov[57].iov_base + 4960), 1813 iov[57].iov_len - 4960); 1814 ut_expected_io_set_iov(expected_io, 1, iov[58].iov_base, iov[58].iov_len); 1815 ut_expected_io_set_iov(expected_io, 2, iov[59].iov_base, 3936); 1816 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1817 1818 /* The 7th child IO is from the remaining 160 bytes of iov[59] and iov[60]. */ 1819 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 542, 1, 2); 1820 expected_io->md_buf = md_buf + 542 * 8; 1821 ut_expected_io_set_iov(expected_io, 0, (void *)((uintptr_t)iov[59].iov_base + 3936), 1822 iov[59].iov_len - 3936); 1823 ut_expected_io_set_iov(expected_io, 1, iov[60].iov_base, iov[60].iov_len); 1824 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1825 1826 rc = spdk_bdev_readv_blocks_with_md(desc, io_ch, iov, 61, md_buf, 1827 0, 543, io_done, NULL); 1828 CU_ASSERT(rc == 0); 1829 CU_ASSERT(g_io_done == false); 1830 1831 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 1832 stub_complete_io(1); 1833 CU_ASSERT(g_io_done == false); 1834 1835 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 5); 1836 stub_complete_io(5); 1837 CU_ASSERT(g_io_done == false); 1838 1839 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 1840 stub_complete_io(1); 1841 CU_ASSERT(g_io_done == true); 1842 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 1843 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS); 1844 1845 /* Test a WRITE_ZEROES that would span an I/O boundary. WRITE_ZEROES should not be 1846 * split, so test that. 1847 */ 1848 bdev->optimal_io_boundary = 15; 1849 g_io_done = false; 1850 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, 9, 36, 0); 1851 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1852 1853 rc = spdk_bdev_write_zeroes_blocks(desc, io_ch, 9, 36, io_done, NULL); 1854 CU_ASSERT(rc == 0); 1855 CU_ASSERT(g_io_done == false); 1856 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 1857 stub_complete_io(1); 1858 CU_ASSERT(g_io_done == true); 1859 1860 /* Test an UNMAP. This should also not be split. */ 1861 bdev->optimal_io_boundary = 16; 1862 g_io_done = false; 1863 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_UNMAP, 15, 2, 0); 1864 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1865 1866 rc = spdk_bdev_unmap_blocks(desc, io_ch, 15, 2, io_done, NULL); 1867 CU_ASSERT(rc == 0); 1868 CU_ASSERT(g_io_done == false); 1869 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 1870 stub_complete_io(1); 1871 CU_ASSERT(g_io_done == true); 1872 1873 /* Test a FLUSH. This should also not be split. */ 1874 bdev->optimal_io_boundary = 16; 1875 g_io_done = false; 1876 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_FLUSH, 15, 2, 0); 1877 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1878 1879 rc = spdk_bdev_flush_blocks(desc, io_ch, 15, 2, io_done, NULL); 1880 CU_ASSERT(rc == 0); 1881 CU_ASSERT(g_io_done == false); 1882 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 1883 stub_complete_io(1); 1884 CU_ASSERT(g_io_done == true); 1885 1886 /* Test a COPY. This should also not be split. */ 1887 bdev->optimal_io_boundary = 15; 1888 g_io_done = false; 1889 expected_io = ut_alloc_expected_copy_io(SPDK_BDEV_IO_TYPE_COPY, 9, 45, 36); 1890 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1891 1892 rc = spdk_bdev_copy_blocks(desc, io_ch, 9, 45, 36, io_done, NULL); 1893 CU_ASSERT(rc == 0); 1894 CU_ASSERT(g_io_done == false); 1895 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 1896 stub_complete_io(1); 1897 CU_ASSERT(g_io_done == true); 1898 1899 CU_ASSERT(TAILQ_EMPTY(&g_bdev_ut_channel->expected_io)); 1900 1901 /* Children requests return an error status */ 1902 bdev->optimal_io_boundary = 16; 1903 iov[0].iov_base = (void *)0x10000; 1904 iov[0].iov_len = 512 * 64; 1905 g_io_exp_status = SPDK_BDEV_IO_STATUS_FAILED; 1906 g_io_done = false; 1907 g_io_status = SPDK_BDEV_IO_STATUS_SUCCESS; 1908 1909 rc = spdk_bdev_readv_blocks(desc, io_ch, iov, 1, 1, 64, io_done, NULL); 1910 CU_ASSERT(rc == 0); 1911 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 5); 1912 stub_complete_io(4); 1913 CU_ASSERT(g_io_done == false); 1914 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS); 1915 stub_complete_io(1); 1916 CU_ASSERT(g_io_done == true); 1917 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED); 1918 1919 /* Test if a multi vector command terminated with failure before continuing 1920 * splitting process when one of child I/O failed. 1921 * The multi vector command is as same as the above that needs to be split by strip 1922 * and then needs to be split further due to the capacity of child iovs. 1923 */ 1924 for (i = 0; i < SPDK_BDEV_IO_NUM_CHILD_IOV - 1; i++) { 1925 iov[i].iov_base = (void *)((i + 1) * 0x10000); 1926 iov[i].iov_len = 512; 1927 } 1928 iov[SPDK_BDEV_IO_NUM_CHILD_IOV - 1].iov_base = (void *)(SPDK_BDEV_IO_NUM_CHILD_IOV * 0x10000); 1929 iov[SPDK_BDEV_IO_NUM_CHILD_IOV - 1].iov_len = 256; 1930 1931 iov[SPDK_BDEV_IO_NUM_CHILD_IOV].iov_base = (void *)((SPDK_BDEV_IO_NUM_CHILD_IOV + 1) * 0x10000); 1932 iov[SPDK_BDEV_IO_NUM_CHILD_IOV].iov_len = 256; 1933 1934 iov[SPDK_BDEV_IO_NUM_CHILD_IOV + 1].iov_base = (void *)((SPDK_BDEV_IO_NUM_CHILD_IOV + 2) * 0x10000); 1935 iov[SPDK_BDEV_IO_NUM_CHILD_IOV + 1].iov_len = 512; 1936 1937 bdev->optimal_io_boundary = SPDK_BDEV_IO_NUM_CHILD_IOV; 1938 1939 g_io_exp_status = SPDK_BDEV_IO_STATUS_FAILED; 1940 g_io_done = false; 1941 g_io_status = SPDK_BDEV_IO_STATUS_SUCCESS; 1942 1943 rc = spdk_bdev_readv_blocks(desc, io_ch, iov, SPDK_BDEV_IO_NUM_CHILD_IOV * 2, 0, 1944 SPDK_BDEV_IO_NUM_CHILD_IOV + 1, io_done, NULL); 1945 CU_ASSERT(rc == 0); 1946 CU_ASSERT(g_io_done == false); 1947 1948 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 1949 stub_complete_io(1); 1950 CU_ASSERT(g_io_done == true); 1951 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED); 1952 1953 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 1954 1955 /* for this test we will create the following conditions to hit the code path where 1956 * we are trying to send and IO following a split that has no iovs because we had to 1957 * trim them for alignment reasons. 1958 * 1959 * - 16K boundary, our IO will start at offset 0 with a length of 0x4200 1960 * - Our IOVs are 0x212 in size so that we run into the 16K boundary at child IOV 1961 * position 30 and overshoot by 0x2e. 1962 * - That means we'll send the IO and loop back to pick up the remaining bytes at 1963 * child IOV index 31. When we do, we find that we have to shorten index 31 by 0x2e 1964 * which eliniates that vector so we just send the first split IO with 30 vectors 1965 * and let the completion pick up the last 2 vectors. 1966 */ 1967 bdev->optimal_io_boundary = 32; 1968 bdev->split_on_optimal_io_boundary = true; 1969 g_io_done = false; 1970 1971 /* Init all parent IOVs to 0x212 */ 1972 for (i = 0; i < SPDK_BDEV_IO_NUM_CHILD_IOV + 2; i++) { 1973 iov[i].iov_base = (void *)((i + 1) * 0x10000); 1974 iov[i].iov_len = 0x212; 1975 } 1976 1977 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0, SPDK_BDEV_IO_NUM_CHILD_IOV, 1978 SPDK_BDEV_IO_NUM_CHILD_IOV - 1); 1979 /* expect 0-29 to be 1:1 with the parent iov */ 1980 for (i = 0; i < SPDK_BDEV_IO_NUM_CHILD_IOV - 2; i++) { 1981 ut_expected_io_set_iov(expected_io, i, iov[i].iov_base, iov[i].iov_len); 1982 } 1983 1984 /* expect index 30 to be shortened to 0x1e4 (0x212 - 0x1e) because of the alignment 1985 * where 0x1e is the amount we overshot the 16K boundary 1986 */ 1987 ut_expected_io_set_iov(expected_io, SPDK_BDEV_IO_NUM_CHILD_IOV - 2, 1988 (void *)(iov[SPDK_BDEV_IO_NUM_CHILD_IOV - 2].iov_base), 0x1e4); 1989 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1990 1991 /* 2nd child IO will have 2 remaining vectors, one to pick up from the one that was 1992 * shortened that take it to the next boundary and then a final one to get us to 1993 * 0x4200 bytes for the IO. 1994 */ 1995 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, SPDK_BDEV_IO_NUM_CHILD_IOV, 1996 1, 2); 1997 /* position 30 picked up the remaining bytes to the next boundary */ 1998 ut_expected_io_set_iov(expected_io, 0, 1999 (void *)(iov[SPDK_BDEV_IO_NUM_CHILD_IOV - 2].iov_base + 0x1e4), 0x2e); 2000 2001 /* position 31 picked the the rest of the transfer to get us to 0x4200 */ 2002 ut_expected_io_set_iov(expected_io, 1, 2003 (void *)(iov[SPDK_BDEV_IO_NUM_CHILD_IOV - 1].iov_base), 0x1d2); 2004 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2005 2006 rc = spdk_bdev_readv_blocks(desc, io_ch, iov, SPDK_BDEV_IO_NUM_CHILD_IOV + 1, 0, 2007 SPDK_BDEV_IO_NUM_CHILD_IOV + 1, io_done, NULL); 2008 CU_ASSERT(rc == 0); 2009 CU_ASSERT(g_io_done == false); 2010 2011 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 2012 stub_complete_io(1); 2013 CU_ASSERT(g_io_done == false); 2014 2015 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 2016 stub_complete_io(1); 2017 CU_ASSERT(g_io_done == true); 2018 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 2019 2020 spdk_put_io_channel(io_ch); 2021 spdk_bdev_close(desc); 2022 free_bdev(bdev); 2023 ut_fini_bdev(); 2024 } 2025 2026 static void 2027 bdev_io_max_size_and_segment_split_test(void) 2028 { 2029 struct spdk_bdev *bdev; 2030 struct spdk_bdev_desc *desc = NULL; 2031 struct spdk_io_channel *io_ch; 2032 struct spdk_bdev_opts bdev_opts = {}; 2033 struct iovec iov[SPDK_BDEV_IO_NUM_CHILD_IOV * 2]; 2034 struct ut_expected_io *expected_io; 2035 uint64_t i; 2036 int rc; 2037 2038 spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts)); 2039 bdev_opts.bdev_io_pool_size = 512; 2040 bdev_opts.bdev_io_cache_size = 64; 2041 bdev_opts.opts_size = sizeof(bdev_opts); 2042 ut_init_bdev(&bdev_opts); 2043 2044 bdev = allocate_bdev("bdev0"); 2045 2046 rc = spdk_bdev_open_ext(bdev->name, true, bdev_ut_event_cb, NULL, &desc); 2047 CU_ASSERT(rc == 0); 2048 SPDK_CU_ASSERT_FATAL(desc != NULL); 2049 io_ch = spdk_bdev_get_io_channel(desc); 2050 CU_ASSERT(io_ch != NULL); 2051 2052 bdev->split_on_optimal_io_boundary = false; 2053 bdev->optimal_io_boundary = 0; 2054 2055 /* Case 0 max_num_segments == 0. 2056 * but segment size 2 * 512 > 512 2057 */ 2058 bdev->max_segment_size = 512; 2059 bdev->max_num_segments = 0; 2060 g_io_done = false; 2061 2062 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 14, 2, 2); 2063 ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 512); 2064 ut_expected_io_set_iov(expected_io, 1, (void *)(0xF000 + 512), 512); 2065 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2066 2067 rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 2, io_done, NULL); 2068 CU_ASSERT(rc == 0); 2069 CU_ASSERT(g_io_done == false); 2070 2071 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 2072 stub_complete_io(1); 2073 CU_ASSERT(g_io_done == true); 2074 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 2075 2076 /* Case 1 max_segment_size == 0 2077 * but iov num 2 > 1. 2078 */ 2079 bdev->max_segment_size = 0; 2080 bdev->max_num_segments = 1; 2081 g_io_done = false; 2082 2083 iov[0].iov_base = (void *)0x10000; 2084 iov[0].iov_len = 512; 2085 iov[1].iov_base = (void *)0x20000; 2086 iov[1].iov_len = 8 * 512; 2087 2088 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 14, 1, 1); 2089 ut_expected_io_set_iov(expected_io, 0, iov[0].iov_base, iov[0].iov_len); 2090 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2091 2092 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 15, 8, 1); 2093 ut_expected_io_set_iov(expected_io, 0, iov[1].iov_base, iov[1].iov_len); 2094 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2095 2096 rc = spdk_bdev_readv_blocks(desc, io_ch, iov, 2, 14, 9, io_done, NULL); 2097 CU_ASSERT(rc == 0); 2098 CU_ASSERT(g_io_done == false); 2099 2100 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 2101 stub_complete_io(2); 2102 CU_ASSERT(g_io_done == true); 2103 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 2104 2105 /* Test that a non-vector command is split correctly. 2106 * Set up the expected values before calling spdk_bdev_read_blocks 2107 */ 2108 bdev->max_segment_size = 512; 2109 bdev->max_num_segments = 1; 2110 g_io_done = false; 2111 2112 /* Child IO 0 */ 2113 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 14, 1, 1); 2114 ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 512); 2115 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2116 2117 /* Child IO 1 */ 2118 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 15, 1, 1); 2119 ut_expected_io_set_iov(expected_io, 0, (void *)(0xF000 + 1 * 512), 512); 2120 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2121 2122 /* spdk_bdev_read_blocks will submit the first child immediately. */ 2123 rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 2, io_done, NULL); 2124 CU_ASSERT(rc == 0); 2125 CU_ASSERT(g_io_done == false); 2126 2127 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 2128 stub_complete_io(2); 2129 CU_ASSERT(g_io_done == true); 2130 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 2131 2132 /* Now set up a more complex, multi-vector command that needs to be split, 2133 * including splitting iovecs. 2134 */ 2135 bdev->max_segment_size = 2 * 512; 2136 bdev->max_num_segments = 1; 2137 g_io_done = false; 2138 2139 iov[0].iov_base = (void *)0x10000; 2140 iov[0].iov_len = 2 * 512; 2141 iov[1].iov_base = (void *)0x20000; 2142 iov[1].iov_len = 4 * 512; 2143 iov[2].iov_base = (void *)0x30000; 2144 iov[2].iov_len = 6 * 512; 2145 2146 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 14, 2, 1); 2147 ut_expected_io_set_iov(expected_io, 0, iov[0].iov_base, 512 * 2); 2148 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2149 2150 /* Split iov[1].size to 2 iov entries then split the segments */ 2151 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 16, 2, 1); 2152 ut_expected_io_set_iov(expected_io, 0, iov[1].iov_base, 512 * 2); 2153 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2154 2155 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 18, 2, 1); 2156 ut_expected_io_set_iov(expected_io, 0, iov[1].iov_base + 512 * 2, 512 * 2); 2157 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2158 2159 /* Split iov[2].size to 3 iov entries then split the segments */ 2160 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 20, 2, 1); 2161 ut_expected_io_set_iov(expected_io, 0, iov[2].iov_base, 512 * 2); 2162 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2163 2164 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 22, 2, 1); 2165 ut_expected_io_set_iov(expected_io, 0, iov[2].iov_base + 512 * 2, 512 * 2); 2166 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2167 2168 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 24, 2, 1); 2169 ut_expected_io_set_iov(expected_io, 0, iov[2].iov_base + 512 * 4, 512 * 2); 2170 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2171 2172 rc = spdk_bdev_writev_blocks(desc, io_ch, iov, 3, 14, 12, io_done, NULL); 2173 CU_ASSERT(rc == 0); 2174 CU_ASSERT(g_io_done == false); 2175 2176 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 6); 2177 stub_complete_io(6); 2178 CU_ASSERT(g_io_done == true); 2179 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 2180 2181 /* Test multi vector command that needs to be split by strip and then needs to be 2182 * split further due to the capacity of parent IO child iovs. 2183 */ 2184 bdev->max_segment_size = 512; 2185 bdev->max_num_segments = 1; 2186 g_io_done = false; 2187 2188 for (i = 0; i < SPDK_BDEV_IO_NUM_CHILD_IOV; i++) { 2189 iov[i].iov_base = (void *)((i + 1) * 0x10000); 2190 iov[i].iov_len = 512 * 2; 2191 } 2192 2193 /* Each input iov.size is split into 2 iovs, 2194 * half of the input iov can fill all child iov entries of a single IO. 2195 */ 2196 for (i = 0; i < SPDK_BDEV_IO_NUM_CHILD_IOV / 2; i++) { 2197 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 2 * i, 1, 1); 2198 ut_expected_io_set_iov(expected_io, 0, iov[i].iov_base, 512); 2199 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2200 2201 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 2 * i + 1, 1, 1); 2202 ut_expected_io_set_iov(expected_io, 0, iov[i].iov_base + 512, 512); 2203 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2204 } 2205 2206 /* The remaining iov is split in the second round */ 2207 for (i = SPDK_BDEV_IO_NUM_CHILD_IOV / 2; i < SPDK_BDEV_IO_NUM_CHILD_IOV; i++) { 2208 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, i * 2, 1, 1); 2209 ut_expected_io_set_iov(expected_io, 0, iov[i].iov_base, 512); 2210 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2211 2212 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, i * 2 + 1, 1, 1); 2213 ut_expected_io_set_iov(expected_io, 0, iov[i].iov_base + 512, 512); 2214 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2215 } 2216 2217 rc = spdk_bdev_readv_blocks(desc, io_ch, iov, SPDK_BDEV_IO_NUM_CHILD_IOV, 0, 2218 SPDK_BDEV_IO_NUM_CHILD_IOV * 2, io_done, NULL); 2219 CU_ASSERT(rc == 0); 2220 CU_ASSERT(g_io_done == false); 2221 2222 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == SPDK_BDEV_IO_NUM_CHILD_IOV); 2223 stub_complete_io(SPDK_BDEV_IO_NUM_CHILD_IOV); 2224 CU_ASSERT(g_io_done == false); 2225 2226 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == SPDK_BDEV_IO_NUM_CHILD_IOV); 2227 stub_complete_io(SPDK_BDEV_IO_NUM_CHILD_IOV); 2228 CU_ASSERT(g_io_done == true); 2229 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 2230 2231 /* A wrong case, a child IO that is divided does 2232 * not meet the principle of multiples of block size, 2233 * and exits with error 2234 */ 2235 bdev->max_segment_size = 512; 2236 bdev->max_num_segments = 1; 2237 g_io_done = false; 2238 2239 iov[0].iov_base = (void *)0x10000; 2240 iov[0].iov_len = 512 + 256; 2241 iov[1].iov_base = (void *)0x20000; 2242 iov[1].iov_len = 256; 2243 2244 /* iov[0] is split to 512 and 256. 2245 * 256 is less than a block size, and it is found 2246 * in the next round of split that it is the first child IO smaller than 2247 * the block size, so the error exit 2248 */ 2249 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0, 1, 1); 2250 ut_expected_io_set_iov(expected_io, 0, iov[0].iov_base, 512); 2251 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2252 2253 rc = spdk_bdev_readv_blocks(desc, io_ch, iov, 2, 0, 2, io_done, NULL); 2254 CU_ASSERT(rc == 0); 2255 CU_ASSERT(g_io_done == false); 2256 2257 /* First child IO is OK */ 2258 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 2259 stub_complete_io(1); 2260 CU_ASSERT(g_io_done == true); 2261 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 2262 2263 /* error exit */ 2264 stub_complete_io(1); 2265 CU_ASSERT(g_io_done == true); 2266 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED); 2267 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 2268 2269 /* Test multi vector command that needs to be split by strip and then needs to be 2270 * split further due to the capacity of child iovs. 2271 * 2272 * In this case, the last two iovs need to be split, but it will exceed the capacity 2273 * of child iovs, so it needs to wait until the first batch completed. 2274 */ 2275 bdev->max_segment_size = 512; 2276 bdev->max_num_segments = SPDK_BDEV_IO_NUM_CHILD_IOV; 2277 g_io_done = false; 2278 2279 for (i = 0; i < SPDK_BDEV_IO_NUM_CHILD_IOV - 2; i++) { 2280 iov[i].iov_base = (void *)((i + 1) * 0x10000); 2281 iov[i].iov_len = 512; 2282 } 2283 for (i = SPDK_BDEV_IO_NUM_CHILD_IOV - 2; i < SPDK_BDEV_IO_NUM_CHILD_IOV; i++) { 2284 iov[i].iov_base = (void *)((i + 1) * 0x10000); 2285 iov[i].iov_len = 512 * 2; 2286 } 2287 2288 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0, 2289 SPDK_BDEV_IO_NUM_CHILD_IOV, SPDK_BDEV_IO_NUM_CHILD_IOV); 2290 /* 0 ~ (SPDK_BDEV_IO_NUM_CHILD_IOV - 2) Will not be split */ 2291 for (i = 0; i < SPDK_BDEV_IO_NUM_CHILD_IOV - 2; i++) { 2292 ut_expected_io_set_iov(expected_io, i, iov[i].iov_base, iov[i].iov_len); 2293 } 2294 /* (SPDK_BDEV_IO_NUM_CHILD_IOV - 2) is split */ 2295 ut_expected_io_set_iov(expected_io, i, iov[i].iov_base, 512); 2296 ut_expected_io_set_iov(expected_io, i + 1, iov[i].iov_base + 512, 512); 2297 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2298 2299 /* Child iov entries exceed the max num of parent IO so split it in next round */ 2300 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, SPDK_BDEV_IO_NUM_CHILD_IOV, 2, 2); 2301 ut_expected_io_set_iov(expected_io, 0, iov[i + 1].iov_base, 512); 2302 ut_expected_io_set_iov(expected_io, 1, iov[i + 1].iov_base + 512, 512); 2303 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2304 2305 rc = spdk_bdev_readv_blocks(desc, io_ch, iov, SPDK_BDEV_IO_NUM_CHILD_IOV, 0, 2306 SPDK_BDEV_IO_NUM_CHILD_IOV + 2, io_done, NULL); 2307 CU_ASSERT(rc == 0); 2308 CU_ASSERT(g_io_done == false); 2309 2310 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 2311 stub_complete_io(1); 2312 CU_ASSERT(g_io_done == false); 2313 2314 /* Next round */ 2315 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 2316 stub_complete_io(1); 2317 CU_ASSERT(g_io_done == true); 2318 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 2319 2320 /* This case is similar to the previous one, but the io composed of 2321 * the last few entries of child iov is not enough for a blocklen, so they 2322 * cannot be put into this IO, but wait until the next time. 2323 */ 2324 bdev->max_segment_size = 512; 2325 bdev->max_num_segments = SPDK_BDEV_IO_NUM_CHILD_IOV; 2326 g_io_done = false; 2327 2328 for (i = 0; i < SPDK_BDEV_IO_NUM_CHILD_IOV - 2; i++) { 2329 iov[i].iov_base = (void *)((i + 1) * 0x10000); 2330 iov[i].iov_len = 512; 2331 } 2332 2333 for (i = SPDK_BDEV_IO_NUM_CHILD_IOV - 2; i < SPDK_BDEV_IO_NUM_CHILD_IOV + 2; i++) { 2334 iov[i].iov_base = (void *)((i + 1) * 0x10000); 2335 iov[i].iov_len = 128; 2336 } 2337 2338 /* First child iovcnt is't SPDK_BDEV_IO_NUM_CHILD_IOV but SPDK_BDEV_IO_NUM_CHILD_IOV - 2. 2339 * Because the left 2 iov is not enough for a blocklen. 2340 */ 2341 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0, 2342 SPDK_BDEV_IO_NUM_CHILD_IOV - 2, SPDK_BDEV_IO_NUM_CHILD_IOV - 2); 2343 for (i = 0; i < SPDK_BDEV_IO_NUM_CHILD_IOV - 2; i++) { 2344 ut_expected_io_set_iov(expected_io, i, iov[i].iov_base, iov[i].iov_len); 2345 } 2346 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2347 2348 /* The second child io waits until the end of the first child io before executing. 2349 * Because the iovcnt of the two IOs exceeds the child iovcnt of the parent IO. 2350 * SPDK_BDEV_IO_NUM_CHILD_IOV - 2 to SPDK_BDEV_IO_NUM_CHILD_IOV + 2 2351 */ 2352 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, SPDK_BDEV_IO_NUM_CHILD_IOV - 2, 2353 1, 4); 2354 ut_expected_io_set_iov(expected_io, 0, iov[i].iov_base, iov[i].iov_len); 2355 ut_expected_io_set_iov(expected_io, 1, iov[i + 1].iov_base, iov[i + 1].iov_len); 2356 ut_expected_io_set_iov(expected_io, 2, iov[i + 2].iov_base, iov[i + 2].iov_len); 2357 ut_expected_io_set_iov(expected_io, 3, iov[i + 3].iov_base, iov[i + 3].iov_len); 2358 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2359 2360 rc = spdk_bdev_readv_blocks(desc, io_ch, iov, SPDK_BDEV_IO_NUM_CHILD_IOV + 2, 0, 2361 SPDK_BDEV_IO_NUM_CHILD_IOV - 1, io_done, NULL); 2362 CU_ASSERT(rc == 0); 2363 CU_ASSERT(g_io_done == false); 2364 2365 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 2366 stub_complete_io(1); 2367 CU_ASSERT(g_io_done == false); 2368 2369 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 2370 stub_complete_io(1); 2371 CU_ASSERT(g_io_done == true); 2372 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 2373 2374 /* A very complicated case. Each sg entry exceeds max_segment_size and 2375 * needs to be split. At the same time, child io must be a multiple of blocklen. 2376 * At the same time, child iovcnt exceeds parent iovcnt. 2377 */ 2378 bdev->max_segment_size = 512 + 128; 2379 bdev->max_num_segments = 3; 2380 g_io_done = false; 2381 2382 for (i = 0; i < SPDK_BDEV_IO_NUM_CHILD_IOV - 2; i++) { 2383 iov[i].iov_base = (void *)((i + 1) * 0x10000); 2384 iov[i].iov_len = 512 + 256; 2385 } 2386 2387 for (i = SPDK_BDEV_IO_NUM_CHILD_IOV - 2; i < SPDK_BDEV_IO_NUM_CHILD_IOV + 2; i++) { 2388 iov[i].iov_base = (void *)((i + 1) * 0x10000); 2389 iov[i].iov_len = 512 + 128; 2390 } 2391 2392 /* Child IOs use 9 entries per for() round and 3 * 9 = 27 child iov entries. 2393 * Consume 4 parent IO iov entries per for() round and 6 block size. 2394 * Generate 9 child IOs. 2395 */ 2396 for (i = 0; i < 3; i++) { 2397 uint32_t j = i * 4; 2398 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, i * 6, 2, 3); 2399 ut_expected_io_set_iov(expected_io, 0, iov[j].iov_base, 640); 2400 ut_expected_io_set_iov(expected_io, 1, iov[j].iov_base + 640, 128); 2401 ut_expected_io_set_iov(expected_io, 2, iov[j + 1].iov_base, 256); 2402 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2403 2404 /* Child io must be a multiple of blocklen 2405 * iov[j + 2] must be split. If the third entry is also added, 2406 * the multiple of blocklen cannot be guaranteed. But it still 2407 * occupies one iov entry of the parent child iov. 2408 */ 2409 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, i * 6 + 2, 2, 2); 2410 ut_expected_io_set_iov(expected_io, 0, iov[j + 1].iov_base + 256, 512); 2411 ut_expected_io_set_iov(expected_io, 1, iov[j + 2].iov_base, 512); 2412 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2413 2414 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, i * 6 + 4, 2, 3); 2415 ut_expected_io_set_iov(expected_io, 0, iov[j + 2].iov_base + 512, 256); 2416 ut_expected_io_set_iov(expected_io, 1, iov[j + 3].iov_base, 640); 2417 ut_expected_io_set_iov(expected_io, 2, iov[j + 3].iov_base + 640, 128); 2418 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2419 } 2420 2421 /* Child iov position at 27, the 10th child IO 2422 * iov entry index is 3 * 4 and offset is 3 * 6 2423 */ 2424 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 18, 2, 3); 2425 ut_expected_io_set_iov(expected_io, 0, iov[12].iov_base, 640); 2426 ut_expected_io_set_iov(expected_io, 1, iov[12].iov_base + 640, 128); 2427 ut_expected_io_set_iov(expected_io, 2, iov[13].iov_base, 256); 2428 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2429 2430 /* Child iov position at 30, the 11th child IO */ 2431 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 20, 2, 2); 2432 ut_expected_io_set_iov(expected_io, 0, iov[13].iov_base + 256, 512); 2433 ut_expected_io_set_iov(expected_io, 1, iov[14].iov_base, 512); 2434 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2435 2436 /* The 2nd split round and iovpos is 0, the 12th child IO */ 2437 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 22, 2, 3); 2438 ut_expected_io_set_iov(expected_io, 0, iov[14].iov_base + 512, 256); 2439 ut_expected_io_set_iov(expected_io, 1, iov[15].iov_base, 640); 2440 ut_expected_io_set_iov(expected_io, 2, iov[15].iov_base + 640, 128); 2441 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2442 2443 /* Consume 9 child IOs and 27 child iov entries. 2444 * Consume 4 parent IO iov entries per for() round and 6 block size. 2445 * Parent IO iov index start from 16 and block offset start from 24 2446 */ 2447 for (i = 0; i < 3; i++) { 2448 uint32_t j = i * 4 + 16; 2449 uint32_t offset = i * 6 + 24; 2450 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, offset, 2, 3); 2451 ut_expected_io_set_iov(expected_io, 0, iov[j].iov_base, 640); 2452 ut_expected_io_set_iov(expected_io, 1, iov[j].iov_base + 640, 128); 2453 ut_expected_io_set_iov(expected_io, 2, iov[j + 1].iov_base, 256); 2454 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2455 2456 /* Child io must be a multiple of blocklen 2457 * iov[j + 2] must be split. If the third entry is also added, 2458 * the multiple of blocklen cannot be guaranteed. But it still 2459 * occupies one iov entry of the parent child iov. 2460 */ 2461 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, offset + 2, 2, 2); 2462 ut_expected_io_set_iov(expected_io, 0, iov[j + 1].iov_base + 256, 512); 2463 ut_expected_io_set_iov(expected_io, 1, iov[j + 2].iov_base, 512); 2464 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2465 2466 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, offset + 4, 2, 3); 2467 ut_expected_io_set_iov(expected_io, 0, iov[j + 2].iov_base + 512, 256); 2468 ut_expected_io_set_iov(expected_io, 1, iov[j + 3].iov_base, 640); 2469 ut_expected_io_set_iov(expected_io, 2, iov[j + 3].iov_base + 640, 128); 2470 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2471 } 2472 2473 /* The 22th child IO, child iov position at 30 */ 2474 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 42, 1, 1); 2475 ut_expected_io_set_iov(expected_io, 0, iov[28].iov_base, 512); 2476 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2477 2478 /* The third round */ 2479 /* Here is the 23nd child IO and child iovpos is 0 */ 2480 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 43, 2, 3); 2481 ut_expected_io_set_iov(expected_io, 0, iov[28].iov_base + 512, 256); 2482 ut_expected_io_set_iov(expected_io, 1, iov[29].iov_base, 640); 2483 ut_expected_io_set_iov(expected_io, 2, iov[29].iov_base + 640, 128); 2484 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2485 2486 /* The 24th child IO */ 2487 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 45, 3, 3); 2488 ut_expected_io_set_iov(expected_io, 0, iov[30].iov_base, 640); 2489 ut_expected_io_set_iov(expected_io, 1, iov[31].iov_base, 640); 2490 ut_expected_io_set_iov(expected_io, 2, iov[32].iov_base, 256); 2491 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2492 2493 /* The 25th child IO */ 2494 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 48, 2, 2); 2495 ut_expected_io_set_iov(expected_io, 0, iov[32].iov_base + 256, 384); 2496 ut_expected_io_set_iov(expected_io, 1, iov[33].iov_base, 640); 2497 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2498 2499 rc = spdk_bdev_readv_blocks(desc, io_ch, iov, SPDK_BDEV_IO_NUM_CHILD_IOV + 2, 0, 2500 50, io_done, NULL); 2501 CU_ASSERT(rc == 0); 2502 CU_ASSERT(g_io_done == false); 2503 2504 /* Parent IO supports up to 32 child iovs, so it is calculated that 2505 * a maximum of 11 IOs can be split at a time, and the 2506 * splitting will continue after the first batch is over. 2507 */ 2508 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 11); 2509 stub_complete_io(11); 2510 CU_ASSERT(g_io_done == false); 2511 2512 /* The 2nd round */ 2513 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 11); 2514 stub_complete_io(11); 2515 CU_ASSERT(g_io_done == false); 2516 2517 /* The last round */ 2518 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 3); 2519 stub_complete_io(3); 2520 CU_ASSERT(g_io_done == true); 2521 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 2522 2523 /* Test an WRITE_ZEROES. This should also not be split. */ 2524 bdev->max_segment_size = 512; 2525 bdev->max_num_segments = 1; 2526 g_io_done = false; 2527 2528 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, 9, 36, 0); 2529 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2530 2531 rc = spdk_bdev_write_zeroes_blocks(desc, io_ch, 9, 36, io_done, NULL); 2532 CU_ASSERT(rc == 0); 2533 CU_ASSERT(g_io_done == false); 2534 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 2535 stub_complete_io(1); 2536 CU_ASSERT(g_io_done == true); 2537 2538 /* Test an UNMAP. This should also not be split. */ 2539 g_io_done = false; 2540 2541 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_UNMAP, 15, 4, 0); 2542 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2543 2544 rc = spdk_bdev_unmap_blocks(desc, io_ch, 15, 4, io_done, NULL); 2545 CU_ASSERT(rc == 0); 2546 CU_ASSERT(g_io_done == false); 2547 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 2548 stub_complete_io(1); 2549 CU_ASSERT(g_io_done == true); 2550 2551 /* Test a FLUSH. This should also not be split. */ 2552 g_io_done = false; 2553 2554 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_FLUSH, 15, 4, 0); 2555 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2556 2557 rc = spdk_bdev_flush_blocks(desc, io_ch, 15, 4, io_done, NULL); 2558 CU_ASSERT(rc == 0); 2559 CU_ASSERT(g_io_done == false); 2560 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 2561 stub_complete_io(1); 2562 CU_ASSERT(g_io_done == true); 2563 2564 /* Test a COPY. This should also not be split. */ 2565 g_io_done = false; 2566 2567 expected_io = ut_alloc_expected_copy_io(SPDK_BDEV_IO_TYPE_COPY, 9, 45, 36); 2568 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2569 2570 rc = spdk_bdev_copy_blocks(desc, io_ch, 9, 45, 36, io_done, NULL); 2571 CU_ASSERT(rc == 0); 2572 CU_ASSERT(g_io_done == false); 2573 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 2574 stub_complete_io(1); 2575 CU_ASSERT(g_io_done == true); 2576 2577 /* Test that IOs are split on max_rw_size */ 2578 bdev->max_rw_size = 2; 2579 bdev->max_segment_size = 0; 2580 bdev->max_num_segments = 0; 2581 g_io_done = false; 2582 2583 /* 5 blocks in a contiguous buffer */ 2584 iov[0].iov_base = (void *)0x10000; 2585 iov[0].iov_len = 5 * 512; 2586 2587 /* First: offset=0, num_blocks=2 */ 2588 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0, 2, 1); 2589 ut_expected_io_set_iov(expected_io, 0, (void *)0x10000, 2 * 512); 2590 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2591 /* Second: offset=2, num_blocks=2 */ 2592 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 2, 2, 1); 2593 ut_expected_io_set_iov(expected_io, 0, (void *)0x10000 + 2 * 512, 2 * 512); 2594 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2595 /* Third: offset=4, num_blocks=1 */ 2596 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 4, 1, 1); 2597 ut_expected_io_set_iov(expected_io, 0, (void *)0x10000 + 4 * 512, 512); 2598 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2599 2600 rc = spdk_bdev_readv_blocks(desc, io_ch, iov, 1, 0, 5, io_done, NULL); 2601 CU_ASSERT(rc == 0); 2602 CU_ASSERT(g_io_done == false); 2603 2604 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 3); 2605 stub_complete_io(3); 2606 CU_ASSERT(g_io_done == true); 2607 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 2608 2609 /* Check splitting on both max_rw_size + max_num_segments */ 2610 bdev->max_rw_size = 2; 2611 bdev->max_num_segments = 2; 2612 bdev->max_segment_size = 0; 2613 g_io_done = false; 2614 2615 /* 5 blocks split across 4 iovs */ 2616 iov[0].iov_base = (void *)0x10000; 2617 iov[0].iov_len = 3 * 512; 2618 iov[1].iov_base = (void *)0x20000; 2619 iov[1].iov_len = 256; 2620 iov[2].iov_base = (void *)0x30000; 2621 iov[2].iov_len = 256; 2622 iov[3].iov_base = (void *)0x40000; 2623 iov[3].iov_len = 512; 2624 2625 /* First: offset=0, num_blocks=2, iovcnt=1 */ 2626 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0, 2, 1); 2627 ut_expected_io_set_iov(expected_io, 0, (void *)0x10000, 2 * 512); 2628 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2629 /* Second: offset=2, num_blocks=1, iovcnt=1 (max_segment_size prevents from submitting 2630 * the rest of iov[0], and iov[1]+iov[2]) 2631 */ 2632 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 2, 1, 1); 2633 ut_expected_io_set_iov(expected_io, 0, (void *)0x10000 + 2 * 512, 512); 2634 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2635 /* Third: offset=3, num_blocks=1, iovcnt=2 (iov[1]+iov[2]) */ 2636 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 3, 1, 2); 2637 ut_expected_io_set_iov(expected_io, 0, (void *)0x20000, 256); 2638 ut_expected_io_set_iov(expected_io, 1, (void *)0x30000, 256); 2639 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2640 /* Fourth: offset=4, num_blocks=1, iovcnt=1 (iov[3]) */ 2641 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 4, 1, 1); 2642 ut_expected_io_set_iov(expected_io, 0, (void *)0x40000, 512); 2643 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2644 2645 rc = spdk_bdev_readv_blocks(desc, io_ch, iov, 4, 0, 5, io_done, NULL); 2646 CU_ASSERT(rc == 0); 2647 CU_ASSERT(g_io_done == false); 2648 2649 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 4); 2650 stub_complete_io(4); 2651 CU_ASSERT(g_io_done == true); 2652 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 2653 2654 /* Check splitting on both max_rw_size + max_segment_size */ 2655 bdev->max_rw_size = 2; 2656 bdev->max_segment_size = 512; 2657 bdev->max_num_segments = 0; 2658 g_io_done = false; 2659 2660 /* 6 blocks in a contiguous buffer */ 2661 iov[0].iov_base = (void *)0x10000; 2662 iov[0].iov_len = 6 * 512; 2663 2664 /* We expect 3 IOs each with 2 blocks and 2 iovs */ 2665 for (i = 0; i < 3; ++i) { 2666 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, i * 2, 2, 2); 2667 ut_expected_io_set_iov(expected_io, 0, (void *)0x10000 + i * 2 * 512, 512); 2668 ut_expected_io_set_iov(expected_io, 1, (void *)0x10000 + i * 2 * 512 + 512, 512); 2669 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2670 } 2671 2672 rc = spdk_bdev_readv_blocks(desc, io_ch, iov, 1, 0, 6, io_done, NULL); 2673 CU_ASSERT(rc == 0); 2674 CU_ASSERT(g_io_done == false); 2675 2676 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 3); 2677 stub_complete_io(3); 2678 CU_ASSERT(g_io_done == true); 2679 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 2680 2681 /* Check splitting on max_rw_size limited by SPDK_BDEV_IO_NUM_CHILD_IOV */ 2682 bdev->max_rw_size = 1; 2683 bdev->max_segment_size = 0; 2684 bdev->max_num_segments = 0; 2685 g_io_done = false; 2686 2687 /* SPDK_BDEV_IO_NUM_CHILD_IOV + 1 blocks */ 2688 iov[0].iov_base = (void *)0x10000; 2689 iov[0].iov_len = (SPDK_BDEV_IO_NUM_CHILD_IOV + 1) * 512; 2690 2691 /* We expect SPDK_BDEV_IO_NUM_CHILD_IOV + 1 IOs each with a single iov */ 2692 for (i = 0; i < 3; ++i) { 2693 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, i, 1, 1); 2694 ut_expected_io_set_iov(expected_io, 0, (void *)0x10000 + i * 512, 512); 2695 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2696 } 2697 2698 rc = spdk_bdev_readv_blocks(desc, io_ch, iov, 1, 0, SPDK_BDEV_IO_NUM_CHILD_IOV + 1, io_done, NULL); 2699 CU_ASSERT(rc == 0); 2700 CU_ASSERT(g_io_done == false); 2701 2702 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == SPDK_BDEV_IO_NUM_CHILD_IOV); 2703 stub_complete_io(SPDK_BDEV_IO_NUM_CHILD_IOV); 2704 CU_ASSERT(g_io_done == false); 2705 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 2706 stub_complete_io(1); 2707 CU_ASSERT(g_io_done == true); 2708 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 2709 2710 spdk_put_io_channel(io_ch); 2711 spdk_bdev_close(desc); 2712 free_bdev(bdev); 2713 ut_fini_bdev(); 2714 } 2715 2716 static void 2717 bdev_io_mix_split_test(void) 2718 { 2719 struct spdk_bdev *bdev; 2720 struct spdk_bdev_desc *desc = NULL; 2721 struct spdk_io_channel *io_ch; 2722 struct spdk_bdev_opts bdev_opts = {}; 2723 struct iovec iov[SPDK_BDEV_IO_NUM_CHILD_IOV * 2]; 2724 struct ut_expected_io *expected_io; 2725 uint64_t i; 2726 int rc; 2727 2728 spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts)); 2729 bdev_opts.bdev_io_pool_size = 512; 2730 bdev_opts.bdev_io_cache_size = 64; 2731 ut_init_bdev(&bdev_opts); 2732 2733 bdev = allocate_bdev("bdev0"); 2734 2735 rc = spdk_bdev_open_ext(bdev->name, true, bdev_ut_event_cb, NULL, &desc); 2736 CU_ASSERT(rc == 0); 2737 SPDK_CU_ASSERT_FATAL(desc != NULL); 2738 io_ch = spdk_bdev_get_io_channel(desc); 2739 CU_ASSERT(io_ch != NULL); 2740 2741 /* First case optimal_io_boundary == max_segment_size * max_num_segments */ 2742 bdev->split_on_optimal_io_boundary = true; 2743 bdev->optimal_io_boundary = 16; 2744 2745 bdev->max_segment_size = 512; 2746 bdev->max_num_segments = 16; 2747 g_io_done = false; 2748 2749 /* IO crossing the IO boundary requires split 2750 * Total 2 child IOs. 2751 */ 2752 2753 /* The 1st child IO split the segment_size to multiple segment entry */ 2754 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 14, 2, 2); 2755 ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 512); 2756 ut_expected_io_set_iov(expected_io, 1, (void *)(0xF000 + 512), 512); 2757 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2758 2759 /* The 2nd child IO split the segment_size to multiple segment entry */ 2760 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 16, 2, 2); 2761 ut_expected_io_set_iov(expected_io, 0, (void *)(0xF000 + 2 * 512), 512); 2762 ut_expected_io_set_iov(expected_io, 1, (void *)(0xF000 + 3 * 512), 512); 2763 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2764 2765 rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 4, io_done, NULL); 2766 CU_ASSERT(rc == 0); 2767 CU_ASSERT(g_io_done == false); 2768 2769 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 2770 stub_complete_io(2); 2771 CU_ASSERT(g_io_done == true); 2772 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 2773 2774 /* Second case optimal_io_boundary > max_segment_size * max_num_segments */ 2775 bdev->max_segment_size = 15 * 512; 2776 bdev->max_num_segments = 1; 2777 g_io_done = false; 2778 2779 /* IO crossing the IO boundary requires split. 2780 * The 1st child IO segment size exceeds the max_segment_size, 2781 * So 1st child IO will be split to multiple segment entry. 2782 * Then it split to 2 child IOs because of the max_num_segments. 2783 * Total 3 child IOs. 2784 */ 2785 2786 /* The first 2 IOs are in an IO boundary. 2787 * Because the optimal_io_boundary > max_segment_size * max_num_segments 2788 * So it split to the first 2 IOs. 2789 */ 2790 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0, 15, 1); 2791 ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 512 * 15); 2792 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2793 2794 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 15, 1, 1); 2795 ut_expected_io_set_iov(expected_io, 0, (void *)(0xF000 + 512 * 15), 512); 2796 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2797 2798 /* The 3rd Child IO is because of the io boundary */ 2799 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 16, 2, 1); 2800 ut_expected_io_set_iov(expected_io, 0, (void *)(0xF000 + 512 * 16), 512 * 2); 2801 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2802 2803 rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 0, 18, io_done, NULL); 2804 CU_ASSERT(rc == 0); 2805 CU_ASSERT(g_io_done == false); 2806 2807 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 3); 2808 stub_complete_io(3); 2809 CU_ASSERT(g_io_done == true); 2810 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 2811 2812 /* Third case optimal_io_boundary < max_segment_size * max_num_segments */ 2813 bdev->max_segment_size = 17 * 512; 2814 bdev->max_num_segments = 1; 2815 g_io_done = false; 2816 2817 /* IO crossing the IO boundary requires split. 2818 * Child IO does not split. 2819 * Total 2 child IOs. 2820 */ 2821 2822 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0, 16, 1); 2823 ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 512 * 16); 2824 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2825 2826 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 16, 2, 1); 2827 ut_expected_io_set_iov(expected_io, 0, (void *)(0xF000 + 512 * 16), 512 * 2); 2828 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2829 2830 rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 0, 18, io_done, NULL); 2831 CU_ASSERT(rc == 0); 2832 CU_ASSERT(g_io_done == false); 2833 2834 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 2835 stub_complete_io(2); 2836 CU_ASSERT(g_io_done == true); 2837 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 2838 2839 /* Now set up a more complex, multi-vector command that needs to be split, 2840 * including splitting iovecs. 2841 * optimal_io_boundary < max_segment_size * max_num_segments 2842 */ 2843 bdev->max_segment_size = 3 * 512; 2844 bdev->max_num_segments = 6; 2845 g_io_done = false; 2846 2847 iov[0].iov_base = (void *)0x10000; 2848 iov[0].iov_len = 4 * 512; 2849 iov[1].iov_base = (void *)0x20000; 2850 iov[1].iov_len = 4 * 512; 2851 iov[2].iov_base = (void *)0x30000; 2852 iov[2].iov_len = 10 * 512; 2853 2854 /* IO crossing the IO boundary requires split. 2855 * The 1st child IO segment size exceeds the max_segment_size and after 2856 * splitting segment_size, the num_segments exceeds max_num_segments. 2857 * So 1st child IO will be split to 2 child IOs. 2858 * Total 3 child IOs. 2859 */ 2860 2861 /* The first 2 IOs are in an IO boundary. 2862 * After splitting segment size the segment num exceeds. 2863 * So it splits to 2 child IOs. 2864 */ 2865 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 0, 14, 6); 2866 ut_expected_io_set_iov(expected_io, 0, iov[0].iov_base, 512 * 3); 2867 ut_expected_io_set_iov(expected_io, 1, iov[0].iov_base + 512 * 3, 512); 2868 ut_expected_io_set_iov(expected_io, 2, iov[1].iov_base, 512 * 3); 2869 ut_expected_io_set_iov(expected_io, 3, iov[1].iov_base + 512 * 3, 512); 2870 ut_expected_io_set_iov(expected_io, 4, iov[2].iov_base, 512 * 3); 2871 ut_expected_io_set_iov(expected_io, 5, iov[2].iov_base + 512 * 3, 512 * 3); 2872 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2873 2874 /* The 2nd child IO has the left segment entry */ 2875 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 14, 2, 1); 2876 ut_expected_io_set_iov(expected_io, 0, iov[2].iov_base + 512 * 6, 512 * 2); 2877 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2878 2879 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 16, 2, 1); 2880 ut_expected_io_set_iov(expected_io, 0, iov[2].iov_base + 512 * 8, 512 * 2); 2881 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2882 2883 rc = spdk_bdev_writev_blocks(desc, io_ch, iov, 3, 0, 18, io_done, NULL); 2884 CU_ASSERT(rc == 0); 2885 CU_ASSERT(g_io_done == false); 2886 2887 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 3); 2888 stub_complete_io(3); 2889 CU_ASSERT(g_io_done == true); 2890 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 2891 2892 /* A very complicated case. Each sg entry exceeds max_segment_size 2893 * and split on io boundary. 2894 * optimal_io_boundary < max_segment_size * max_num_segments 2895 */ 2896 bdev->max_segment_size = 3 * 512; 2897 bdev->max_num_segments = SPDK_BDEV_IO_NUM_CHILD_IOV; 2898 g_io_done = false; 2899 2900 for (i = 0; i < 20; i++) { 2901 iov[i].iov_base = (void *)((i + 1) * 0x10000); 2902 iov[i].iov_len = 512 * 4; 2903 } 2904 2905 /* IO crossing the IO boundary requires split. 2906 * 80 block length can split 5 child IOs base on offset and IO boundary. 2907 * Each iov entry needs to be split to 2 entries because of max_segment_size 2908 * Total 5 child IOs. 2909 */ 2910 2911 /* 4 iov entries are in an IO boundary and each iov entry splits to 2. 2912 * So each child IO occupies 8 child iov entries. 2913 */ 2914 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 0, 16, 8); 2915 for (i = 0; i < 4; i++) { 2916 int iovcnt = i * 2; 2917 ut_expected_io_set_iov(expected_io, iovcnt, iov[i].iov_base, 512 * 3); 2918 ut_expected_io_set_iov(expected_io, iovcnt + 1, iov[i].iov_base + 512 * 3, 512); 2919 } 2920 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2921 2922 /* 2nd child IO and total 16 child iov entries of parent IO */ 2923 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 16, 16, 8); 2924 for (i = 4; i < 8; i++) { 2925 int iovcnt = (i - 4) * 2; 2926 ut_expected_io_set_iov(expected_io, iovcnt, iov[i].iov_base, 512 * 3); 2927 ut_expected_io_set_iov(expected_io, iovcnt + 1, iov[i].iov_base + 512 * 3, 512); 2928 } 2929 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2930 2931 /* 3rd child IO and total 24 child iov entries of parent IO */ 2932 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 32, 16, 8); 2933 for (i = 8; i < 12; i++) { 2934 int iovcnt = (i - 8) * 2; 2935 ut_expected_io_set_iov(expected_io, iovcnt, iov[i].iov_base, 512 * 3); 2936 ut_expected_io_set_iov(expected_io, iovcnt + 1, iov[i].iov_base + 512 * 3, 512); 2937 } 2938 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2939 2940 /* 4th child IO and total 32 child iov entries of parent IO */ 2941 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 48, 16, 8); 2942 for (i = 12; i < 16; i++) { 2943 int iovcnt = (i - 12) * 2; 2944 ut_expected_io_set_iov(expected_io, iovcnt, iov[i].iov_base, 512 * 3); 2945 ut_expected_io_set_iov(expected_io, iovcnt + 1, iov[i].iov_base + 512 * 3, 512); 2946 } 2947 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2948 2949 /* 5th child IO and because of the child iov entry it should be split 2950 * in next round. 2951 */ 2952 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 64, 16, 8); 2953 for (i = 16; i < 20; i++) { 2954 int iovcnt = (i - 16) * 2; 2955 ut_expected_io_set_iov(expected_io, iovcnt, iov[i].iov_base, 512 * 3); 2956 ut_expected_io_set_iov(expected_io, iovcnt + 1, iov[i].iov_base + 512 * 3, 512); 2957 } 2958 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2959 2960 rc = spdk_bdev_writev_blocks(desc, io_ch, iov, 20, 0, 80, io_done, NULL); 2961 CU_ASSERT(rc == 0); 2962 CU_ASSERT(g_io_done == false); 2963 2964 /* First split round */ 2965 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 4); 2966 stub_complete_io(4); 2967 CU_ASSERT(g_io_done == false); 2968 2969 /* Second split round */ 2970 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 2971 stub_complete_io(1); 2972 CU_ASSERT(g_io_done == true); 2973 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 2974 2975 spdk_put_io_channel(io_ch); 2976 spdk_bdev_close(desc); 2977 free_bdev(bdev); 2978 ut_fini_bdev(); 2979 } 2980 2981 static void 2982 bdev_io_split_with_io_wait(void) 2983 { 2984 struct spdk_bdev *bdev; 2985 struct spdk_bdev_desc *desc = NULL; 2986 struct spdk_io_channel *io_ch; 2987 struct spdk_bdev_channel *channel; 2988 struct spdk_bdev_mgmt_channel *mgmt_ch; 2989 struct spdk_bdev_opts bdev_opts = {}; 2990 struct iovec iov[3]; 2991 struct ut_expected_io *expected_io; 2992 int rc; 2993 2994 spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts)); 2995 bdev_opts.bdev_io_pool_size = 2; 2996 bdev_opts.bdev_io_cache_size = 1; 2997 ut_init_bdev(&bdev_opts); 2998 2999 bdev = allocate_bdev("bdev0"); 3000 3001 rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc); 3002 CU_ASSERT(rc == 0); 3003 CU_ASSERT(desc != NULL); 3004 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 3005 io_ch = spdk_bdev_get_io_channel(desc); 3006 CU_ASSERT(io_ch != NULL); 3007 channel = spdk_io_channel_get_ctx(io_ch); 3008 mgmt_ch = channel->shared_resource->mgmt_ch; 3009 3010 bdev->optimal_io_boundary = 16; 3011 bdev->split_on_optimal_io_boundary = true; 3012 3013 rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, NULL); 3014 CU_ASSERT(rc == 0); 3015 3016 /* Now test that a single-vector command is split correctly. 3017 * Offset 14, length 8, payload 0xF000 3018 * Child - Offset 14, length 2, payload 0xF000 3019 * Child - Offset 16, length 6, payload 0xF000 + 2 * 512 3020 * 3021 * Set up the expected values before calling spdk_bdev_read_blocks 3022 */ 3023 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 14, 2, 1); 3024 ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 2 * 512); 3025 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 3026 3027 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 16, 6, 1); 3028 ut_expected_io_set_iov(expected_io, 0, (void *)(0xF000 + 2 * 512), 6 * 512); 3029 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 3030 3031 /* The following children will be submitted sequentially due to the capacity of 3032 * spdk_bdev_io. 3033 */ 3034 3035 /* The first child I/O will be queued to wait until an spdk_bdev_io becomes available */ 3036 rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 8, io_done, NULL); 3037 CU_ASSERT(rc == 0); 3038 CU_ASSERT(!TAILQ_EMPTY(&mgmt_ch->io_wait_queue)); 3039 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 3040 3041 /* Completing the first read I/O will submit the first child */ 3042 stub_complete_io(1); 3043 CU_ASSERT(TAILQ_EMPTY(&mgmt_ch->io_wait_queue)); 3044 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 3045 3046 /* Completing the first child will submit the second child */ 3047 stub_complete_io(1); 3048 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 3049 3050 /* Complete the second child I/O. This should result in our callback getting 3051 * invoked since the parent I/O is now complete. 3052 */ 3053 stub_complete_io(1); 3054 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 3055 3056 /* Now set up a more complex, multi-vector command that needs to be split, 3057 * including splitting iovecs. 3058 */ 3059 iov[0].iov_base = (void *)0x10000; 3060 iov[0].iov_len = 512; 3061 iov[1].iov_base = (void *)0x20000; 3062 iov[1].iov_len = 20 * 512; 3063 iov[2].iov_base = (void *)0x30000; 3064 iov[2].iov_len = 11 * 512; 3065 3066 g_io_done = false; 3067 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 14, 2, 2); 3068 ut_expected_io_set_iov(expected_io, 0, (void *)0x10000, 512); 3069 ut_expected_io_set_iov(expected_io, 1, (void *)0x20000, 512); 3070 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 3071 3072 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 16, 16, 1); 3073 ut_expected_io_set_iov(expected_io, 0, (void *)(0x20000 + 512), 16 * 512); 3074 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 3075 3076 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 32, 14, 2); 3077 ut_expected_io_set_iov(expected_io, 0, (void *)(0x20000 + 17 * 512), 3 * 512); 3078 ut_expected_io_set_iov(expected_io, 1, (void *)0x30000, 11 * 512); 3079 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 3080 3081 rc = spdk_bdev_writev_blocks(desc, io_ch, iov, 3, 14, 32, io_done, NULL); 3082 CU_ASSERT(rc == 0); 3083 CU_ASSERT(g_io_done == false); 3084 3085 /* The following children will be submitted sequentially due to the capacity of 3086 * spdk_bdev_io. 3087 */ 3088 3089 /* Completing the first child will submit the second child */ 3090 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 3091 stub_complete_io(1); 3092 CU_ASSERT(g_io_done == false); 3093 3094 /* Completing the second child will submit the third child */ 3095 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 3096 stub_complete_io(1); 3097 CU_ASSERT(g_io_done == false); 3098 3099 /* Completing the third child will result in our callback getting invoked 3100 * since the parent I/O is now complete. 3101 */ 3102 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 3103 stub_complete_io(1); 3104 CU_ASSERT(g_io_done == true); 3105 3106 CU_ASSERT(TAILQ_EMPTY(&g_bdev_ut_channel->expected_io)); 3107 3108 spdk_put_io_channel(io_ch); 3109 spdk_bdev_close(desc); 3110 free_bdev(bdev); 3111 ut_fini_bdev(); 3112 } 3113 3114 static void 3115 bdev_io_write_unit_split_test(void) 3116 { 3117 struct spdk_bdev *bdev; 3118 struct spdk_bdev_desc *desc = NULL; 3119 struct spdk_io_channel *io_ch; 3120 struct spdk_bdev_opts bdev_opts = {}; 3121 struct iovec iov[SPDK_BDEV_IO_NUM_CHILD_IOV * 4]; 3122 struct ut_expected_io *expected_io; 3123 uint64_t i; 3124 int rc; 3125 3126 spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts)); 3127 bdev_opts.bdev_io_pool_size = 512; 3128 bdev_opts.bdev_io_cache_size = 64; 3129 ut_init_bdev(&bdev_opts); 3130 3131 bdev = allocate_bdev("bdev0"); 3132 3133 rc = spdk_bdev_open_ext(bdev->name, true, bdev_ut_event_cb, NULL, &desc); 3134 CU_ASSERT(rc == 0); 3135 SPDK_CU_ASSERT_FATAL(desc != NULL); 3136 io_ch = spdk_bdev_get_io_channel(desc); 3137 CU_ASSERT(io_ch != NULL); 3138 3139 /* Write I/O 2x larger than write_unit_size should get split into 2 I/Os */ 3140 bdev->write_unit_size = 32; 3141 bdev->split_on_write_unit = true; 3142 g_io_done = false; 3143 3144 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 0, 32, 1); 3145 ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 32 * 512); 3146 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 3147 3148 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 32, 32, 1); 3149 ut_expected_io_set_iov(expected_io, 0, (void *)(0xF000 + 32 * 512), 32 * 512); 3150 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 3151 3152 rc = spdk_bdev_write_blocks(desc, io_ch, (void *)0xF000, 0, 64, io_done, NULL); 3153 CU_ASSERT(rc == 0); 3154 CU_ASSERT(g_io_done == false); 3155 3156 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 3157 stub_complete_io(2); 3158 CU_ASSERT(g_io_done == true); 3159 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 3160 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS); 3161 3162 /* Same as above but with optimal_io_boundary < write_unit_size - the I/O should be split 3163 * based on write_unit_size, not optimal_io_boundary */ 3164 bdev->split_on_optimal_io_boundary = true; 3165 bdev->optimal_io_boundary = 16; 3166 g_io_done = false; 3167 3168 rc = spdk_bdev_write_blocks(desc, io_ch, (void *)0xF000, 0, 64, io_done, NULL); 3169 CU_ASSERT(rc == 0); 3170 CU_ASSERT(g_io_done == false); 3171 3172 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 3173 stub_complete_io(2); 3174 CU_ASSERT(g_io_done == true); 3175 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 3176 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS); 3177 3178 /* Write I/O should fail if it is smaller than write_unit_size */ 3179 g_io_done = false; 3180 3181 rc = spdk_bdev_write_blocks(desc, io_ch, (void *)0xF000, 0, 31, io_done, NULL); 3182 CU_ASSERT(rc == 0); 3183 CU_ASSERT(g_io_done == false); 3184 3185 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 3186 poll_threads(); 3187 CU_ASSERT(g_io_done == true); 3188 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 3189 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED); 3190 3191 /* Same for I/O not aligned to write_unit_size */ 3192 g_io_done = false; 3193 3194 rc = spdk_bdev_write_blocks(desc, io_ch, (void *)0xF000, 1, 32, io_done, NULL); 3195 CU_ASSERT(rc == 0); 3196 CU_ASSERT(g_io_done == false); 3197 3198 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 3199 poll_threads(); 3200 CU_ASSERT(g_io_done == true); 3201 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 3202 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED); 3203 3204 /* Write should fail if it needs to be split but there are not enough iovs to submit 3205 * an entire write unit */ 3206 bdev->write_unit_size = SPDK_COUNTOF(iov) / 2; 3207 g_io_done = false; 3208 3209 for (i = 0; i < SPDK_COUNTOF(iov); i++) { 3210 iov[i].iov_base = (void *)(0x1000 + 512 * i); 3211 iov[i].iov_len = 512; 3212 } 3213 3214 rc = spdk_bdev_writev_blocks(desc, io_ch, iov, SPDK_COUNTOF(iov), 0, SPDK_COUNTOF(iov), 3215 io_done, NULL); 3216 CU_ASSERT(rc == 0); 3217 CU_ASSERT(g_io_done == false); 3218 3219 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 3220 poll_threads(); 3221 CU_ASSERT(g_io_done == true); 3222 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 3223 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED); 3224 3225 spdk_put_io_channel(io_ch); 3226 spdk_bdev_close(desc); 3227 free_bdev(bdev); 3228 ut_fini_bdev(); 3229 } 3230 3231 static void 3232 bdev_io_alignment(void) 3233 { 3234 struct spdk_bdev *bdev; 3235 struct spdk_bdev_desc *desc = NULL; 3236 struct spdk_io_channel *io_ch; 3237 struct spdk_bdev_opts bdev_opts = {}; 3238 int rc; 3239 void *buf = NULL; 3240 struct iovec iovs[2]; 3241 int iovcnt; 3242 uint64_t alignment; 3243 3244 spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts)); 3245 bdev_opts.bdev_io_pool_size = 20; 3246 bdev_opts.bdev_io_cache_size = 2; 3247 ut_init_bdev(&bdev_opts); 3248 3249 fn_table.submit_request = stub_submit_request_get_buf; 3250 bdev = allocate_bdev("bdev0"); 3251 3252 rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc); 3253 CU_ASSERT(rc == 0); 3254 CU_ASSERT(desc != NULL); 3255 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 3256 io_ch = spdk_bdev_get_io_channel(desc); 3257 CU_ASSERT(io_ch != NULL); 3258 3259 /* Create aligned buffer */ 3260 rc = posix_memalign(&buf, 4096, 8192); 3261 SPDK_CU_ASSERT_FATAL(rc == 0); 3262 3263 /* Pass aligned single buffer with no alignment required */ 3264 alignment = 1; 3265 bdev->required_alignment = spdk_u32log2(alignment); 3266 3267 rc = spdk_bdev_write_blocks(desc, io_ch, buf, 0, 1, io_done, NULL); 3268 CU_ASSERT(rc == 0); 3269 stub_complete_io(1); 3270 CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt, 3271 alignment)); 3272 3273 rc = spdk_bdev_read_blocks(desc, io_ch, buf, 0, 1, io_done, NULL); 3274 CU_ASSERT(rc == 0); 3275 stub_complete_io(1); 3276 CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt, 3277 alignment)); 3278 3279 /* Pass unaligned single buffer with no alignment required */ 3280 alignment = 1; 3281 bdev->required_alignment = spdk_u32log2(alignment); 3282 3283 rc = spdk_bdev_write_blocks(desc, io_ch, buf + 4, 0, 1, io_done, NULL); 3284 CU_ASSERT(rc == 0); 3285 CU_ASSERT(g_bdev_io->internal.f.has_bounce_buf == false); 3286 CU_ASSERT(g_bdev_io->u.bdev.iovs[0].iov_base == buf + 4); 3287 stub_complete_io(1); 3288 3289 rc = spdk_bdev_read_blocks(desc, io_ch, buf + 4, 0, 1, io_done, NULL); 3290 CU_ASSERT(rc == 0); 3291 CU_ASSERT(g_bdev_io->internal.f.has_bounce_buf == false); 3292 CU_ASSERT(g_bdev_io->u.bdev.iovs[0].iov_base == buf + 4); 3293 stub_complete_io(1); 3294 3295 /* Pass unaligned single buffer with 512 alignment required */ 3296 alignment = 512; 3297 bdev->required_alignment = spdk_u32log2(alignment); 3298 3299 rc = spdk_bdev_write_blocks(desc, io_ch, buf + 4, 0, 1, io_done, NULL); 3300 CU_ASSERT(rc == 0); 3301 CU_ASSERT(g_bdev_io->internal.bounce_buf.orig_iovcnt == 1); 3302 CU_ASSERT(g_bdev_io->u.bdev.iovs == &g_bdev_io->internal.bounce_buf.iov); 3303 CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt, 3304 alignment)); 3305 stub_complete_io(1); 3306 CU_ASSERT(g_bdev_io->internal.f.has_bounce_buf == false); 3307 3308 rc = spdk_bdev_read_blocks(desc, io_ch, buf + 4, 0, 1, io_done, NULL); 3309 CU_ASSERT(rc == 0); 3310 CU_ASSERT(g_bdev_io->internal.bounce_buf.orig_iovcnt == 1); 3311 CU_ASSERT(g_bdev_io->u.bdev.iovs == &g_bdev_io->internal.bounce_buf.iov); 3312 CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt, 3313 alignment)); 3314 stub_complete_io(1); 3315 CU_ASSERT(g_bdev_io->internal.f.has_bounce_buf == false); 3316 3317 /* Pass unaligned single buffer with 4096 alignment required */ 3318 alignment = 4096; 3319 bdev->required_alignment = spdk_u32log2(alignment); 3320 3321 rc = spdk_bdev_write_blocks(desc, io_ch, buf + 8, 0, 1, io_done, NULL); 3322 CU_ASSERT(rc == 0); 3323 CU_ASSERT(g_bdev_io->internal.bounce_buf.orig_iovcnt == 1); 3324 CU_ASSERT(g_bdev_io->u.bdev.iovs == &g_bdev_io->internal.bounce_buf.iov); 3325 CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt, 3326 alignment)); 3327 stub_complete_io(1); 3328 CU_ASSERT(g_bdev_io->internal.f.has_bounce_buf == false); 3329 3330 rc = spdk_bdev_read_blocks(desc, io_ch, buf + 8, 0, 1, io_done, NULL); 3331 CU_ASSERT(rc == 0); 3332 CU_ASSERT(g_bdev_io->internal.bounce_buf.orig_iovcnt == 1); 3333 CU_ASSERT(g_bdev_io->u.bdev.iovs == &g_bdev_io->internal.bounce_buf.iov); 3334 CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt, 3335 alignment)); 3336 stub_complete_io(1); 3337 CU_ASSERT(g_bdev_io->internal.f.has_bounce_buf == false); 3338 3339 /* Pass aligned iovs with no alignment required */ 3340 alignment = 1; 3341 bdev->required_alignment = spdk_u32log2(alignment); 3342 3343 iovcnt = 1; 3344 iovs[0].iov_base = buf; 3345 iovs[0].iov_len = 512; 3346 3347 rc = spdk_bdev_writev(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL); 3348 CU_ASSERT(rc == 0); 3349 CU_ASSERT(g_bdev_io->internal.f.has_bounce_buf == false); 3350 stub_complete_io(1); 3351 CU_ASSERT(g_bdev_io->u.bdev.iovs[0].iov_base == iovs[0].iov_base); 3352 3353 rc = spdk_bdev_readv(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL); 3354 CU_ASSERT(rc == 0); 3355 CU_ASSERT(g_bdev_io->internal.f.has_bounce_buf == false); 3356 stub_complete_io(1); 3357 CU_ASSERT(g_bdev_io->u.bdev.iovs[0].iov_base == iovs[0].iov_base); 3358 3359 /* Pass unaligned iovs with no alignment required */ 3360 alignment = 1; 3361 bdev->required_alignment = spdk_u32log2(alignment); 3362 3363 iovcnt = 2; 3364 iovs[0].iov_base = buf + 16; 3365 iovs[0].iov_len = 256; 3366 iovs[1].iov_base = buf + 16 + 256 + 32; 3367 iovs[1].iov_len = 256; 3368 3369 rc = spdk_bdev_writev(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL); 3370 CU_ASSERT(rc == 0); 3371 CU_ASSERT(g_bdev_io->internal.f.has_bounce_buf == false); 3372 stub_complete_io(1); 3373 CU_ASSERT(g_bdev_io->u.bdev.iovs[0].iov_base == iovs[0].iov_base); 3374 3375 rc = spdk_bdev_readv(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL); 3376 CU_ASSERT(rc == 0); 3377 CU_ASSERT(g_bdev_io->internal.f.has_bounce_buf == false); 3378 stub_complete_io(1); 3379 CU_ASSERT(g_bdev_io->u.bdev.iovs[0].iov_base == iovs[0].iov_base); 3380 3381 /* Pass unaligned iov with 2048 alignment required */ 3382 alignment = 2048; 3383 bdev->required_alignment = spdk_u32log2(alignment); 3384 3385 iovcnt = 2; 3386 iovs[0].iov_base = buf + 16; 3387 iovs[0].iov_len = 256; 3388 iovs[1].iov_base = buf + 16 + 256 + 32; 3389 iovs[1].iov_len = 256; 3390 3391 rc = spdk_bdev_writev(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL); 3392 CU_ASSERT(rc == 0); 3393 CU_ASSERT(g_bdev_io->internal.bounce_buf.orig_iovcnt == iovcnt); 3394 CU_ASSERT(g_bdev_io->u.bdev.iovs == &g_bdev_io->internal.bounce_buf.iov); 3395 CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt, 3396 alignment)); 3397 stub_complete_io(1); 3398 CU_ASSERT(g_bdev_io->internal.f.has_bounce_buf == false); 3399 3400 rc = spdk_bdev_readv(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL); 3401 CU_ASSERT(rc == 0); 3402 CU_ASSERT(g_bdev_io->internal.bounce_buf.orig_iovcnt == iovcnt); 3403 CU_ASSERT(g_bdev_io->u.bdev.iovs == &g_bdev_io->internal.bounce_buf.iov); 3404 CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt, 3405 alignment)); 3406 stub_complete_io(1); 3407 CU_ASSERT(g_bdev_io->internal.f.has_bounce_buf == false); 3408 3409 /* Pass iov without allocated buffer without alignment required */ 3410 alignment = 1; 3411 bdev->required_alignment = spdk_u32log2(alignment); 3412 3413 iovcnt = 1; 3414 iovs[0].iov_base = NULL; 3415 iovs[0].iov_len = 0; 3416 3417 rc = spdk_bdev_readv(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL); 3418 CU_ASSERT(rc == 0); 3419 CU_ASSERT(g_bdev_io->internal.f.has_bounce_buf == false); 3420 CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt, 3421 alignment)); 3422 stub_complete_io(1); 3423 3424 /* Pass iov without allocated buffer with 1024 alignment required */ 3425 alignment = 1024; 3426 bdev->required_alignment = spdk_u32log2(alignment); 3427 3428 iovcnt = 1; 3429 iovs[0].iov_base = NULL; 3430 iovs[0].iov_len = 0; 3431 3432 rc = spdk_bdev_readv(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL); 3433 CU_ASSERT(rc == 0); 3434 CU_ASSERT(g_bdev_io->internal.f.has_bounce_buf == false); 3435 CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt, 3436 alignment)); 3437 stub_complete_io(1); 3438 3439 spdk_put_io_channel(io_ch); 3440 spdk_bdev_close(desc); 3441 free_bdev(bdev); 3442 fn_table.submit_request = stub_submit_request; 3443 ut_fini_bdev(); 3444 3445 free(buf); 3446 } 3447 3448 static void 3449 bdev_io_alignment_with_boundary(void) 3450 { 3451 struct spdk_bdev *bdev; 3452 struct spdk_bdev_desc *desc = NULL; 3453 struct spdk_io_channel *io_ch; 3454 struct spdk_bdev_opts bdev_opts = {}; 3455 int rc; 3456 void *buf = NULL; 3457 struct iovec iovs[2]; 3458 int iovcnt; 3459 uint64_t alignment; 3460 3461 spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts)); 3462 bdev_opts.bdev_io_pool_size = 20; 3463 bdev_opts.bdev_io_cache_size = 2; 3464 bdev_opts.opts_size = sizeof(bdev_opts); 3465 ut_init_bdev(&bdev_opts); 3466 3467 fn_table.submit_request = stub_submit_request_get_buf; 3468 bdev = allocate_bdev("bdev0"); 3469 3470 rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc); 3471 CU_ASSERT(rc == 0); 3472 CU_ASSERT(desc != NULL); 3473 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 3474 io_ch = spdk_bdev_get_io_channel(desc); 3475 CU_ASSERT(io_ch != NULL); 3476 3477 /* Create aligned buffer */ 3478 rc = posix_memalign(&buf, 4096, 131072); 3479 SPDK_CU_ASSERT_FATAL(rc == 0); 3480 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 3481 3482 #ifdef NOTDEF 3483 /* 512 * 3 with 2 IO boundary, allocate small data buffer from bdev layer */ 3484 alignment = 512; 3485 bdev->required_alignment = spdk_u32log2(alignment); 3486 bdev->optimal_io_boundary = 2; 3487 bdev->split_on_optimal_io_boundary = true; 3488 3489 iovcnt = 1; 3490 iovs[0].iov_base = NULL; 3491 iovs[0].iov_len = 512 * 3; 3492 3493 rc = spdk_bdev_readv_blocks(desc, io_ch, iovs, iovcnt, 1, 3, io_done, NULL); 3494 CU_ASSERT(rc == 0); 3495 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 3496 stub_complete_io(2); 3497 3498 /* 8KiB with 16 IO boundary, allocate large data buffer from bdev layer */ 3499 alignment = 512; 3500 bdev->required_alignment = spdk_u32log2(alignment); 3501 bdev->optimal_io_boundary = 16; 3502 bdev->split_on_optimal_io_boundary = true; 3503 3504 iovcnt = 1; 3505 iovs[0].iov_base = NULL; 3506 iovs[0].iov_len = 512 * 16; 3507 3508 rc = spdk_bdev_readv_blocks(desc, io_ch, iovs, iovcnt, 1, 16, io_done, NULL); 3509 CU_ASSERT(rc == 0); 3510 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 3511 stub_complete_io(2); 3512 3513 /* 512 * 160 with 128 IO boundary, 63.5KiB + 16.5KiB for the two children requests */ 3514 alignment = 512; 3515 bdev->required_alignment = spdk_u32log2(alignment); 3516 bdev->optimal_io_boundary = 128; 3517 bdev->split_on_optimal_io_boundary = true; 3518 3519 iovcnt = 1; 3520 iovs[0].iov_base = buf + 16; 3521 iovs[0].iov_len = 512 * 160; 3522 rc = spdk_bdev_readv_blocks(desc, io_ch, iovs, iovcnt, 1, 160, io_done, NULL); 3523 CU_ASSERT(rc == 0); 3524 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 3525 stub_complete_io(2); 3526 3527 #endif 3528 3529 /* 512 * 3 with 2 IO boundary */ 3530 alignment = 512; 3531 bdev->required_alignment = spdk_u32log2(alignment); 3532 bdev->optimal_io_boundary = 2; 3533 bdev->split_on_optimal_io_boundary = true; 3534 3535 iovcnt = 2; 3536 iovs[0].iov_base = buf + 16; 3537 iovs[0].iov_len = 512; 3538 iovs[1].iov_base = buf + 16 + 512 + 32; 3539 iovs[1].iov_len = 1024; 3540 3541 rc = spdk_bdev_writev_blocks(desc, io_ch, iovs, iovcnt, 1, 3, io_done, NULL); 3542 CU_ASSERT(rc == 0); 3543 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 3544 stub_complete_io(2); 3545 3546 rc = spdk_bdev_readv_blocks(desc, io_ch, iovs, iovcnt, 1, 3, io_done, NULL); 3547 CU_ASSERT(rc == 0); 3548 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 3549 stub_complete_io(2); 3550 3551 /* 512 * 64 with 32 IO boundary */ 3552 bdev->optimal_io_boundary = 32; 3553 iovcnt = 2; 3554 iovs[0].iov_base = buf + 16; 3555 iovs[0].iov_len = 16384; 3556 iovs[1].iov_base = buf + 16 + 16384 + 32; 3557 iovs[1].iov_len = 16384; 3558 3559 rc = spdk_bdev_writev_blocks(desc, io_ch, iovs, iovcnt, 1, 64, io_done, NULL); 3560 CU_ASSERT(rc == 0); 3561 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 3); 3562 stub_complete_io(3); 3563 3564 rc = spdk_bdev_readv_blocks(desc, io_ch, iovs, iovcnt, 1, 64, io_done, NULL); 3565 CU_ASSERT(rc == 0); 3566 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 3); 3567 stub_complete_io(3); 3568 3569 /* 512 * 160 with 32 IO boundary */ 3570 iovcnt = 1; 3571 iovs[0].iov_base = buf + 16; 3572 iovs[0].iov_len = 16384 + 65536; 3573 3574 rc = spdk_bdev_writev_blocks(desc, io_ch, iovs, iovcnt, 1, 160, io_done, NULL); 3575 CU_ASSERT(rc == 0); 3576 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 6); 3577 stub_complete_io(6); 3578 3579 spdk_put_io_channel(io_ch); 3580 spdk_bdev_close(desc); 3581 free_bdev(bdev); 3582 fn_table.submit_request = stub_submit_request; 3583 ut_fini_bdev(); 3584 3585 free(buf); 3586 } 3587 3588 static void 3589 histogram_status_cb(void *cb_arg, int status) 3590 { 3591 g_status = status; 3592 } 3593 3594 static void 3595 histogram_data_cb(void *cb_arg, int status, struct spdk_histogram_data *histogram) 3596 { 3597 g_status = status; 3598 g_histogram = histogram; 3599 } 3600 3601 static void 3602 histogram_io_count(void *ctx, uint64_t start, uint64_t end, uint64_t count, 3603 uint64_t total, uint64_t so_far) 3604 { 3605 g_count += count; 3606 } 3607 3608 static void 3609 histogram_channel_data_cb(void *cb_arg, int status, struct spdk_histogram_data *histogram) 3610 { 3611 spdk_histogram_data_fn cb_fn = cb_arg; 3612 3613 g_status = status; 3614 3615 if (status == 0) { 3616 spdk_histogram_data_iterate(histogram, cb_fn, NULL); 3617 } 3618 } 3619 3620 static void 3621 bdev_histograms(void) 3622 { 3623 struct spdk_bdev *bdev; 3624 struct spdk_bdev_desc *desc = NULL; 3625 struct spdk_io_channel *ch; 3626 struct spdk_histogram_data *histogram; 3627 uint8_t buf[4096]; 3628 int rc; 3629 3630 ut_init_bdev(NULL); 3631 3632 bdev = allocate_bdev("bdev"); 3633 3634 rc = spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc); 3635 CU_ASSERT(rc == 0); 3636 CU_ASSERT(desc != NULL); 3637 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 3638 3639 ch = spdk_bdev_get_io_channel(desc); 3640 CU_ASSERT(ch != NULL); 3641 3642 /* Enable histogram */ 3643 g_status = -1; 3644 spdk_bdev_histogram_enable(bdev, histogram_status_cb, NULL, true); 3645 poll_threads(); 3646 CU_ASSERT(g_status == 0); 3647 CU_ASSERT(bdev->internal.histogram_enabled == true); 3648 3649 /* Allocate histogram */ 3650 histogram = spdk_histogram_data_alloc(); 3651 SPDK_CU_ASSERT_FATAL(histogram != NULL); 3652 3653 /* Check if histogram is zeroed */ 3654 spdk_bdev_histogram_get(bdev, histogram, histogram_data_cb, NULL); 3655 poll_threads(); 3656 CU_ASSERT(g_status == 0); 3657 SPDK_CU_ASSERT_FATAL(g_histogram != NULL); 3658 3659 g_count = 0; 3660 spdk_histogram_data_iterate(g_histogram, histogram_io_count, NULL); 3661 3662 CU_ASSERT(g_count == 0); 3663 3664 rc = spdk_bdev_write_blocks(desc, ch, buf, 0, 1, io_done, NULL); 3665 CU_ASSERT(rc == 0); 3666 3667 spdk_delay_us(10); 3668 stub_complete_io(1); 3669 poll_threads(); 3670 3671 rc = spdk_bdev_read_blocks(desc, ch, buf, 0, 1, io_done, NULL); 3672 CU_ASSERT(rc == 0); 3673 3674 spdk_delay_us(10); 3675 stub_complete_io(1); 3676 poll_threads(); 3677 3678 /* Check if histogram gathered data from all I/O channels */ 3679 g_histogram = NULL; 3680 spdk_bdev_histogram_get(bdev, histogram, histogram_data_cb, NULL); 3681 poll_threads(); 3682 CU_ASSERT(g_status == 0); 3683 CU_ASSERT(bdev->internal.histogram_enabled == true); 3684 SPDK_CU_ASSERT_FATAL(g_histogram != NULL); 3685 3686 g_count = 0; 3687 spdk_histogram_data_iterate(g_histogram, histogram_io_count, NULL); 3688 CU_ASSERT(g_count == 2); 3689 3690 g_count = 0; 3691 spdk_bdev_channel_get_histogram(ch, histogram_channel_data_cb, histogram_io_count); 3692 CU_ASSERT(g_status == 0); 3693 CU_ASSERT(g_count == 2); 3694 3695 /* Disable histogram */ 3696 spdk_bdev_histogram_enable(bdev, histogram_status_cb, NULL, false); 3697 poll_threads(); 3698 CU_ASSERT(g_status == 0); 3699 CU_ASSERT(bdev->internal.histogram_enabled == false); 3700 3701 /* Try to run histogram commands on disabled bdev */ 3702 spdk_bdev_histogram_get(bdev, histogram, histogram_data_cb, NULL); 3703 poll_threads(); 3704 CU_ASSERT(g_status == -EFAULT); 3705 3706 spdk_bdev_channel_get_histogram(ch, histogram_channel_data_cb, NULL); 3707 CU_ASSERT(g_status == -EFAULT); 3708 3709 spdk_histogram_data_free(histogram); 3710 spdk_put_io_channel(ch); 3711 spdk_bdev_close(desc); 3712 free_bdev(bdev); 3713 ut_fini_bdev(); 3714 } 3715 3716 static void 3717 _bdev_compare(bool emulated) 3718 { 3719 struct spdk_bdev *bdev; 3720 struct spdk_bdev_desc *desc = NULL; 3721 struct spdk_io_channel *ioch; 3722 struct ut_expected_io *expected_io; 3723 uint64_t offset, num_blocks; 3724 uint32_t num_completed; 3725 char aa_buf[512]; 3726 char bb_buf[512]; 3727 struct iovec compare_iov; 3728 uint8_t expected_io_type; 3729 int rc; 3730 3731 if (emulated) { 3732 expected_io_type = SPDK_BDEV_IO_TYPE_READ; 3733 } else { 3734 expected_io_type = SPDK_BDEV_IO_TYPE_COMPARE; 3735 } 3736 3737 memset(aa_buf, 0xaa, sizeof(aa_buf)); 3738 memset(bb_buf, 0xbb, sizeof(bb_buf)); 3739 3740 g_io_types_supported[SPDK_BDEV_IO_TYPE_COMPARE] = !emulated; 3741 3742 ut_init_bdev(NULL); 3743 fn_table.submit_request = stub_submit_request_get_buf; 3744 bdev = allocate_bdev("bdev"); 3745 3746 rc = spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc); 3747 CU_ASSERT_EQUAL(rc, 0); 3748 SPDK_CU_ASSERT_FATAL(desc != NULL); 3749 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 3750 ioch = spdk_bdev_get_io_channel(desc); 3751 SPDK_CU_ASSERT_FATAL(ioch != NULL); 3752 3753 fn_table.submit_request = stub_submit_request_get_buf; 3754 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 3755 3756 offset = 50; 3757 num_blocks = 1; 3758 compare_iov.iov_base = aa_buf; 3759 compare_iov.iov_len = sizeof(aa_buf); 3760 3761 /* 1. successful comparev */ 3762 expected_io = ut_alloc_expected_io(expected_io_type, offset, num_blocks, 0); 3763 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 3764 3765 g_io_done = false; 3766 g_compare_read_buf = aa_buf; 3767 g_compare_read_buf_len = sizeof(aa_buf); 3768 rc = spdk_bdev_comparev_blocks(desc, ioch, &compare_iov, 1, offset, num_blocks, io_done, NULL); 3769 CU_ASSERT_EQUAL(rc, 0); 3770 num_completed = stub_complete_io(1); 3771 CU_ASSERT_EQUAL(num_completed, 1); 3772 CU_ASSERT(g_io_done == true); 3773 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS); 3774 3775 /* 2. miscompare comparev */ 3776 expected_io = ut_alloc_expected_io(expected_io_type, offset, num_blocks, 0); 3777 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 3778 3779 g_io_done = false; 3780 g_compare_read_buf = bb_buf; 3781 g_compare_read_buf_len = sizeof(bb_buf); 3782 rc = spdk_bdev_comparev_blocks(desc, ioch, &compare_iov, 1, offset, num_blocks, io_done, NULL); 3783 CU_ASSERT_EQUAL(rc, 0); 3784 num_completed = stub_complete_io(1); 3785 CU_ASSERT_EQUAL(num_completed, 1); 3786 CU_ASSERT(g_io_done == true); 3787 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_MISCOMPARE); 3788 3789 /* 3. successful compare */ 3790 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 3791 expected_io = ut_alloc_expected_io(expected_io_type, offset, num_blocks, 0); 3792 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 3793 3794 g_io_done = false; 3795 g_compare_read_buf = aa_buf; 3796 g_compare_read_buf_len = sizeof(aa_buf); 3797 rc = spdk_bdev_compare_blocks(desc, ioch, aa_buf, offset, num_blocks, io_done, NULL); 3798 CU_ASSERT_EQUAL(rc, 0); 3799 num_completed = stub_complete_io(1); 3800 CU_ASSERT_EQUAL(num_completed, 1); 3801 CU_ASSERT(g_io_done == true); 3802 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS); 3803 3804 /* 4. miscompare compare */ 3805 expected_io = ut_alloc_expected_io(expected_io_type, offset, num_blocks, 0); 3806 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 3807 3808 g_io_done = false; 3809 g_compare_read_buf = bb_buf; 3810 g_compare_read_buf_len = sizeof(bb_buf); 3811 rc = spdk_bdev_compare_blocks(desc, ioch, aa_buf, offset, num_blocks, io_done, NULL); 3812 CU_ASSERT_EQUAL(rc, 0); 3813 num_completed = stub_complete_io(1); 3814 CU_ASSERT_EQUAL(num_completed, 1); 3815 CU_ASSERT(g_io_done == true); 3816 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_MISCOMPARE); 3817 3818 spdk_put_io_channel(ioch); 3819 spdk_bdev_close(desc); 3820 free_bdev(bdev); 3821 fn_table.submit_request = stub_submit_request; 3822 ut_fini_bdev(); 3823 3824 g_io_types_supported[SPDK_BDEV_IO_TYPE_COMPARE] = true; 3825 3826 g_compare_read_buf = NULL; 3827 } 3828 3829 static void 3830 _bdev_compare_with_md(bool emulated) 3831 { 3832 struct spdk_bdev *bdev; 3833 struct spdk_bdev_desc *desc = NULL; 3834 struct spdk_io_channel *ioch; 3835 struct ut_expected_io *expected_io; 3836 uint64_t offset, num_blocks; 3837 uint32_t num_completed; 3838 char buf[1024 + 16 /* 2 * blocklen + 2 * mdlen */]; 3839 char buf_interleaved_miscompare[1024 + 16 /* 2 * blocklen + 2 * mdlen */]; 3840 char buf_miscompare[1024 /* 2 * blocklen */]; 3841 char md_buf[16]; 3842 char md_buf_miscompare[16]; 3843 struct iovec compare_iov; 3844 uint8_t expected_io_type; 3845 int rc; 3846 3847 if (emulated) { 3848 expected_io_type = SPDK_BDEV_IO_TYPE_READ; 3849 } else { 3850 expected_io_type = SPDK_BDEV_IO_TYPE_COMPARE; 3851 } 3852 3853 memset(buf, 0xaa, sizeof(buf)); 3854 memset(buf_interleaved_miscompare, 0xaa, sizeof(buf_interleaved_miscompare)); 3855 /* make last md different */ 3856 memset(buf_interleaved_miscompare + 1024 + 8, 0xbb, 8); 3857 memset(buf_miscompare, 0xbb, sizeof(buf_miscompare)); 3858 memset(md_buf, 0xaa, 16); 3859 memset(md_buf_miscompare, 0xbb, 16); 3860 3861 g_io_types_supported[SPDK_BDEV_IO_TYPE_COMPARE] = !emulated; 3862 3863 ut_init_bdev(NULL); 3864 fn_table.submit_request = stub_submit_request_get_buf; 3865 bdev = allocate_bdev("bdev"); 3866 3867 rc = spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc); 3868 CU_ASSERT_EQUAL(rc, 0); 3869 SPDK_CU_ASSERT_FATAL(desc != NULL); 3870 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 3871 ioch = spdk_bdev_get_io_channel(desc); 3872 SPDK_CU_ASSERT_FATAL(ioch != NULL); 3873 3874 fn_table.submit_request = stub_submit_request_get_buf; 3875 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 3876 3877 offset = 50; 3878 num_blocks = 2; 3879 3880 /* interleaved md & data */ 3881 bdev->md_interleave = true; 3882 bdev->md_len = 8; 3883 bdev->blocklen = 512 + 8; 3884 compare_iov.iov_base = buf; 3885 compare_iov.iov_len = sizeof(buf); 3886 3887 /* 1. successful compare with md interleaved */ 3888 expected_io = ut_alloc_expected_io(expected_io_type, offset, num_blocks, 0); 3889 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 3890 3891 g_io_done = false; 3892 g_compare_read_buf = buf; 3893 g_compare_read_buf_len = sizeof(buf); 3894 rc = spdk_bdev_comparev_blocks(desc, ioch, &compare_iov, 1, offset, num_blocks, io_done, NULL); 3895 CU_ASSERT_EQUAL(rc, 0); 3896 num_completed = stub_complete_io(1); 3897 CU_ASSERT_EQUAL(num_completed, 1); 3898 CU_ASSERT(g_io_done == true); 3899 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS); 3900 3901 /* 2. miscompare with md interleaved */ 3902 expected_io = ut_alloc_expected_io(expected_io_type, offset, num_blocks, 0); 3903 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 3904 3905 g_io_done = false; 3906 g_compare_read_buf = buf_interleaved_miscompare; 3907 g_compare_read_buf_len = sizeof(buf_interleaved_miscompare); 3908 rc = spdk_bdev_comparev_blocks(desc, ioch, &compare_iov, 1, offset, num_blocks, io_done, NULL); 3909 CU_ASSERT_EQUAL(rc, 0); 3910 num_completed = stub_complete_io(1); 3911 CU_ASSERT_EQUAL(num_completed, 1); 3912 CU_ASSERT(g_io_done == true); 3913 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_MISCOMPARE); 3914 3915 /* Separate data & md buffers */ 3916 bdev->md_interleave = false; 3917 bdev->blocklen = 512; 3918 compare_iov.iov_base = buf; 3919 compare_iov.iov_len = 1024; 3920 3921 /* 3. successful compare with md separated */ 3922 expected_io = ut_alloc_expected_io(expected_io_type, offset, num_blocks, 0); 3923 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 3924 3925 g_io_done = false; 3926 g_compare_read_buf = buf; 3927 g_compare_read_buf_len = 1024; 3928 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 3929 g_compare_md_buf = md_buf; 3930 rc = spdk_bdev_comparev_blocks_with_md(desc, ioch, &compare_iov, 1, md_buf, 3931 offset, num_blocks, io_done, NULL); 3932 CU_ASSERT_EQUAL(rc, 0); 3933 num_completed = stub_complete_io(1); 3934 CU_ASSERT_EQUAL(num_completed, 1); 3935 CU_ASSERT(g_io_done == true); 3936 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS); 3937 3938 /* 4. miscompare with md separated where md buf is different */ 3939 expected_io = ut_alloc_expected_io(expected_io_type, offset, num_blocks, 0); 3940 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 3941 3942 g_io_done = false; 3943 g_compare_read_buf = buf; 3944 g_compare_read_buf_len = 1024; 3945 g_compare_md_buf = md_buf_miscompare; 3946 rc = spdk_bdev_comparev_blocks_with_md(desc, ioch, &compare_iov, 1, md_buf, 3947 offset, num_blocks, io_done, NULL); 3948 CU_ASSERT_EQUAL(rc, 0); 3949 num_completed = stub_complete_io(1); 3950 CU_ASSERT_EQUAL(num_completed, 1); 3951 CU_ASSERT(g_io_done == true); 3952 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_MISCOMPARE); 3953 3954 /* 5. miscompare with md separated where buf is different */ 3955 expected_io = ut_alloc_expected_io(expected_io_type, offset, num_blocks, 0); 3956 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 3957 3958 g_io_done = false; 3959 g_compare_read_buf = buf_miscompare; 3960 g_compare_read_buf_len = sizeof(buf_miscompare); 3961 g_compare_md_buf = md_buf; 3962 rc = spdk_bdev_comparev_blocks_with_md(desc, ioch, &compare_iov, 1, md_buf, 3963 offset, num_blocks, io_done, NULL); 3964 CU_ASSERT_EQUAL(rc, 0); 3965 num_completed = stub_complete_io(1); 3966 CU_ASSERT_EQUAL(num_completed, 1); 3967 CU_ASSERT(g_io_done == true); 3968 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_MISCOMPARE); 3969 3970 bdev->md_len = 0; 3971 g_compare_md_buf = NULL; 3972 3973 spdk_put_io_channel(ioch); 3974 spdk_bdev_close(desc); 3975 free_bdev(bdev); 3976 fn_table.submit_request = stub_submit_request; 3977 ut_fini_bdev(); 3978 3979 g_io_types_supported[SPDK_BDEV_IO_TYPE_COMPARE] = true; 3980 3981 g_compare_read_buf = NULL; 3982 } 3983 3984 static void 3985 bdev_compare(void) 3986 { 3987 _bdev_compare(false); 3988 _bdev_compare_with_md(false); 3989 } 3990 3991 static void 3992 bdev_compare_emulated(void) 3993 { 3994 _bdev_compare(true); 3995 _bdev_compare_with_md(true); 3996 } 3997 3998 static void 3999 bdev_compare_and_write(void) 4000 { 4001 struct spdk_bdev *bdev; 4002 struct spdk_bdev_desc *desc = NULL; 4003 struct spdk_io_channel *ioch; 4004 struct ut_expected_io *expected_io; 4005 uint64_t offset, num_blocks; 4006 uint32_t num_completed; 4007 char aa_buf[512]; 4008 char bb_buf[512]; 4009 char cc_buf[512]; 4010 char write_buf[512]; 4011 struct iovec compare_iov; 4012 struct iovec write_iov; 4013 int rc; 4014 4015 memset(aa_buf, 0xaa, sizeof(aa_buf)); 4016 memset(bb_buf, 0xbb, sizeof(bb_buf)); 4017 memset(cc_buf, 0xcc, sizeof(cc_buf)); 4018 4019 g_io_types_supported[SPDK_BDEV_IO_TYPE_COMPARE] = false; 4020 4021 ut_init_bdev(NULL); 4022 fn_table.submit_request = stub_submit_request_get_buf; 4023 bdev = allocate_bdev("bdev"); 4024 4025 rc = spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc); 4026 CU_ASSERT_EQUAL(rc, 0); 4027 SPDK_CU_ASSERT_FATAL(desc != NULL); 4028 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 4029 ioch = spdk_bdev_get_io_channel(desc); 4030 SPDK_CU_ASSERT_FATAL(ioch != NULL); 4031 4032 fn_table.submit_request = stub_submit_request_get_buf; 4033 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 4034 4035 offset = 50; 4036 num_blocks = 1; 4037 compare_iov.iov_base = aa_buf; 4038 compare_iov.iov_len = sizeof(aa_buf); 4039 write_iov.iov_base = bb_buf; 4040 write_iov.iov_len = sizeof(bb_buf); 4041 4042 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, offset, num_blocks, 0); 4043 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 4044 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, offset, num_blocks, 0); 4045 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 4046 4047 g_io_done = false; 4048 g_compare_read_buf = aa_buf; 4049 g_compare_read_buf_len = sizeof(aa_buf); 4050 memset(write_buf, 0, sizeof(write_buf)); 4051 g_compare_write_buf = write_buf; 4052 g_compare_write_buf_len = sizeof(write_buf); 4053 rc = spdk_bdev_comparev_and_writev_blocks(desc, ioch, &compare_iov, 1, &write_iov, 1, 4054 offset, num_blocks, io_done, NULL); 4055 /* Trigger range locking */ 4056 poll_threads(); 4057 CU_ASSERT_EQUAL(rc, 0); 4058 num_completed = stub_complete_io(1); 4059 CU_ASSERT_EQUAL(num_completed, 1); 4060 CU_ASSERT(g_io_done == false); 4061 num_completed = stub_complete_io(1); 4062 /* Trigger range unlocking */ 4063 poll_threads(); 4064 CU_ASSERT_EQUAL(num_completed, 1); 4065 CU_ASSERT(g_io_done == true); 4066 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS); 4067 CU_ASSERT(memcmp(write_buf, bb_buf, sizeof(write_buf)) == 0); 4068 4069 /* Test miscompare */ 4070 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, offset, num_blocks, 0); 4071 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 4072 4073 g_io_done = false; 4074 g_compare_read_buf = cc_buf; 4075 g_compare_read_buf_len = sizeof(cc_buf); 4076 memset(write_buf, 0, sizeof(write_buf)); 4077 g_compare_write_buf = write_buf; 4078 g_compare_write_buf_len = sizeof(write_buf); 4079 rc = spdk_bdev_comparev_and_writev_blocks(desc, ioch, &compare_iov, 1, &write_iov, 1, 4080 offset, num_blocks, io_done, NULL); 4081 /* Trigger range locking */ 4082 poll_threads(); 4083 CU_ASSERT_EQUAL(rc, 0); 4084 num_completed = stub_complete_io(1); 4085 /* Trigger range unlocking earlier because we expect error here */ 4086 poll_threads(); 4087 CU_ASSERT_EQUAL(num_completed, 1); 4088 CU_ASSERT(g_io_done == true); 4089 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_MISCOMPARE); 4090 num_completed = stub_complete_io(1); 4091 CU_ASSERT_EQUAL(num_completed, 0); 4092 4093 spdk_put_io_channel(ioch); 4094 spdk_bdev_close(desc); 4095 free_bdev(bdev); 4096 fn_table.submit_request = stub_submit_request; 4097 ut_fini_bdev(); 4098 4099 g_io_types_supported[SPDK_BDEV_IO_TYPE_COMPARE] = true; 4100 4101 g_compare_read_buf = NULL; 4102 g_compare_write_buf = NULL; 4103 } 4104 4105 static void 4106 bdev_write_zeroes(void) 4107 { 4108 struct spdk_bdev *bdev; 4109 struct spdk_bdev_desc *desc = NULL; 4110 struct spdk_io_channel *ioch; 4111 struct ut_expected_io *expected_io; 4112 uint64_t offset, num_io_blocks, num_blocks; 4113 uint32_t num_completed, num_requests; 4114 int rc; 4115 4116 ut_init_bdev(NULL); 4117 bdev = allocate_bdev("bdev"); 4118 4119 rc = spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc); 4120 CU_ASSERT_EQUAL(rc, 0); 4121 SPDK_CU_ASSERT_FATAL(desc != NULL); 4122 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 4123 ioch = spdk_bdev_get_io_channel(desc); 4124 SPDK_CU_ASSERT_FATAL(ioch != NULL); 4125 4126 fn_table.submit_request = stub_submit_request; 4127 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 4128 4129 /* First test that if the bdev supports write_zeroes, the request won't be split */ 4130 bdev->md_len = 0; 4131 bdev->blocklen = 4096; 4132 num_blocks = (ZERO_BUFFER_SIZE / bdev->blocklen) * 2; 4133 4134 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, 0, num_blocks, 0); 4135 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 4136 rc = spdk_bdev_write_zeroes_blocks(desc, ioch, 0, num_blocks, io_done, NULL); 4137 CU_ASSERT_EQUAL(rc, 0); 4138 num_completed = stub_complete_io(1); 4139 CU_ASSERT_EQUAL(num_completed, 1); 4140 4141 /* Check that if write zeroes is not supported it'll be replaced by regular writes */ 4142 ut_enable_io_type(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, false); 4143 bdev->max_write_zeroes = bdev_get_max_write(bdev, ZERO_BUFFER_SIZE); 4144 num_io_blocks = ZERO_BUFFER_SIZE / bdev->blocklen; 4145 num_requests = 2; 4146 num_blocks = (ZERO_BUFFER_SIZE / bdev->blocklen) * num_requests; 4147 4148 for (offset = 0; offset < num_requests; ++offset) { 4149 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 4150 offset * num_io_blocks, num_io_blocks, 0); 4151 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 4152 } 4153 4154 rc = spdk_bdev_write_zeroes_blocks(desc, ioch, 0, num_blocks, io_done, NULL); 4155 CU_ASSERT_EQUAL(rc, 0); 4156 num_completed = stub_complete_io(num_requests); 4157 CU_ASSERT_EQUAL(num_completed, num_requests); 4158 4159 /* Check that the splitting is correct if bdev has interleaved metadata */ 4160 bdev->md_interleave = true; 4161 bdev->md_len = 64; 4162 bdev->blocklen = 4096 + 64; 4163 bdev->max_write_zeroes = bdev_get_max_write(bdev, ZERO_BUFFER_SIZE); 4164 num_blocks = (ZERO_BUFFER_SIZE / bdev->blocklen) * 2; 4165 4166 num_requests = offset = 0; 4167 while (offset < num_blocks) { 4168 num_io_blocks = spdk_min(ZERO_BUFFER_SIZE / bdev->blocklen, num_blocks - offset); 4169 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 4170 offset, num_io_blocks, 0); 4171 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 4172 offset += num_io_blocks; 4173 num_requests++; 4174 } 4175 4176 rc = spdk_bdev_write_zeroes_blocks(desc, ioch, 0, num_blocks, io_done, NULL); 4177 CU_ASSERT_EQUAL(rc, 0); 4178 num_completed = stub_complete_io(num_requests); 4179 CU_ASSERT_EQUAL(num_completed, num_requests); 4180 num_completed = stub_complete_io(num_requests); 4181 assert(num_completed == 0); 4182 4183 /* Check the the same for separate metadata buffer */ 4184 bdev->md_interleave = false; 4185 bdev->md_len = 64; 4186 bdev->blocklen = 4096; 4187 bdev->max_write_zeroes = bdev_get_max_write(bdev, ZERO_BUFFER_SIZE); 4188 4189 num_requests = offset = 0; 4190 while (offset < num_blocks) { 4191 num_io_blocks = spdk_min(ZERO_BUFFER_SIZE / (bdev->blocklen + bdev->md_len), num_blocks); 4192 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 4193 offset, num_io_blocks, 0); 4194 expected_io->md_buf = (char *)g_bdev_mgr.zero_buffer + num_io_blocks * bdev->blocklen; 4195 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 4196 offset += num_io_blocks; 4197 num_requests++; 4198 } 4199 4200 rc = spdk_bdev_write_zeroes_blocks(desc, ioch, 0, num_blocks, io_done, NULL); 4201 CU_ASSERT_EQUAL(rc, 0); 4202 num_completed = stub_complete_io(num_requests); 4203 CU_ASSERT_EQUAL(num_completed, num_requests); 4204 4205 ut_enable_io_type(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, true); 4206 spdk_put_io_channel(ioch); 4207 spdk_bdev_close(desc); 4208 free_bdev(bdev); 4209 ut_fini_bdev(); 4210 } 4211 4212 static void 4213 bdev_zcopy_write(void) 4214 { 4215 struct spdk_bdev *bdev; 4216 struct spdk_bdev_desc *desc = NULL; 4217 struct spdk_io_channel *ioch; 4218 struct ut_expected_io *expected_io; 4219 uint64_t offset, num_blocks; 4220 uint32_t num_completed; 4221 char aa_buf[512]; 4222 struct iovec iov; 4223 int rc; 4224 const bool populate = false; 4225 const bool commit = true; 4226 4227 memset(aa_buf, 0xaa, sizeof(aa_buf)); 4228 4229 ut_init_bdev(NULL); 4230 bdev = allocate_bdev("bdev"); 4231 4232 rc = spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc); 4233 CU_ASSERT_EQUAL(rc, 0); 4234 SPDK_CU_ASSERT_FATAL(desc != NULL); 4235 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 4236 ioch = spdk_bdev_get_io_channel(desc); 4237 SPDK_CU_ASSERT_FATAL(ioch != NULL); 4238 4239 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 4240 4241 offset = 50; 4242 num_blocks = 1; 4243 iov.iov_base = NULL; 4244 iov.iov_len = 0; 4245 4246 g_zcopy_read_buf = (void *) 0x1122334455667788UL; 4247 g_zcopy_read_buf_len = (uint32_t) -1; 4248 /* Do a zcopy start for a write (populate=false) */ 4249 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_ZCOPY, offset, num_blocks, 0); 4250 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 4251 g_io_done = false; 4252 g_zcopy_write_buf = aa_buf; 4253 g_zcopy_write_buf_len = sizeof(aa_buf); 4254 g_zcopy_bdev_io = NULL; 4255 rc = spdk_bdev_zcopy_start(desc, ioch, &iov, 1, offset, num_blocks, populate, io_done, NULL); 4256 CU_ASSERT_EQUAL(rc, 0); 4257 num_completed = stub_complete_io(1); 4258 CU_ASSERT_EQUAL(num_completed, 1); 4259 CU_ASSERT(g_io_done == true); 4260 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS); 4261 /* Check that the iov has been set up */ 4262 CU_ASSERT(iov.iov_base == g_zcopy_write_buf); 4263 CU_ASSERT(iov.iov_len == g_zcopy_write_buf_len); 4264 /* Check that the bdev_io has been saved */ 4265 CU_ASSERT(g_zcopy_bdev_io != NULL); 4266 /* Now do the zcopy end for a write (commit=true) */ 4267 g_io_done = false; 4268 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_ZCOPY, offset, num_blocks, 0); 4269 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 4270 rc = spdk_bdev_zcopy_end(g_zcopy_bdev_io, commit, io_done, NULL); 4271 CU_ASSERT_EQUAL(rc, 0); 4272 num_completed = stub_complete_io(1); 4273 CU_ASSERT_EQUAL(num_completed, 1); 4274 CU_ASSERT(g_io_done == true); 4275 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS); 4276 /* Check the g_zcopy are reset by io_done */ 4277 CU_ASSERT(g_zcopy_write_buf == NULL); 4278 CU_ASSERT(g_zcopy_write_buf_len == 0); 4279 /* Check that io_done has freed the g_zcopy_bdev_io */ 4280 CU_ASSERT(g_zcopy_bdev_io == NULL); 4281 4282 /* Check the zcopy read buffer has not been touched which 4283 * ensures that the correct buffers were used. 4284 */ 4285 CU_ASSERT(g_zcopy_read_buf == (void *) 0x1122334455667788UL); 4286 CU_ASSERT(g_zcopy_read_buf_len == (uint32_t) -1); 4287 4288 spdk_put_io_channel(ioch); 4289 spdk_bdev_close(desc); 4290 free_bdev(bdev); 4291 ut_fini_bdev(); 4292 } 4293 4294 static void 4295 bdev_zcopy_read(void) 4296 { 4297 struct spdk_bdev *bdev; 4298 struct spdk_bdev_desc *desc = NULL; 4299 struct spdk_io_channel *ioch; 4300 struct ut_expected_io *expected_io; 4301 uint64_t offset, num_blocks; 4302 uint32_t num_completed; 4303 char aa_buf[512]; 4304 struct iovec iov; 4305 int rc; 4306 const bool populate = true; 4307 const bool commit = false; 4308 4309 memset(aa_buf, 0xaa, sizeof(aa_buf)); 4310 4311 ut_init_bdev(NULL); 4312 bdev = allocate_bdev("bdev"); 4313 4314 rc = spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc); 4315 CU_ASSERT_EQUAL(rc, 0); 4316 SPDK_CU_ASSERT_FATAL(desc != NULL); 4317 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 4318 ioch = spdk_bdev_get_io_channel(desc); 4319 SPDK_CU_ASSERT_FATAL(ioch != NULL); 4320 4321 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 4322 4323 offset = 50; 4324 num_blocks = 1; 4325 iov.iov_base = NULL; 4326 iov.iov_len = 0; 4327 4328 g_zcopy_write_buf = (void *) 0x1122334455667788UL; 4329 g_zcopy_write_buf_len = (uint32_t) -1; 4330 4331 /* Do a zcopy start for a read (populate=true) */ 4332 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_ZCOPY, offset, num_blocks, 0); 4333 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 4334 g_io_done = false; 4335 g_zcopy_read_buf = aa_buf; 4336 g_zcopy_read_buf_len = sizeof(aa_buf); 4337 g_zcopy_bdev_io = NULL; 4338 rc = spdk_bdev_zcopy_start(desc, ioch, &iov, 1, offset, num_blocks, populate, io_done, NULL); 4339 CU_ASSERT_EQUAL(rc, 0); 4340 num_completed = stub_complete_io(1); 4341 CU_ASSERT_EQUAL(num_completed, 1); 4342 CU_ASSERT(g_io_done == true); 4343 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS); 4344 /* Check that the iov has been set up */ 4345 CU_ASSERT(iov.iov_base == g_zcopy_read_buf); 4346 CU_ASSERT(iov.iov_len == g_zcopy_read_buf_len); 4347 /* Check that the bdev_io has been saved */ 4348 CU_ASSERT(g_zcopy_bdev_io != NULL); 4349 4350 /* Now do the zcopy end for a read (commit=false) */ 4351 g_io_done = false; 4352 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_ZCOPY, offset, num_blocks, 0); 4353 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 4354 rc = spdk_bdev_zcopy_end(g_zcopy_bdev_io, commit, io_done, NULL); 4355 CU_ASSERT_EQUAL(rc, 0); 4356 num_completed = stub_complete_io(1); 4357 CU_ASSERT_EQUAL(num_completed, 1); 4358 CU_ASSERT(g_io_done == true); 4359 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS); 4360 /* Check the g_zcopy are reset by io_done */ 4361 CU_ASSERT(g_zcopy_read_buf == NULL); 4362 CU_ASSERT(g_zcopy_read_buf_len == 0); 4363 /* Check that io_done has freed the g_zcopy_bdev_io */ 4364 CU_ASSERT(g_zcopy_bdev_io == NULL); 4365 4366 /* Check the zcopy write buffer has not been touched which 4367 * ensures that the correct buffers were used. 4368 */ 4369 CU_ASSERT(g_zcopy_write_buf == (void *) 0x1122334455667788UL); 4370 CU_ASSERT(g_zcopy_write_buf_len == (uint32_t) -1); 4371 4372 spdk_put_io_channel(ioch); 4373 spdk_bdev_close(desc); 4374 free_bdev(bdev); 4375 ut_fini_bdev(); 4376 } 4377 4378 static void 4379 bdev_open_while_hotremove(void) 4380 { 4381 struct spdk_bdev *bdev; 4382 struct spdk_bdev_desc *desc[2] = {}; 4383 int rc; 4384 4385 bdev = allocate_bdev("bdev"); 4386 4387 rc = spdk_bdev_open_ext("bdev", false, bdev_ut_event_cb, NULL, &desc[0]); 4388 CU_ASSERT(rc == 0); 4389 SPDK_CU_ASSERT_FATAL(desc[0] != NULL); 4390 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc[0])); 4391 4392 spdk_bdev_unregister(bdev, NULL, NULL); 4393 /* Bdev unregister is handled asynchronously. Poll thread to complete. */ 4394 poll_threads(); 4395 4396 rc = spdk_bdev_open_ext("bdev", false, bdev_ut_event_cb, NULL, &desc[1]); 4397 CU_ASSERT(rc == -ENODEV); 4398 SPDK_CU_ASSERT_FATAL(desc[1] == NULL); 4399 4400 spdk_bdev_close(desc[0]); 4401 free_bdev(bdev); 4402 } 4403 4404 static void 4405 bdev_close_while_hotremove(void) 4406 { 4407 struct spdk_bdev *bdev; 4408 struct spdk_bdev_desc *desc = NULL; 4409 int rc = 0; 4410 4411 bdev = allocate_bdev("bdev"); 4412 4413 rc = spdk_bdev_open_ext("bdev", true, bdev_open_cb1, &desc, &desc); 4414 CU_ASSERT_EQUAL(rc, 0); 4415 SPDK_CU_ASSERT_FATAL(desc != NULL); 4416 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 4417 4418 /* Simulate hot-unplug by unregistering bdev */ 4419 g_event_type1 = 0xFF; 4420 g_unregister_arg = NULL; 4421 g_unregister_rc = -1; 4422 spdk_bdev_unregister(bdev, bdev_unregister_cb, (void *)0x12345678); 4423 /* Close device while remove event is in flight */ 4424 spdk_bdev_close(desc); 4425 4426 /* Ensure that unregister callback is delayed */ 4427 CU_ASSERT_EQUAL(g_unregister_arg, NULL); 4428 CU_ASSERT_EQUAL(g_unregister_rc, -1); 4429 4430 poll_threads(); 4431 4432 /* Event callback shall not be issued because device was closed */ 4433 CU_ASSERT_EQUAL(g_event_type1, 0xFF); 4434 /* Unregister callback is issued */ 4435 CU_ASSERT_EQUAL(g_unregister_arg, (void *)0x12345678); 4436 CU_ASSERT_EQUAL(g_unregister_rc, 0); 4437 4438 free_bdev(bdev); 4439 } 4440 4441 static void 4442 bdev_open_ext_test(void) 4443 { 4444 struct spdk_bdev *bdev; 4445 struct spdk_bdev_desc *desc1 = NULL; 4446 struct spdk_bdev_desc *desc2 = NULL; 4447 int rc = 0; 4448 4449 bdev = allocate_bdev("bdev"); 4450 4451 rc = spdk_bdev_open_ext("bdev", true, NULL, NULL, &desc1); 4452 CU_ASSERT_EQUAL(rc, -EINVAL); 4453 4454 rc = spdk_bdev_open_ext("bdev", true, bdev_open_cb1, &desc1, &desc1); 4455 CU_ASSERT_EQUAL(rc, 0); 4456 4457 rc = spdk_bdev_open_ext("bdev", true, bdev_open_cb2, &desc2, &desc2); 4458 CU_ASSERT_EQUAL(rc, 0); 4459 4460 g_event_type1 = 0xFF; 4461 g_event_type2 = 0xFF; 4462 4463 /* Simulate hot-unplug by unregistering bdev */ 4464 spdk_bdev_unregister(bdev, NULL, NULL); 4465 poll_threads(); 4466 4467 /* Check if correct events have been triggered in event callback fn */ 4468 CU_ASSERT_EQUAL(g_event_type1, SPDK_BDEV_EVENT_REMOVE); 4469 CU_ASSERT_EQUAL(g_event_type2, SPDK_BDEV_EVENT_REMOVE); 4470 4471 free_bdev(bdev); 4472 poll_threads(); 4473 } 4474 4475 static void 4476 bdev_open_ext_unregister(void) 4477 { 4478 struct spdk_bdev *bdev; 4479 struct spdk_bdev_desc *desc1 = NULL; 4480 struct spdk_bdev_desc *desc2 = NULL; 4481 struct spdk_bdev_desc *desc3 = NULL; 4482 struct spdk_bdev_desc *desc4 = NULL; 4483 int rc = 0; 4484 4485 bdev = allocate_bdev("bdev"); 4486 4487 rc = spdk_bdev_open_ext("bdev", true, NULL, NULL, &desc1); 4488 CU_ASSERT_EQUAL(rc, -EINVAL); 4489 4490 rc = spdk_bdev_open_ext("bdev", true, bdev_open_cb1, &desc1, &desc1); 4491 CU_ASSERT_EQUAL(rc, 0); 4492 4493 rc = spdk_bdev_open_ext("bdev", true, bdev_open_cb2, &desc2, &desc2); 4494 CU_ASSERT_EQUAL(rc, 0); 4495 4496 rc = spdk_bdev_open_ext("bdev", true, bdev_open_cb3, &desc3, &desc3); 4497 CU_ASSERT_EQUAL(rc, 0); 4498 4499 rc = spdk_bdev_open_ext("bdev", true, bdev_open_cb4, &desc4, &desc4); 4500 CU_ASSERT_EQUAL(rc, 0); 4501 4502 g_event_type1 = 0xFF; 4503 g_event_type2 = 0xFF; 4504 g_event_type3 = 0xFF; 4505 g_event_type4 = 0xFF; 4506 4507 g_unregister_arg = NULL; 4508 g_unregister_rc = -1; 4509 4510 /* Simulate hot-unplug by unregistering bdev */ 4511 spdk_bdev_unregister(bdev, bdev_unregister_cb, (void *)0x12345678); 4512 4513 /* 4514 * Unregister is handled asynchronously and event callback 4515 * (i.e., above bdev_open_cbN) will be called. 4516 * For bdev_open_cb3 and bdev_open_cb4, it is intended to not 4517 * close the desc3 and desc4 so that the bdev is not closed. 4518 */ 4519 poll_threads(); 4520 4521 /* Check if correct events have been triggered in event callback fn */ 4522 CU_ASSERT_EQUAL(g_event_type1, SPDK_BDEV_EVENT_REMOVE); 4523 CU_ASSERT_EQUAL(g_event_type2, SPDK_BDEV_EVENT_REMOVE); 4524 CU_ASSERT_EQUAL(g_event_type3, SPDK_BDEV_EVENT_REMOVE); 4525 CU_ASSERT_EQUAL(g_event_type4, SPDK_BDEV_EVENT_REMOVE); 4526 4527 /* Check that unregister callback is delayed */ 4528 CU_ASSERT(g_unregister_arg == NULL); 4529 CU_ASSERT(g_unregister_rc == -1); 4530 4531 /* 4532 * Explicitly close desc3. As desc4 is still opened there, the 4533 * unergister callback is still delayed to execute. 4534 */ 4535 spdk_bdev_close(desc3); 4536 CU_ASSERT(g_unregister_arg == NULL); 4537 CU_ASSERT(g_unregister_rc == -1); 4538 4539 /* 4540 * Explicitly close desc4 to trigger the ongoing bdev unregister 4541 * operation after last desc is closed. 4542 */ 4543 spdk_bdev_close(desc4); 4544 4545 /* Poll the thread for the async unregister operation */ 4546 poll_threads(); 4547 4548 /* Check that unregister callback is executed */ 4549 CU_ASSERT(g_unregister_arg == (void *)0x12345678); 4550 CU_ASSERT(g_unregister_rc == 0); 4551 4552 free_bdev(bdev); 4553 poll_threads(); 4554 } 4555 4556 struct timeout_io_cb_arg { 4557 struct iovec iov; 4558 uint8_t type; 4559 }; 4560 4561 static int 4562 bdev_channel_count_submitted_io(struct spdk_bdev_channel *ch) 4563 { 4564 struct spdk_bdev_io *bdev_io; 4565 int n = 0; 4566 4567 if (!ch) { 4568 return -1; 4569 } 4570 4571 TAILQ_FOREACH(bdev_io, &ch->io_submitted, internal.ch_link) { 4572 n++; 4573 } 4574 4575 return n; 4576 } 4577 4578 static void 4579 bdev_channel_io_timeout_cb(void *cb_arg, struct spdk_bdev_io *bdev_io) 4580 { 4581 struct timeout_io_cb_arg *ctx = cb_arg; 4582 4583 ctx->type = bdev_io->type; 4584 ctx->iov.iov_base = bdev_io->iov.iov_base; 4585 ctx->iov.iov_len = bdev_io->iov.iov_len; 4586 } 4587 4588 static void 4589 bdev_set_io_timeout(void) 4590 { 4591 struct spdk_bdev *bdev; 4592 struct spdk_bdev_desc *desc = NULL; 4593 struct spdk_io_channel *io_ch = NULL; 4594 struct spdk_bdev_channel *bdev_ch = NULL; 4595 struct timeout_io_cb_arg cb_arg; 4596 4597 ut_init_bdev(NULL); 4598 bdev = allocate_bdev("bdev"); 4599 4600 CU_ASSERT(spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc) == 0); 4601 SPDK_CU_ASSERT_FATAL(desc != NULL); 4602 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 4603 4604 io_ch = spdk_bdev_get_io_channel(desc); 4605 CU_ASSERT(io_ch != NULL); 4606 4607 bdev_ch = spdk_io_channel_get_ctx(io_ch); 4608 CU_ASSERT(TAILQ_EMPTY(&bdev_ch->io_submitted)); 4609 4610 /* This is the part1. 4611 * We will check the bdev_ch->io_submitted list 4612 * TO make sure that it can link IOs and only the user submitted IOs 4613 */ 4614 CU_ASSERT(spdk_bdev_read(desc, io_ch, (void *)0x1000, 0, 4096, io_done, NULL) == 0); 4615 CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 1); 4616 CU_ASSERT(spdk_bdev_write(desc, io_ch, (void *)0x2000, 0, 4096, io_done, NULL) == 0); 4617 CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 2); 4618 stub_complete_io(1); 4619 CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 1); 4620 stub_complete_io(1); 4621 CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 0); 4622 4623 /* Split IO */ 4624 bdev->optimal_io_boundary = 16; 4625 bdev->split_on_optimal_io_boundary = true; 4626 4627 /* Now test that a single-vector command is split correctly. 4628 * Offset 14, length 8, payload 0xF000 4629 * Child - Offset 14, length 2, payload 0xF000 4630 * Child - Offset 16, length 6, payload 0xF000 + 2 * 512 4631 * 4632 * Set up the expected values before calling spdk_bdev_read_blocks 4633 */ 4634 CU_ASSERT(spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 8, io_done, NULL) == 0); 4635 /* We count all submitted IOs including IO that are generated by splitting. */ 4636 CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 3); 4637 stub_complete_io(1); 4638 CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 2); 4639 stub_complete_io(1); 4640 CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 0); 4641 4642 /* Also include the reset IO */ 4643 CU_ASSERT(spdk_bdev_reset(desc, io_ch, io_done, NULL) == 0); 4644 CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 1); 4645 poll_threads(); 4646 stub_complete_io(1); 4647 poll_threads(); 4648 CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 0); 4649 4650 /* This is part2 4651 * Test the desc timeout poller register 4652 */ 4653 4654 /* Successfully set the timeout */ 4655 CU_ASSERT(spdk_bdev_set_timeout(desc, 30, bdev_channel_io_timeout_cb, &cb_arg) == 0); 4656 CU_ASSERT(desc->io_timeout_poller != NULL); 4657 CU_ASSERT(desc->timeout_in_sec == 30); 4658 CU_ASSERT(desc->cb_fn == bdev_channel_io_timeout_cb); 4659 CU_ASSERT(desc->cb_arg == &cb_arg); 4660 4661 /* Change the timeout limit */ 4662 CU_ASSERT(spdk_bdev_set_timeout(desc, 20, bdev_channel_io_timeout_cb, &cb_arg) == 0); 4663 CU_ASSERT(desc->io_timeout_poller != NULL); 4664 CU_ASSERT(desc->timeout_in_sec == 20); 4665 CU_ASSERT(desc->cb_fn == bdev_channel_io_timeout_cb); 4666 CU_ASSERT(desc->cb_arg == &cb_arg); 4667 4668 /* Disable the timeout */ 4669 CU_ASSERT(spdk_bdev_set_timeout(desc, 0, NULL, NULL) == 0); 4670 CU_ASSERT(desc->io_timeout_poller == NULL); 4671 4672 /* This the part3 4673 * We will test to catch timeout IO and check whether the IO is 4674 * the submitted one. 4675 */ 4676 memset(&cb_arg, 0, sizeof(cb_arg)); 4677 CU_ASSERT(spdk_bdev_set_timeout(desc, 30, bdev_channel_io_timeout_cb, &cb_arg) == 0); 4678 CU_ASSERT(spdk_bdev_write_blocks(desc, io_ch, (void *)0x1000, 0, 1, io_done, NULL) == 0); 4679 4680 /* Don't reach the limit */ 4681 spdk_delay_us(15 * spdk_get_ticks_hz()); 4682 poll_threads(); 4683 CU_ASSERT(cb_arg.type == 0); 4684 CU_ASSERT(cb_arg.iov.iov_base == (void *)0x0); 4685 CU_ASSERT(cb_arg.iov.iov_len == 0); 4686 4687 /* 15 + 15 = 30 reach the limit */ 4688 spdk_delay_us(15 * spdk_get_ticks_hz()); 4689 poll_threads(); 4690 CU_ASSERT(cb_arg.type == SPDK_BDEV_IO_TYPE_WRITE); 4691 CU_ASSERT(cb_arg.iov.iov_base == (void *)0x1000); 4692 CU_ASSERT(cb_arg.iov.iov_len == 1 * bdev->blocklen); 4693 stub_complete_io(1); 4694 4695 /* Use the same split IO above and check the IO */ 4696 memset(&cb_arg, 0, sizeof(cb_arg)); 4697 CU_ASSERT(spdk_bdev_write_blocks(desc, io_ch, (void *)0xF000, 14, 8, io_done, NULL) == 0); 4698 4699 /* The first child complete in time */ 4700 spdk_delay_us(15 * spdk_get_ticks_hz()); 4701 poll_threads(); 4702 stub_complete_io(1); 4703 CU_ASSERT(cb_arg.type == 0); 4704 CU_ASSERT(cb_arg.iov.iov_base == (void *)0x0); 4705 CU_ASSERT(cb_arg.iov.iov_len == 0); 4706 4707 /* The second child reach the limit */ 4708 spdk_delay_us(15 * spdk_get_ticks_hz()); 4709 poll_threads(); 4710 CU_ASSERT(cb_arg.type == SPDK_BDEV_IO_TYPE_WRITE); 4711 CU_ASSERT(cb_arg.iov.iov_base == (void *)0xF000); 4712 CU_ASSERT(cb_arg.iov.iov_len == 8 * bdev->blocklen); 4713 stub_complete_io(1); 4714 4715 /* Also include the reset IO */ 4716 memset(&cb_arg, 0, sizeof(cb_arg)); 4717 CU_ASSERT(spdk_bdev_reset(desc, io_ch, io_done, NULL) == 0); 4718 spdk_delay_us(30 * spdk_get_ticks_hz()); 4719 poll_threads(); 4720 CU_ASSERT(cb_arg.type == SPDK_BDEV_IO_TYPE_RESET); 4721 stub_complete_io(1); 4722 poll_threads(); 4723 4724 spdk_put_io_channel(io_ch); 4725 spdk_bdev_close(desc); 4726 free_bdev(bdev); 4727 ut_fini_bdev(); 4728 } 4729 4730 static void 4731 bdev_set_qd_sampling(void) 4732 { 4733 struct spdk_bdev *bdev; 4734 struct spdk_bdev_desc *desc = NULL; 4735 struct spdk_io_channel *io_ch = NULL; 4736 struct spdk_bdev_channel *bdev_ch = NULL; 4737 struct timeout_io_cb_arg cb_arg; 4738 4739 ut_init_bdev(NULL); 4740 bdev = allocate_bdev("bdev"); 4741 4742 CU_ASSERT(spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc) == 0); 4743 SPDK_CU_ASSERT_FATAL(desc != NULL); 4744 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 4745 4746 io_ch = spdk_bdev_get_io_channel(desc); 4747 CU_ASSERT(io_ch != NULL); 4748 4749 bdev_ch = spdk_io_channel_get_ctx(io_ch); 4750 CU_ASSERT(TAILQ_EMPTY(&bdev_ch->io_submitted)); 4751 4752 /* This is the part1. 4753 * We will check the bdev_ch->io_submitted list 4754 * TO make sure that it can link IOs and only the user submitted IOs 4755 */ 4756 CU_ASSERT(spdk_bdev_read(desc, io_ch, (void *)0x1000, 0, 4096, io_done, NULL) == 0); 4757 CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 1); 4758 CU_ASSERT(spdk_bdev_write(desc, io_ch, (void *)0x2000, 0, 4096, io_done, NULL) == 0); 4759 CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 2); 4760 stub_complete_io(1); 4761 CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 1); 4762 stub_complete_io(1); 4763 CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 0); 4764 4765 /* This is the part2. 4766 * Test the bdev's qd poller register 4767 */ 4768 /* 1st Successfully set the qd sampling period */ 4769 spdk_bdev_set_qd_sampling_period(bdev, 10); 4770 CU_ASSERT(bdev->internal.new_period == 10); 4771 CU_ASSERT(bdev->internal.period == 10); 4772 CU_ASSERT(bdev->internal.qd_desc != NULL); 4773 poll_threads(); 4774 CU_ASSERT(bdev->internal.qd_poller != NULL); 4775 4776 /* 2nd Change the qd sampling period */ 4777 spdk_bdev_set_qd_sampling_period(bdev, 20); 4778 CU_ASSERT(bdev->internal.new_period == 20); 4779 CU_ASSERT(bdev->internal.period == 10); 4780 CU_ASSERT(bdev->internal.qd_desc != NULL); 4781 poll_threads(); 4782 CU_ASSERT(bdev->internal.qd_poller != NULL); 4783 CU_ASSERT(bdev->internal.period == bdev->internal.new_period); 4784 4785 /* 3rd Change the qd sampling period and verify qd_poll_in_progress */ 4786 spdk_delay_us(20); 4787 poll_thread_times(0, 1); 4788 CU_ASSERT(bdev->internal.qd_poll_in_progress == true); 4789 spdk_bdev_set_qd_sampling_period(bdev, 30); 4790 CU_ASSERT(bdev->internal.new_period == 30); 4791 CU_ASSERT(bdev->internal.period == 20); 4792 poll_threads(); 4793 CU_ASSERT(bdev->internal.qd_poll_in_progress == false); 4794 CU_ASSERT(bdev->internal.period == bdev->internal.new_period); 4795 4796 /* 4th Disable the qd sampling period */ 4797 spdk_bdev_set_qd_sampling_period(bdev, 0); 4798 CU_ASSERT(bdev->internal.new_period == 0); 4799 CU_ASSERT(bdev->internal.period == 30); 4800 poll_threads(); 4801 CU_ASSERT(bdev->internal.qd_poller == NULL); 4802 CU_ASSERT(bdev->internal.period == bdev->internal.new_period); 4803 CU_ASSERT(bdev->internal.qd_desc == NULL); 4804 4805 /* This is the part3. 4806 * We will test the submitted IO and reset works 4807 * properly with the qd sampling. 4808 */ 4809 memset(&cb_arg, 0, sizeof(cb_arg)); 4810 spdk_bdev_set_qd_sampling_period(bdev, 1); 4811 poll_threads(); 4812 4813 CU_ASSERT(spdk_bdev_write(desc, io_ch, (void *)0x2000, 0, 4096, io_done, NULL) == 0); 4814 CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 1); 4815 4816 /* Also include the reset IO */ 4817 memset(&cb_arg, 0, sizeof(cb_arg)); 4818 CU_ASSERT(spdk_bdev_reset(desc, io_ch, io_done, NULL) == 0); 4819 poll_threads(); 4820 4821 /* Close the desc */ 4822 spdk_put_io_channel(io_ch); 4823 spdk_bdev_close(desc); 4824 4825 /* Complete the submitted IO and reset */ 4826 stub_complete_io(2); 4827 poll_threads(); 4828 4829 free_bdev(bdev); 4830 ut_fini_bdev(); 4831 } 4832 4833 static void 4834 lba_range_overlap(void) 4835 { 4836 struct lba_range r1, r2; 4837 4838 r1.offset = 100; 4839 r1.length = 50; 4840 4841 r2.offset = 0; 4842 r2.length = 1; 4843 CU_ASSERT(!bdev_lba_range_overlapped(&r1, &r2)); 4844 4845 r2.offset = 0; 4846 r2.length = 100; 4847 CU_ASSERT(!bdev_lba_range_overlapped(&r1, &r2)); 4848 4849 r2.offset = 0; 4850 r2.length = 110; 4851 CU_ASSERT(bdev_lba_range_overlapped(&r1, &r2)); 4852 4853 r2.offset = 100; 4854 r2.length = 10; 4855 CU_ASSERT(bdev_lba_range_overlapped(&r1, &r2)); 4856 4857 r2.offset = 110; 4858 r2.length = 20; 4859 CU_ASSERT(bdev_lba_range_overlapped(&r1, &r2)); 4860 4861 r2.offset = 140; 4862 r2.length = 150; 4863 CU_ASSERT(bdev_lba_range_overlapped(&r1, &r2)); 4864 4865 r2.offset = 130; 4866 r2.length = 200; 4867 CU_ASSERT(bdev_lba_range_overlapped(&r1, &r2)); 4868 4869 r2.offset = 150; 4870 r2.length = 100; 4871 CU_ASSERT(!bdev_lba_range_overlapped(&r1, &r2)); 4872 4873 r2.offset = 110; 4874 r2.length = 0; 4875 CU_ASSERT(!bdev_lba_range_overlapped(&r1, &r2)); 4876 } 4877 4878 static bool g_lock_lba_range_done; 4879 static bool g_unlock_lba_range_done; 4880 4881 static void 4882 lock_lba_range_done(struct lba_range *range, void *ctx, int status) 4883 { 4884 g_lock_lba_range_done = true; 4885 } 4886 4887 static void 4888 unlock_lba_range_done(struct lba_range *range, void *ctx, int status) 4889 { 4890 g_unlock_lba_range_done = true; 4891 } 4892 4893 static void 4894 lock_lba_range_check_ranges(void) 4895 { 4896 struct spdk_bdev *bdev; 4897 struct spdk_bdev_desc *desc = NULL; 4898 struct spdk_io_channel *io_ch; 4899 struct spdk_bdev_channel *channel; 4900 struct lba_range *range; 4901 int ctx1; 4902 int rc; 4903 4904 ut_init_bdev(NULL); 4905 bdev = allocate_bdev("bdev0"); 4906 4907 rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc); 4908 CU_ASSERT(rc == 0); 4909 CU_ASSERT(desc != NULL); 4910 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 4911 io_ch = spdk_bdev_get_io_channel(desc); 4912 CU_ASSERT(io_ch != NULL); 4913 channel = spdk_io_channel_get_ctx(io_ch); 4914 4915 g_lock_lba_range_done = false; 4916 rc = bdev_lock_lba_range(desc, io_ch, 20, 10, lock_lba_range_done, &ctx1); 4917 CU_ASSERT(rc == 0); 4918 poll_threads(); 4919 4920 CU_ASSERT(g_lock_lba_range_done == true); 4921 range = TAILQ_FIRST(&channel->locked_ranges); 4922 SPDK_CU_ASSERT_FATAL(range != NULL); 4923 CU_ASSERT(range->offset == 20); 4924 CU_ASSERT(range->length == 10); 4925 CU_ASSERT(range->owner_ch == channel); 4926 4927 /* Unlocks must exactly match a lock. */ 4928 g_unlock_lba_range_done = false; 4929 rc = bdev_unlock_lba_range(desc, io_ch, 20, 1, unlock_lba_range_done, &ctx1); 4930 CU_ASSERT(rc == -EINVAL); 4931 CU_ASSERT(g_unlock_lba_range_done == false); 4932 4933 rc = bdev_unlock_lba_range(desc, io_ch, 20, 10, unlock_lba_range_done, &ctx1); 4934 CU_ASSERT(rc == 0); 4935 spdk_delay_us(100); 4936 poll_threads(); 4937 4938 CU_ASSERT(g_unlock_lba_range_done == true); 4939 CU_ASSERT(TAILQ_EMPTY(&channel->locked_ranges)); 4940 4941 spdk_put_io_channel(io_ch); 4942 spdk_bdev_close(desc); 4943 free_bdev(bdev); 4944 ut_fini_bdev(); 4945 } 4946 4947 static void 4948 lock_lba_range_with_io_outstanding(void) 4949 { 4950 struct spdk_bdev *bdev; 4951 struct spdk_bdev_desc *desc = NULL; 4952 struct spdk_io_channel *io_ch; 4953 struct spdk_bdev_channel *channel; 4954 struct lba_range *range; 4955 char buf[4096]; 4956 int ctx1; 4957 int rc; 4958 4959 ut_init_bdev(NULL); 4960 bdev = allocate_bdev("bdev0"); 4961 4962 rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc); 4963 CU_ASSERT(rc == 0); 4964 CU_ASSERT(desc != NULL); 4965 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 4966 io_ch = spdk_bdev_get_io_channel(desc); 4967 CU_ASSERT(io_ch != NULL); 4968 channel = spdk_io_channel_get_ctx(io_ch); 4969 4970 g_io_done = false; 4971 rc = spdk_bdev_read_blocks(desc, io_ch, buf, 20, 1, io_done, &ctx1); 4972 CU_ASSERT(rc == 0); 4973 4974 g_lock_lba_range_done = false; 4975 rc = bdev_lock_lba_range(desc, io_ch, 20, 10, lock_lba_range_done, &ctx1); 4976 CU_ASSERT(rc == 0); 4977 poll_threads(); 4978 4979 /* The lock should immediately become valid, since there are no outstanding 4980 * write I/O. 4981 */ 4982 CU_ASSERT(g_io_done == false); 4983 CU_ASSERT(g_lock_lba_range_done == true); 4984 range = TAILQ_FIRST(&channel->locked_ranges); 4985 SPDK_CU_ASSERT_FATAL(range != NULL); 4986 CU_ASSERT(range->offset == 20); 4987 CU_ASSERT(range->length == 10); 4988 CU_ASSERT(range->owner_ch == channel); 4989 CU_ASSERT(range->locked_ctx == &ctx1); 4990 4991 rc = bdev_unlock_lba_range(desc, io_ch, 20, 10, lock_lba_range_done, &ctx1); 4992 CU_ASSERT(rc == 0); 4993 stub_complete_io(1); 4994 spdk_delay_us(100); 4995 poll_threads(); 4996 4997 CU_ASSERT(TAILQ_EMPTY(&channel->locked_ranges)); 4998 4999 /* Now try again, but with a write I/O. */ 5000 g_io_done = false; 5001 rc = spdk_bdev_write_blocks(desc, io_ch, buf, 20, 1, io_done, &ctx1); 5002 CU_ASSERT(rc == 0); 5003 5004 g_lock_lba_range_done = false; 5005 rc = bdev_lock_lba_range(desc, io_ch, 20, 10, lock_lba_range_done, &ctx1); 5006 CU_ASSERT(rc == 0); 5007 poll_threads(); 5008 5009 /* The lock should not be fully valid yet, since a write I/O is outstanding. 5010 * But note that the range should be on the channel's locked_list, to make sure no 5011 * new write I/O are started. 5012 */ 5013 CU_ASSERT(g_io_done == false); 5014 CU_ASSERT(g_lock_lba_range_done == false); 5015 range = TAILQ_FIRST(&channel->locked_ranges); 5016 SPDK_CU_ASSERT_FATAL(range != NULL); 5017 CU_ASSERT(range->offset == 20); 5018 CU_ASSERT(range->length == 10); 5019 5020 /* Complete the write I/O. This should make the lock valid (checked by confirming 5021 * our callback was invoked). 5022 */ 5023 stub_complete_io(1); 5024 spdk_delay_us(100); 5025 poll_threads(); 5026 CU_ASSERT(g_io_done == true); 5027 CU_ASSERT(g_lock_lba_range_done == true); 5028 5029 rc = bdev_unlock_lba_range(desc, io_ch, 20, 10, unlock_lba_range_done, &ctx1); 5030 CU_ASSERT(rc == 0); 5031 poll_threads(); 5032 5033 CU_ASSERT(TAILQ_EMPTY(&channel->locked_ranges)); 5034 5035 spdk_put_io_channel(io_ch); 5036 spdk_bdev_close(desc); 5037 free_bdev(bdev); 5038 ut_fini_bdev(); 5039 } 5040 5041 static void 5042 lock_lba_range_overlapped(void) 5043 { 5044 struct spdk_bdev *bdev; 5045 struct spdk_bdev_desc *desc = NULL; 5046 struct spdk_io_channel *io_ch; 5047 struct spdk_bdev_channel *channel; 5048 struct lba_range *range; 5049 int ctx1; 5050 int rc; 5051 5052 ut_init_bdev(NULL); 5053 bdev = allocate_bdev("bdev0"); 5054 5055 rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc); 5056 CU_ASSERT(rc == 0); 5057 CU_ASSERT(desc != NULL); 5058 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 5059 io_ch = spdk_bdev_get_io_channel(desc); 5060 CU_ASSERT(io_ch != NULL); 5061 channel = spdk_io_channel_get_ctx(io_ch); 5062 5063 /* Lock range 20-29. */ 5064 g_lock_lba_range_done = false; 5065 rc = bdev_lock_lba_range(desc, io_ch, 20, 10, lock_lba_range_done, &ctx1); 5066 CU_ASSERT(rc == 0); 5067 poll_threads(); 5068 5069 CU_ASSERT(g_lock_lba_range_done == true); 5070 range = TAILQ_FIRST(&channel->locked_ranges); 5071 SPDK_CU_ASSERT_FATAL(range != NULL); 5072 CU_ASSERT(range->offset == 20); 5073 CU_ASSERT(range->length == 10); 5074 5075 /* Try to lock range 25-39. It should not lock immediately, since it overlaps with 5076 * 20-29. 5077 */ 5078 g_lock_lba_range_done = false; 5079 rc = bdev_lock_lba_range(desc, io_ch, 25, 15, lock_lba_range_done, &ctx1); 5080 CU_ASSERT(rc == 0); 5081 poll_threads(); 5082 5083 CU_ASSERT(g_lock_lba_range_done == false); 5084 range = TAILQ_FIRST(&bdev->internal.pending_locked_ranges); 5085 SPDK_CU_ASSERT_FATAL(range != NULL); 5086 CU_ASSERT(range->offset == 25); 5087 CU_ASSERT(range->length == 15); 5088 5089 /* Unlock 20-29. This should result in range 25-39 now getting locked since it 5090 * no longer overlaps with an active lock. 5091 */ 5092 g_unlock_lba_range_done = false; 5093 rc = bdev_unlock_lba_range(desc, io_ch, 20, 10, unlock_lba_range_done, &ctx1); 5094 CU_ASSERT(rc == 0); 5095 poll_threads(); 5096 5097 CU_ASSERT(g_unlock_lba_range_done == true); 5098 CU_ASSERT(TAILQ_EMPTY(&bdev->internal.pending_locked_ranges)); 5099 range = TAILQ_FIRST(&channel->locked_ranges); 5100 SPDK_CU_ASSERT_FATAL(range != NULL); 5101 CU_ASSERT(range->offset == 25); 5102 CU_ASSERT(range->length == 15); 5103 5104 /* Lock 40-59. This should immediately lock since it does not overlap with the 5105 * currently active 25-39 lock. 5106 */ 5107 g_lock_lba_range_done = false; 5108 rc = bdev_lock_lba_range(desc, io_ch, 40, 20, lock_lba_range_done, &ctx1); 5109 CU_ASSERT(rc == 0); 5110 poll_threads(); 5111 5112 CU_ASSERT(g_lock_lba_range_done == true); 5113 range = TAILQ_FIRST(&bdev->internal.locked_ranges); 5114 SPDK_CU_ASSERT_FATAL(range != NULL); 5115 range = TAILQ_NEXT(range, tailq); 5116 SPDK_CU_ASSERT_FATAL(range != NULL); 5117 CU_ASSERT(range->offset == 40); 5118 CU_ASSERT(range->length == 20); 5119 5120 /* Try to lock 35-44. Note that this overlaps with both 25-39 and 40-59. */ 5121 g_lock_lba_range_done = false; 5122 rc = bdev_lock_lba_range(desc, io_ch, 35, 10, lock_lba_range_done, &ctx1); 5123 CU_ASSERT(rc == 0); 5124 poll_threads(); 5125 5126 CU_ASSERT(g_lock_lba_range_done == false); 5127 range = TAILQ_FIRST(&bdev->internal.pending_locked_ranges); 5128 SPDK_CU_ASSERT_FATAL(range != NULL); 5129 CU_ASSERT(range->offset == 35); 5130 CU_ASSERT(range->length == 10); 5131 5132 /* Unlock 25-39. Make sure that 35-44 is still in the pending list, since 5133 * the 40-59 lock is still active. 5134 */ 5135 g_unlock_lba_range_done = false; 5136 rc = bdev_unlock_lba_range(desc, io_ch, 25, 15, unlock_lba_range_done, &ctx1); 5137 CU_ASSERT(rc == 0); 5138 poll_threads(); 5139 5140 CU_ASSERT(g_unlock_lba_range_done == true); 5141 CU_ASSERT(g_lock_lba_range_done == false); 5142 range = TAILQ_FIRST(&bdev->internal.pending_locked_ranges); 5143 SPDK_CU_ASSERT_FATAL(range != NULL); 5144 CU_ASSERT(range->offset == 35); 5145 CU_ASSERT(range->length == 10); 5146 5147 /* Unlock 40-59. This should result in 35-44 now getting locked, since there are 5148 * no longer any active overlapping locks. 5149 */ 5150 g_unlock_lba_range_done = false; 5151 rc = bdev_unlock_lba_range(desc, io_ch, 40, 20, unlock_lba_range_done, &ctx1); 5152 CU_ASSERT(rc == 0); 5153 poll_threads(); 5154 5155 CU_ASSERT(g_unlock_lba_range_done == true); 5156 CU_ASSERT(g_lock_lba_range_done == true); 5157 CU_ASSERT(TAILQ_EMPTY(&bdev->internal.pending_locked_ranges)); 5158 range = TAILQ_FIRST(&bdev->internal.locked_ranges); 5159 SPDK_CU_ASSERT_FATAL(range != NULL); 5160 CU_ASSERT(range->offset == 35); 5161 CU_ASSERT(range->length == 10); 5162 5163 /* Finally, unlock 35-44. */ 5164 g_unlock_lba_range_done = false; 5165 rc = bdev_unlock_lba_range(desc, io_ch, 35, 10, unlock_lba_range_done, &ctx1); 5166 CU_ASSERT(rc == 0); 5167 poll_threads(); 5168 5169 CU_ASSERT(g_unlock_lba_range_done == true); 5170 CU_ASSERT(TAILQ_EMPTY(&bdev->internal.locked_ranges)); 5171 5172 spdk_put_io_channel(io_ch); 5173 spdk_bdev_close(desc); 5174 free_bdev(bdev); 5175 ut_fini_bdev(); 5176 } 5177 5178 static void 5179 bdev_quiesce_done(void *ctx, int status) 5180 { 5181 g_lock_lba_range_done = true; 5182 } 5183 5184 static void 5185 bdev_unquiesce_done(void *ctx, int status) 5186 { 5187 g_unlock_lba_range_done = true; 5188 } 5189 5190 static void 5191 bdev_quiesce_done_unquiesce(void *ctx, int status) 5192 { 5193 struct spdk_bdev *bdev = ctx; 5194 int rc; 5195 5196 g_lock_lba_range_done = true; 5197 5198 rc = spdk_bdev_unquiesce(bdev, &bdev_ut_if, bdev_unquiesce_done, NULL); 5199 CU_ASSERT(rc == 0); 5200 } 5201 5202 static void 5203 bdev_quiesce(void) 5204 { 5205 struct spdk_bdev *bdev; 5206 struct spdk_bdev_desc *desc = NULL; 5207 struct spdk_io_channel *io_ch; 5208 struct spdk_bdev_channel *channel; 5209 struct lba_range *range; 5210 struct spdk_bdev_io *bdev_io; 5211 int ctx1; 5212 int rc; 5213 5214 ut_init_bdev(NULL); 5215 bdev = allocate_bdev("bdev0"); 5216 5217 rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc); 5218 CU_ASSERT(rc == 0); 5219 CU_ASSERT(desc != NULL); 5220 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 5221 io_ch = spdk_bdev_get_io_channel(desc); 5222 CU_ASSERT(io_ch != NULL); 5223 channel = spdk_io_channel_get_ctx(io_ch); 5224 5225 g_lock_lba_range_done = false; 5226 rc = spdk_bdev_quiesce(bdev, &bdev_ut_if, bdev_quiesce_done, &ctx1); 5227 CU_ASSERT(rc == 0); 5228 poll_threads(); 5229 5230 CU_ASSERT(g_lock_lba_range_done == true); 5231 range = TAILQ_FIRST(&channel->locked_ranges); 5232 SPDK_CU_ASSERT_FATAL(range != NULL); 5233 CU_ASSERT(range->offset == 0); 5234 CU_ASSERT(range->length == bdev->blockcnt); 5235 CU_ASSERT(range->owner_ch == NULL); 5236 range = TAILQ_FIRST(&bdev_ut_if.internal.quiesced_ranges); 5237 SPDK_CU_ASSERT_FATAL(range != NULL); 5238 CU_ASSERT(range->offset == 0); 5239 CU_ASSERT(range->length == bdev->blockcnt); 5240 CU_ASSERT(range->owner_ch == NULL); 5241 5242 g_unlock_lba_range_done = false; 5243 rc = spdk_bdev_unquiesce(bdev, &bdev_ut_if, bdev_unquiesce_done, &ctx1); 5244 CU_ASSERT(rc == 0); 5245 spdk_delay_us(100); 5246 poll_threads(); 5247 5248 CU_ASSERT(g_unlock_lba_range_done == true); 5249 CU_ASSERT(TAILQ_EMPTY(&channel->locked_ranges)); 5250 CU_ASSERT(TAILQ_EMPTY(&bdev_ut_if.internal.quiesced_ranges)); 5251 5252 g_lock_lba_range_done = false; 5253 rc = spdk_bdev_quiesce_range(bdev, &bdev_ut_if, 20, 10, bdev_quiesce_done, &ctx1); 5254 CU_ASSERT(rc == 0); 5255 poll_threads(); 5256 5257 CU_ASSERT(g_lock_lba_range_done == true); 5258 range = TAILQ_FIRST(&channel->locked_ranges); 5259 SPDK_CU_ASSERT_FATAL(range != NULL); 5260 CU_ASSERT(range->offset == 20); 5261 CU_ASSERT(range->length == 10); 5262 CU_ASSERT(range->owner_ch == NULL); 5263 range = TAILQ_FIRST(&bdev_ut_if.internal.quiesced_ranges); 5264 SPDK_CU_ASSERT_FATAL(range != NULL); 5265 CU_ASSERT(range->offset == 20); 5266 CU_ASSERT(range->length == 10); 5267 CU_ASSERT(range->owner_ch == NULL); 5268 5269 /* Unlocks must exactly match a lock. */ 5270 g_unlock_lba_range_done = false; 5271 rc = spdk_bdev_unquiesce_range(bdev, &bdev_ut_if, 20, 1, bdev_unquiesce_done, &ctx1); 5272 CU_ASSERT(rc == -EINVAL); 5273 CU_ASSERT(g_unlock_lba_range_done == false); 5274 5275 rc = spdk_bdev_unquiesce_range(bdev, &bdev_ut_if, 20, 10, bdev_unquiesce_done, &ctx1); 5276 CU_ASSERT(rc == 0); 5277 spdk_delay_us(100); 5278 poll_threads(); 5279 5280 CU_ASSERT(g_unlock_lba_range_done == true); 5281 CU_ASSERT(TAILQ_EMPTY(&channel->locked_ranges)); 5282 CU_ASSERT(TAILQ_EMPTY(&bdev_ut_if.internal.quiesced_ranges)); 5283 5284 /* Test unquiesce from quiesce cb */ 5285 g_lock_lba_range_done = false; 5286 g_unlock_lba_range_done = false; 5287 rc = spdk_bdev_quiesce(bdev, &bdev_ut_if, bdev_quiesce_done_unquiesce, bdev); 5288 CU_ASSERT(rc == 0); 5289 poll_threads(); 5290 5291 CU_ASSERT(g_lock_lba_range_done == true); 5292 CU_ASSERT(g_unlock_lba_range_done == true); 5293 5294 /* Test quiesce with read I/O */ 5295 g_lock_lba_range_done = false; 5296 g_unlock_lba_range_done = false; 5297 g_io_done = false; 5298 rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 20, 1, io_done, &ctx1); 5299 CU_ASSERT(rc == 0); 5300 5301 rc = spdk_bdev_quiesce(bdev, &bdev_ut_if, bdev_quiesce_done, &ctx1); 5302 CU_ASSERT(rc == 0); 5303 poll_threads(); 5304 5305 CU_ASSERT(g_io_done == false); 5306 CU_ASSERT(g_lock_lba_range_done == false); 5307 range = TAILQ_FIRST(&channel->locked_ranges); 5308 SPDK_CU_ASSERT_FATAL(range != NULL); 5309 5310 stub_complete_io(1); 5311 spdk_delay_us(100); 5312 poll_threads(); 5313 CU_ASSERT(g_io_done == true); 5314 CU_ASSERT(g_lock_lba_range_done == true); 5315 CU_ASSERT(TAILQ_EMPTY(&channel->io_locked)); 5316 5317 g_io_done = false; 5318 rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 20, 1, io_done, &ctx1); 5319 CU_ASSERT(rc == 0); 5320 5321 bdev_io = TAILQ_FIRST(&channel->io_locked); 5322 SPDK_CU_ASSERT_FATAL(bdev_io != NULL); 5323 CU_ASSERT(bdev_io->u.bdev.offset_blocks == 20); 5324 CU_ASSERT(bdev_io->u.bdev.num_blocks == 1); 5325 5326 rc = spdk_bdev_unquiesce(bdev, &bdev_ut_if, bdev_unquiesce_done, &ctx1); 5327 CU_ASSERT(rc == 0); 5328 spdk_delay_us(100); 5329 poll_threads(); 5330 5331 CU_ASSERT(g_unlock_lba_range_done == true); 5332 CU_ASSERT(TAILQ_EMPTY(&channel->locked_ranges)); 5333 CU_ASSERT(TAILQ_EMPTY(&bdev_ut_if.internal.quiesced_ranges)); 5334 5335 CU_ASSERT(TAILQ_EMPTY(&channel->io_locked)); 5336 spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_SUCCESS); 5337 poll_threads(); 5338 CU_ASSERT(g_io_done == true); 5339 5340 spdk_put_io_channel(io_ch); 5341 spdk_bdev_close(desc); 5342 free_bdev(bdev); 5343 ut_fini_bdev(); 5344 } 5345 5346 static void 5347 abort_done(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg) 5348 { 5349 g_abort_done = true; 5350 g_abort_status = bdev_io->internal.status; 5351 spdk_bdev_free_io(bdev_io); 5352 } 5353 5354 static void 5355 bdev_io_abort(void) 5356 { 5357 struct spdk_bdev *bdev; 5358 struct spdk_bdev_desc *desc = NULL; 5359 struct spdk_io_channel *io_ch; 5360 struct spdk_bdev_channel *channel; 5361 struct spdk_bdev_mgmt_channel *mgmt_ch; 5362 struct spdk_bdev_opts bdev_opts = {}; 5363 struct iovec iov[SPDK_BDEV_IO_NUM_CHILD_IOV * 2]; 5364 uint64_t io_ctx1 = 0, io_ctx2 = 0, i; 5365 int rc; 5366 5367 spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts)); 5368 bdev_opts.bdev_io_pool_size = 7; 5369 bdev_opts.bdev_io_cache_size = 2; 5370 ut_init_bdev(&bdev_opts); 5371 5372 bdev = allocate_bdev("bdev0"); 5373 5374 rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc); 5375 CU_ASSERT(rc == 0); 5376 CU_ASSERT(desc != NULL); 5377 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 5378 io_ch = spdk_bdev_get_io_channel(desc); 5379 CU_ASSERT(io_ch != NULL); 5380 channel = spdk_io_channel_get_ctx(io_ch); 5381 mgmt_ch = channel->shared_resource->mgmt_ch; 5382 5383 g_abort_done = false; 5384 5385 ut_enable_io_type(SPDK_BDEV_IO_TYPE_ABORT, false); 5386 5387 rc = spdk_bdev_abort(desc, io_ch, &io_ctx1, abort_done, NULL); 5388 CU_ASSERT(rc == -ENOTSUP); 5389 5390 ut_enable_io_type(SPDK_BDEV_IO_TYPE_ABORT, true); 5391 5392 rc = spdk_bdev_abort(desc, io_ch, &io_ctx2, abort_done, NULL); 5393 CU_ASSERT(rc == 0); 5394 CU_ASSERT(g_abort_done == true); 5395 CU_ASSERT(g_abort_status == SPDK_BDEV_IO_STATUS_FAILED); 5396 5397 /* Test the case that the target I/O was successfully aborted. */ 5398 g_io_done = false; 5399 5400 rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, &io_ctx1); 5401 CU_ASSERT(rc == 0); 5402 CU_ASSERT(g_io_done == false); 5403 5404 g_abort_done = false; 5405 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 5406 5407 rc = spdk_bdev_abort(desc, io_ch, &io_ctx1, abort_done, NULL); 5408 CU_ASSERT(rc == 0); 5409 CU_ASSERT(g_io_done == true); 5410 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED); 5411 stub_complete_io(1); 5412 CU_ASSERT(g_abort_done == true); 5413 CU_ASSERT(g_abort_status == SPDK_BDEV_IO_STATUS_SUCCESS); 5414 5415 /* Test the case that the target I/O was not aborted because it completed 5416 * in the middle of execution of the abort. 5417 */ 5418 g_io_done = false; 5419 5420 rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, &io_ctx1); 5421 CU_ASSERT(rc == 0); 5422 CU_ASSERT(g_io_done == false); 5423 5424 g_abort_done = false; 5425 g_io_exp_status = SPDK_BDEV_IO_STATUS_FAILED; 5426 5427 rc = spdk_bdev_abort(desc, io_ch, &io_ctx1, abort_done, NULL); 5428 CU_ASSERT(rc == 0); 5429 CU_ASSERT(g_io_done == false); 5430 5431 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 5432 stub_complete_io(1); 5433 CU_ASSERT(g_io_done == true); 5434 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS); 5435 5436 g_io_exp_status = SPDK_BDEV_IO_STATUS_FAILED; 5437 stub_complete_io(1); 5438 CU_ASSERT(g_abort_done == true); 5439 CU_ASSERT(g_abort_status == SPDK_BDEV_IO_STATUS_SUCCESS); 5440 5441 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 5442 5443 bdev->optimal_io_boundary = 16; 5444 bdev->split_on_optimal_io_boundary = true; 5445 5446 /* Test that a single-vector command which is split is aborted correctly. 5447 * Offset 14, length 8, payload 0xF000 5448 * Child - Offset 14, length 2, payload 0xF000 5449 * Child - Offset 16, length 6, payload 0xF000 + 2 * 512 5450 */ 5451 g_io_done = false; 5452 5453 rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 8, io_done, &io_ctx1); 5454 CU_ASSERT(rc == 0); 5455 CU_ASSERT(g_io_done == false); 5456 5457 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 5458 5459 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 5460 5461 rc = spdk_bdev_abort(desc, io_ch, &io_ctx1, abort_done, NULL); 5462 CU_ASSERT(rc == 0); 5463 CU_ASSERT(g_io_done == true); 5464 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED); 5465 stub_complete_io(2); 5466 CU_ASSERT(g_abort_done == true); 5467 CU_ASSERT(g_abort_status == SPDK_BDEV_IO_STATUS_SUCCESS); 5468 5469 /* Test that a multi-vector command that needs to be split by strip and then 5470 * needs to be split is aborted correctly. Abort is requested before the second 5471 * child I/O was submitted. The parent I/O should complete with failure without 5472 * submitting the second child I/O. 5473 */ 5474 for (i = 0; i < SPDK_BDEV_IO_NUM_CHILD_IOV * 2; i++) { 5475 iov[i].iov_base = (void *)((i + 1) * 0x10000); 5476 iov[i].iov_len = 512; 5477 } 5478 5479 bdev->optimal_io_boundary = SPDK_BDEV_IO_NUM_CHILD_IOV; 5480 g_io_done = false; 5481 rc = spdk_bdev_readv_blocks(desc, io_ch, iov, SPDK_BDEV_IO_NUM_CHILD_IOV * 2, 0, 5482 SPDK_BDEV_IO_NUM_CHILD_IOV * 2, io_done, &io_ctx1); 5483 CU_ASSERT(rc == 0); 5484 CU_ASSERT(g_io_done == false); 5485 5486 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 5487 5488 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 5489 5490 rc = spdk_bdev_abort(desc, io_ch, &io_ctx1, abort_done, NULL); 5491 CU_ASSERT(rc == 0); 5492 CU_ASSERT(g_io_done == true); 5493 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED); 5494 stub_complete_io(1); 5495 CU_ASSERT(g_abort_done == true); 5496 CU_ASSERT(g_abort_status == SPDK_BDEV_IO_STATUS_SUCCESS); 5497 5498 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 5499 5500 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 5501 5502 bdev->optimal_io_boundary = 16; 5503 g_io_done = false; 5504 5505 /* Test that a single-vector command which is split is aborted correctly. 5506 * Differently from the above, the child abort request will be submitted 5507 * sequentially due to the capacity of spdk_bdev_io. 5508 */ 5509 rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 50, io_done, &io_ctx1); 5510 CU_ASSERT(rc == 0); 5511 CU_ASSERT(g_io_done == false); 5512 5513 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 4); 5514 5515 g_abort_done = false; 5516 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 5517 5518 rc = spdk_bdev_abort(desc, io_ch, &io_ctx1, abort_done, NULL); 5519 CU_ASSERT(rc == 0); 5520 CU_ASSERT(!TAILQ_EMPTY(&mgmt_ch->io_wait_queue)); 5521 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 4); 5522 5523 stub_complete_io(1); 5524 CU_ASSERT(g_io_done == true); 5525 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED); 5526 stub_complete_io(3); 5527 CU_ASSERT(g_abort_done == true); 5528 CU_ASSERT(g_abort_status == SPDK_BDEV_IO_STATUS_SUCCESS); 5529 5530 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 5531 5532 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 5533 5534 bdev->split_on_optimal_io_boundary = false; 5535 bdev->split_on_write_unit = true; 5536 bdev->write_unit_size = 16; 5537 5538 /* Test that a single-vector command which is split is aborted correctly. 5539 * Offset 16, length 32, payload 0xF000 5540 * Child - Offset 16, length 16, payload 0xF000 5541 * Child - Offset 32, length 16, payload 0xF000 + 16 * 512 5542 * 5543 * Use bdev->split_on_write_unit as a split condition. 5544 */ 5545 g_io_done = false; 5546 5547 rc = spdk_bdev_write_blocks(desc, io_ch, (void *)0xF000, 16, 32, io_done, &io_ctx1); 5548 CU_ASSERT(rc == 0); 5549 CU_ASSERT(g_io_done == false); 5550 5551 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 5552 5553 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 5554 5555 rc = spdk_bdev_abort(desc, io_ch, &io_ctx1, abort_done, NULL); 5556 CU_ASSERT(rc == 0); 5557 CU_ASSERT(g_io_done == true); 5558 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED); 5559 stub_complete_io(2); 5560 CU_ASSERT(g_abort_done == true); 5561 CU_ASSERT(g_abort_status == SPDK_BDEV_IO_STATUS_SUCCESS); 5562 5563 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 5564 5565 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 5566 5567 bdev->split_on_write_unit = false; 5568 bdev->max_rw_size = 16; 5569 5570 /* Test that a single-vector command which is split is aborted correctly. 5571 * Use bdev->max_rw_size as a split condition. 5572 */ 5573 g_io_done = false; 5574 5575 rc = spdk_bdev_write_blocks(desc, io_ch, (void *)0xF000, 0, 32, io_done, &io_ctx1); 5576 CU_ASSERT(rc == 0); 5577 CU_ASSERT(g_io_done == false); 5578 5579 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 5580 5581 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 5582 5583 rc = spdk_bdev_abort(desc, io_ch, &io_ctx1, abort_done, NULL); 5584 CU_ASSERT(rc == 0); 5585 CU_ASSERT(g_io_done == true); 5586 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED); 5587 stub_complete_io(2); 5588 CU_ASSERT(g_abort_done == true); 5589 CU_ASSERT(g_abort_status == SPDK_BDEV_IO_STATUS_SUCCESS); 5590 5591 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 5592 5593 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 5594 5595 bdev->max_rw_size = 0; 5596 bdev->max_segment_size = 512 * 16; 5597 bdev->max_num_segments = 1; 5598 5599 /* Test that a single-vector command which is split is aborted correctly. 5600 * Use bdev->max_segment_size and bdev->max_num_segments together as split conditions. 5601 * 5602 * One single-vector command is changed to one two-vectors command, but 5603 * bdev->max_num_segments is 1 and it is split into two single-vector commands. 5604 */ 5605 g_io_done = false; 5606 5607 rc = spdk_bdev_write_blocks(desc, io_ch, (void *)0xF000, 0, 32, io_done, &io_ctx1); 5608 CU_ASSERT(rc == 0); 5609 CU_ASSERT(g_io_done == false); 5610 5611 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 5612 5613 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 5614 5615 rc = spdk_bdev_abort(desc, io_ch, &io_ctx1, abort_done, NULL); 5616 CU_ASSERT(rc == 0); 5617 CU_ASSERT(g_io_done == true); 5618 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED); 5619 stub_complete_io(2); 5620 CU_ASSERT(g_abort_done == true); 5621 CU_ASSERT(g_abort_status == SPDK_BDEV_IO_STATUS_SUCCESS); 5622 5623 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 5624 5625 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 5626 5627 spdk_put_io_channel(io_ch); 5628 spdk_bdev_close(desc); 5629 free_bdev(bdev); 5630 ut_fini_bdev(); 5631 } 5632 5633 static void 5634 bdev_unmap(void) 5635 { 5636 struct spdk_bdev *bdev; 5637 struct spdk_bdev_desc *desc = NULL; 5638 struct spdk_io_channel *ioch; 5639 struct spdk_bdev_channel *bdev_ch; 5640 struct ut_expected_io *expected_io; 5641 struct spdk_bdev_opts bdev_opts = {}; 5642 uint32_t i, num_outstanding; 5643 uint64_t offset, num_blocks, max_unmap_blocks, num_children; 5644 int rc; 5645 5646 spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts)); 5647 bdev_opts.bdev_io_pool_size = 512; 5648 bdev_opts.bdev_io_cache_size = 64; 5649 ut_init_bdev(&bdev_opts); 5650 5651 bdev = allocate_bdev("bdev"); 5652 5653 rc = spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc); 5654 CU_ASSERT_EQUAL(rc, 0); 5655 SPDK_CU_ASSERT_FATAL(desc != NULL); 5656 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 5657 ioch = spdk_bdev_get_io_channel(desc); 5658 SPDK_CU_ASSERT_FATAL(ioch != NULL); 5659 bdev_ch = spdk_io_channel_get_ctx(ioch); 5660 CU_ASSERT(TAILQ_EMPTY(&bdev_ch->io_submitted)); 5661 5662 fn_table.submit_request = stub_submit_request; 5663 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 5664 5665 /* Case 1: First test the request won't be split */ 5666 num_blocks = 32; 5667 5668 g_io_done = false; 5669 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_UNMAP, 0, num_blocks, 0); 5670 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 5671 rc = spdk_bdev_unmap_blocks(desc, ioch, 0, num_blocks, io_done, NULL); 5672 CU_ASSERT_EQUAL(rc, 0); 5673 CU_ASSERT(g_io_done == false); 5674 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 5675 stub_complete_io(1); 5676 CU_ASSERT(g_io_done == true); 5677 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 5678 5679 /* Case 2: Test the split with 2 children requests */ 5680 bdev->max_unmap = 8; 5681 bdev->max_unmap_segments = 2; 5682 max_unmap_blocks = bdev->max_unmap * bdev->max_unmap_segments; 5683 num_blocks = max_unmap_blocks * 2; 5684 offset = 0; 5685 5686 g_io_done = false; 5687 for (i = 0; i < 2; i++) { 5688 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_UNMAP, offset, max_unmap_blocks, 0); 5689 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 5690 offset += max_unmap_blocks; 5691 } 5692 5693 rc = spdk_bdev_unmap_blocks(desc, ioch, 0, num_blocks, io_done, NULL); 5694 CU_ASSERT_EQUAL(rc, 0); 5695 CU_ASSERT(g_io_done == false); 5696 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 5697 stub_complete_io(2); 5698 CU_ASSERT(g_io_done == true); 5699 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 5700 5701 /* Case 3: Test the split with 15 children requests, will finish 8 requests first */ 5702 num_children = 15; 5703 num_blocks = max_unmap_blocks * num_children; 5704 g_io_done = false; 5705 offset = 0; 5706 for (i = 0; i < num_children; i++) { 5707 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_UNMAP, offset, max_unmap_blocks, 0); 5708 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 5709 offset += max_unmap_blocks; 5710 } 5711 5712 rc = spdk_bdev_unmap_blocks(desc, ioch, 0, num_blocks, io_done, NULL); 5713 CU_ASSERT_EQUAL(rc, 0); 5714 CU_ASSERT(g_io_done == false); 5715 5716 while (num_children > 0) { 5717 num_outstanding = spdk_min(num_children, SPDK_BDEV_MAX_CHILDREN_UNMAP_WRITE_ZEROES_REQS); 5718 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == num_outstanding); 5719 stub_complete_io(num_outstanding); 5720 num_children -= num_outstanding; 5721 } 5722 CU_ASSERT(g_io_done == true); 5723 5724 spdk_put_io_channel(ioch); 5725 spdk_bdev_close(desc); 5726 free_bdev(bdev); 5727 ut_fini_bdev(); 5728 } 5729 5730 static void 5731 bdev_write_zeroes_split_test(void) 5732 { 5733 struct spdk_bdev *bdev; 5734 struct spdk_bdev_desc *desc = NULL; 5735 struct spdk_io_channel *ioch; 5736 struct spdk_bdev_channel *bdev_ch; 5737 struct ut_expected_io *expected_io; 5738 struct spdk_bdev_opts bdev_opts = {}; 5739 uint32_t i, num_outstanding; 5740 uint64_t offset, num_blocks, max_write_zeroes_blocks, num_children; 5741 int rc; 5742 5743 spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts)); 5744 bdev_opts.bdev_io_pool_size = 512; 5745 bdev_opts.bdev_io_cache_size = 64; 5746 ut_init_bdev(&bdev_opts); 5747 5748 bdev = allocate_bdev("bdev"); 5749 5750 rc = spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc); 5751 CU_ASSERT_EQUAL(rc, 0); 5752 SPDK_CU_ASSERT_FATAL(desc != NULL); 5753 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 5754 ioch = spdk_bdev_get_io_channel(desc); 5755 SPDK_CU_ASSERT_FATAL(ioch != NULL); 5756 bdev_ch = spdk_io_channel_get_ctx(ioch); 5757 CU_ASSERT(TAILQ_EMPTY(&bdev_ch->io_submitted)); 5758 5759 fn_table.submit_request = stub_submit_request; 5760 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 5761 5762 /* Case 1: First test the request won't be split */ 5763 num_blocks = 32; 5764 5765 g_io_done = false; 5766 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, 0, num_blocks, 0); 5767 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 5768 rc = spdk_bdev_write_zeroes_blocks(desc, ioch, 0, num_blocks, io_done, NULL); 5769 CU_ASSERT_EQUAL(rc, 0); 5770 CU_ASSERT(g_io_done == false); 5771 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 5772 stub_complete_io(1); 5773 CU_ASSERT(g_io_done == true); 5774 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 5775 5776 /* Case 2: Test the split with 2 children requests */ 5777 max_write_zeroes_blocks = 8; 5778 bdev->max_write_zeroes = max_write_zeroes_blocks; 5779 num_blocks = max_write_zeroes_blocks * 2; 5780 offset = 0; 5781 5782 g_io_done = false; 5783 for (i = 0; i < 2; i++) { 5784 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, offset, max_write_zeroes_blocks, 5785 0); 5786 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 5787 offset += max_write_zeroes_blocks; 5788 } 5789 5790 rc = spdk_bdev_write_zeroes_blocks(desc, ioch, 0, num_blocks, io_done, NULL); 5791 CU_ASSERT_EQUAL(rc, 0); 5792 CU_ASSERT(g_io_done == false); 5793 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 5794 stub_complete_io(2); 5795 CU_ASSERT(g_io_done == true); 5796 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 5797 5798 /* Case 3: Test the split with 15 children requests, will finish 8 requests first */ 5799 num_children = 15; 5800 num_blocks = max_write_zeroes_blocks * num_children; 5801 g_io_done = false; 5802 offset = 0; 5803 for (i = 0; i < num_children; i++) { 5804 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, offset, max_write_zeroes_blocks, 5805 0); 5806 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 5807 offset += max_write_zeroes_blocks; 5808 } 5809 5810 rc = spdk_bdev_write_zeroes_blocks(desc, ioch, 0, num_blocks, io_done, NULL); 5811 CU_ASSERT_EQUAL(rc, 0); 5812 CU_ASSERT(g_io_done == false); 5813 5814 while (num_children > 0) { 5815 num_outstanding = spdk_min(num_children, SPDK_BDEV_MAX_CHILDREN_UNMAP_WRITE_ZEROES_REQS); 5816 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == num_outstanding); 5817 stub_complete_io(num_outstanding); 5818 num_children -= num_outstanding; 5819 } 5820 CU_ASSERT(g_io_done == true); 5821 5822 spdk_put_io_channel(ioch); 5823 spdk_bdev_close(desc); 5824 free_bdev(bdev); 5825 ut_fini_bdev(); 5826 } 5827 5828 static void 5829 bdev_set_options_test(void) 5830 { 5831 struct spdk_bdev_opts bdev_opts = {}; 5832 int rc; 5833 5834 /* Case1: Do not set opts_size */ 5835 rc = spdk_bdev_set_opts(&bdev_opts); 5836 CU_ASSERT(rc == -1); 5837 } 5838 5839 static struct spdk_memory_domain *g_bdev_memory_domain = (struct spdk_memory_domain *) 0xf00df00d; 5840 5841 static int 5842 test_bdev_get_supported_dma_device_types_op(void *ctx, struct spdk_memory_domain **domains, 5843 int array_size) 5844 { 5845 if (array_size > 0 && domains) { 5846 domains[0] = g_bdev_memory_domain; 5847 } 5848 5849 return 1; 5850 } 5851 5852 static void 5853 bdev_get_memory_domains(void) 5854 { 5855 struct spdk_bdev_fn_table fn_table = { 5856 .get_memory_domains = test_bdev_get_supported_dma_device_types_op 5857 }; 5858 struct spdk_bdev bdev = { .fn_table = &fn_table }; 5859 struct spdk_memory_domain *domains[2] = {}; 5860 int rc; 5861 5862 /* bdev is NULL */ 5863 rc = spdk_bdev_get_memory_domains(NULL, domains, 2); 5864 CU_ASSERT(rc == -EINVAL); 5865 5866 /* domains is NULL */ 5867 rc = spdk_bdev_get_memory_domains(&bdev, NULL, 2); 5868 CU_ASSERT(rc == 1); 5869 5870 /* array size is 0 */ 5871 rc = spdk_bdev_get_memory_domains(&bdev, domains, 0); 5872 CU_ASSERT(rc == 1); 5873 5874 /* get_supported_dma_device_types op is set */ 5875 rc = spdk_bdev_get_memory_domains(&bdev, domains, 2); 5876 CU_ASSERT(rc == 1); 5877 CU_ASSERT(domains[0] == g_bdev_memory_domain); 5878 5879 /* get_supported_dma_device_types op is not set */ 5880 fn_table.get_memory_domains = NULL; 5881 rc = spdk_bdev_get_memory_domains(&bdev, domains, 2); 5882 CU_ASSERT(rc == 0); 5883 } 5884 5885 static void 5886 _bdev_io_ext(struct spdk_bdev_ext_io_opts *ext_io_opts) 5887 { 5888 struct spdk_bdev *bdev; 5889 struct spdk_bdev_desc *desc = NULL; 5890 struct spdk_io_channel *io_ch; 5891 char io_buf[512]; 5892 struct iovec iov = { .iov_base = io_buf, .iov_len = 512 }; 5893 struct ut_expected_io *expected_io; 5894 int rc; 5895 5896 ut_init_bdev(NULL); 5897 5898 bdev = allocate_bdev("bdev0"); 5899 bdev->md_interleave = false; 5900 bdev->md_len = 8; 5901 5902 rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc); 5903 CU_ASSERT(rc == 0); 5904 SPDK_CU_ASSERT_FATAL(desc != NULL); 5905 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 5906 io_ch = spdk_bdev_get_io_channel(desc); 5907 CU_ASSERT(io_ch != NULL); 5908 5909 /* read */ 5910 g_io_done = false; 5911 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 32, 14, 1); 5912 if (ext_io_opts) { 5913 expected_io->md_buf = ext_io_opts->metadata; 5914 } 5915 ut_expected_io_set_iov(expected_io, 0, iov.iov_base, iov.iov_len); 5916 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 5917 5918 rc = spdk_bdev_readv_blocks_ext(desc, io_ch, &iov, 1, 32, 14, io_done, NULL, ext_io_opts); 5919 5920 CU_ASSERT(rc == 0); 5921 CU_ASSERT(g_io_done == false); 5922 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 5923 stub_complete_io(1); 5924 CU_ASSERT(g_io_done == true); 5925 5926 /* write */ 5927 g_io_done = false; 5928 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 32, 14, 1); 5929 if (ext_io_opts) { 5930 expected_io->md_buf = ext_io_opts->metadata; 5931 } 5932 ut_expected_io_set_iov(expected_io, 0, iov.iov_base, iov.iov_len); 5933 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 5934 5935 rc = spdk_bdev_writev_blocks_ext(desc, io_ch, &iov, 1, 32, 14, io_done, NULL, ext_io_opts); 5936 5937 CU_ASSERT(rc == 0); 5938 CU_ASSERT(g_io_done == false); 5939 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 5940 stub_complete_io(1); 5941 CU_ASSERT(g_io_done == true); 5942 5943 spdk_put_io_channel(io_ch); 5944 spdk_bdev_close(desc); 5945 free_bdev(bdev); 5946 ut_fini_bdev(); 5947 5948 } 5949 5950 static void 5951 bdev_io_ext(void) 5952 { 5953 struct spdk_bdev_ext_io_opts ext_io_opts = { 5954 .metadata = (void *)0xFF000000, 5955 .size = sizeof(ext_io_opts), 5956 .dif_check_flags_exclude_mask = 0 5957 }; 5958 5959 _bdev_io_ext(&ext_io_opts); 5960 } 5961 5962 static void 5963 bdev_io_ext_no_opts(void) 5964 { 5965 _bdev_io_ext(NULL); 5966 } 5967 5968 static void 5969 bdev_io_ext_invalid_opts(void) 5970 { 5971 struct spdk_bdev *bdev; 5972 struct spdk_bdev_desc *desc = NULL; 5973 struct spdk_io_channel *io_ch; 5974 char io_buf[512]; 5975 struct iovec iov = { .iov_base = io_buf, .iov_len = 512 }; 5976 struct spdk_bdev_ext_io_opts ext_io_opts = { 5977 .metadata = (void *)0xFF000000, 5978 .size = sizeof(ext_io_opts), 5979 .dif_check_flags_exclude_mask = 0 5980 }; 5981 int rc; 5982 5983 ut_init_bdev(NULL); 5984 5985 bdev = allocate_bdev("bdev0"); 5986 bdev->md_interleave = false; 5987 bdev->md_len = 8; 5988 5989 rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc); 5990 CU_ASSERT(rc == 0); 5991 SPDK_CU_ASSERT_FATAL(desc != NULL); 5992 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 5993 io_ch = spdk_bdev_get_io_channel(desc); 5994 CU_ASSERT(io_ch != NULL); 5995 5996 /* Test invalid ext_opts size */ 5997 ext_io_opts.size = 0; 5998 rc = spdk_bdev_readv_blocks_ext(desc, io_ch, &iov, 1, 32, 14, io_done, NULL, &ext_io_opts); 5999 CU_ASSERT(rc == -EINVAL); 6000 rc = spdk_bdev_writev_blocks_ext(desc, io_ch, &iov, 1, 32, 14, io_done, NULL, &ext_io_opts); 6001 CU_ASSERT(rc == -EINVAL); 6002 6003 ext_io_opts.size = sizeof(ext_io_opts) * 2; 6004 rc = spdk_bdev_readv_blocks_ext(desc, io_ch, &iov, 1, 32, 14, io_done, NULL, &ext_io_opts); 6005 CU_ASSERT(rc == -EINVAL); 6006 rc = spdk_bdev_writev_blocks_ext(desc, io_ch, &iov, 1, 32, 14, io_done, NULL, &ext_io_opts); 6007 CU_ASSERT(rc == -EINVAL); 6008 6009 ext_io_opts.size = offsetof(struct spdk_bdev_ext_io_opts, metadata) + 6010 sizeof(ext_io_opts.metadata) - 1; 6011 rc = spdk_bdev_readv_blocks_ext(desc, io_ch, &iov, 1, 32, 14, io_done, NULL, &ext_io_opts); 6012 CU_ASSERT(rc == -EINVAL); 6013 rc = spdk_bdev_writev_blocks_ext(desc, io_ch, &iov, 1, 32, 14, io_done, NULL, &ext_io_opts); 6014 CU_ASSERT(rc == -EINVAL); 6015 6016 spdk_put_io_channel(io_ch); 6017 spdk_bdev_close(desc); 6018 free_bdev(bdev); 6019 ut_fini_bdev(); 6020 } 6021 6022 static void 6023 bdev_io_ext_split(void) 6024 { 6025 struct spdk_bdev *bdev; 6026 struct spdk_bdev_desc *desc = NULL; 6027 struct spdk_io_channel *io_ch; 6028 char io_buf[512]; 6029 struct iovec iov = { .iov_base = io_buf, .iov_len = 512 }; 6030 struct ut_expected_io *expected_io; 6031 struct spdk_bdev_ext_io_opts ext_io_opts = { 6032 .metadata = (void *)0xFF000000, 6033 .size = sizeof(ext_io_opts), 6034 .dif_check_flags_exclude_mask = 0 6035 }; 6036 int rc; 6037 6038 ut_init_bdev(NULL); 6039 6040 bdev = allocate_bdev("bdev0"); 6041 bdev->md_interleave = false; 6042 bdev->md_len = 8; 6043 6044 rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc); 6045 CU_ASSERT(rc == 0); 6046 SPDK_CU_ASSERT_FATAL(desc != NULL); 6047 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 6048 io_ch = spdk_bdev_get_io_channel(desc); 6049 CU_ASSERT(io_ch != NULL); 6050 6051 /* Check that IO request with ext_opts and metadata is split correctly 6052 * Offset 14, length 8, payload 0xF000 6053 * Child - Offset 14, length 2, payload 0xF000 6054 * Child - Offset 16, length 6, payload 0xF000 + 2 * 512 6055 */ 6056 bdev->optimal_io_boundary = 16; 6057 bdev->split_on_optimal_io_boundary = true; 6058 bdev->md_interleave = false; 6059 bdev->md_len = 8; 6060 6061 iov.iov_base = (void *)0xF000; 6062 iov.iov_len = 4096; 6063 memset(&ext_io_opts, 0, sizeof(ext_io_opts)); 6064 ext_io_opts.metadata = (void *)0xFF000000; 6065 ext_io_opts.size = sizeof(ext_io_opts); 6066 g_io_done = false; 6067 6068 /* read */ 6069 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 14, 2, 1); 6070 expected_io->md_buf = ext_io_opts.metadata; 6071 ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 2 * 512); 6072 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 6073 6074 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 16, 6, 1); 6075 expected_io->md_buf = ext_io_opts.metadata + 2 * 8; 6076 ut_expected_io_set_iov(expected_io, 0, (void *)(0xF000 + 2 * 512), 6 * 512); 6077 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 6078 6079 rc = spdk_bdev_readv_blocks_ext(desc, io_ch, &iov, 1, 14, 8, io_done, NULL, &ext_io_opts); 6080 CU_ASSERT(rc == 0); 6081 CU_ASSERT(g_io_done == false); 6082 6083 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 6084 stub_complete_io(2); 6085 CU_ASSERT(g_io_done == true); 6086 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 6087 6088 /* write */ 6089 g_io_done = false; 6090 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 14, 2, 1); 6091 expected_io->md_buf = ext_io_opts.metadata; 6092 ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 2 * 512); 6093 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 6094 6095 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 16, 6, 1); 6096 expected_io->md_buf = ext_io_opts.metadata + 2 * 8; 6097 ut_expected_io_set_iov(expected_io, 0, (void *)(0xF000 + 2 * 512), 6 * 512); 6098 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 6099 6100 rc = spdk_bdev_writev_blocks_ext(desc, io_ch, &iov, 1, 14, 8, io_done, NULL, &ext_io_opts); 6101 CU_ASSERT(rc == 0); 6102 CU_ASSERT(g_io_done == false); 6103 6104 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 6105 stub_complete_io(2); 6106 CU_ASSERT(g_io_done == true); 6107 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 6108 6109 spdk_put_io_channel(io_ch); 6110 spdk_bdev_close(desc); 6111 free_bdev(bdev); 6112 ut_fini_bdev(); 6113 } 6114 6115 static void 6116 bdev_io_ext_bounce_buffer(void) 6117 { 6118 struct spdk_bdev *bdev; 6119 struct spdk_bdev_desc *desc = NULL; 6120 struct spdk_io_channel *io_ch; 6121 char io_buf[512]; 6122 struct iovec iov = { .iov_base = io_buf, .iov_len = 512 }; 6123 struct ut_expected_io *expected_io, *aux_io; 6124 struct spdk_bdev_ext_io_opts ext_io_opts = { 6125 .metadata = (void *)0xFF000000, 6126 .size = sizeof(ext_io_opts), 6127 .dif_check_flags_exclude_mask = 0 6128 }; 6129 int rc; 6130 6131 ut_init_bdev(NULL); 6132 6133 bdev = allocate_bdev("bdev0"); 6134 bdev->md_interleave = false; 6135 bdev->md_len = 8; 6136 6137 rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc); 6138 CU_ASSERT(rc == 0); 6139 SPDK_CU_ASSERT_FATAL(desc != NULL); 6140 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 6141 io_ch = spdk_bdev_get_io_channel(desc); 6142 CU_ASSERT(io_ch != NULL); 6143 6144 /* Verify data pull/push 6145 * bdev doesn't support memory domains, so buffers from bdev memory pool will be used */ 6146 ext_io_opts.memory_domain = (struct spdk_memory_domain *)0xdeadbeef; 6147 6148 /* read */ 6149 g_io_done = false; 6150 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 32, 14, 1); 6151 ut_expected_io_set_iov(expected_io, 0, iov.iov_base, iov.iov_len); 6152 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 6153 6154 rc = spdk_bdev_readv_blocks_ext(desc, io_ch, &iov, 1, 32, 14, io_done, NULL, &ext_io_opts); 6155 6156 CU_ASSERT(rc == 0); 6157 CU_ASSERT(g_io_done == false); 6158 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 6159 stub_complete_io(1); 6160 CU_ASSERT(g_memory_domain_push_data_called == true); 6161 CU_ASSERT(g_io_done == true); 6162 6163 /* write */ 6164 g_io_done = false; 6165 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 32, 14, 1); 6166 ut_expected_io_set_iov(expected_io, 0, iov.iov_base, iov.iov_len); 6167 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 6168 6169 rc = spdk_bdev_writev_blocks_ext(desc, io_ch, &iov, 1, 32, 14, io_done, NULL, &ext_io_opts); 6170 6171 CU_ASSERT(rc == 0); 6172 CU_ASSERT(g_memory_domain_pull_data_called == true); 6173 CU_ASSERT(g_io_done == false); 6174 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 6175 stub_complete_io(1); 6176 CU_ASSERT(g_io_done == true); 6177 6178 /* Verify the request is queued after receiving ENOMEM from pull */ 6179 g_io_done = false; 6180 aux_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 32, 14, 1); 6181 ut_expected_io_set_iov(aux_io, 0, iov.iov_base, iov.iov_len); 6182 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, aux_io, link); 6183 rc = spdk_bdev_writev_blocks(desc, io_ch, &iov, 1, 32, 14, io_done, NULL); 6184 CU_ASSERT(rc == 0); 6185 CU_ASSERT(g_io_done == false); 6186 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 6187 6188 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 32, 14, 1); 6189 ut_expected_io_set_iov(expected_io, 0, iov.iov_base, iov.iov_len); 6190 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 6191 6192 MOCK_SET(spdk_memory_domain_pull_data, -ENOMEM); 6193 rc = spdk_bdev_writev_blocks_ext(desc, io_ch, &iov, 1, 32, 14, io_done, NULL, &ext_io_opts); 6194 CU_ASSERT(rc == 0); 6195 CU_ASSERT(g_io_done == false); 6196 /* The second IO has been queued */ 6197 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 6198 6199 MOCK_CLEAR(spdk_memory_domain_pull_data); 6200 g_memory_domain_pull_data_called = false; 6201 stub_complete_io(1); 6202 CU_ASSERT(g_io_done == true); 6203 CU_ASSERT(g_memory_domain_pull_data_called == true); 6204 /* The second IO should be submitted now */ 6205 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 6206 g_io_done = false; 6207 stub_complete_io(1); 6208 CU_ASSERT(g_io_done == true); 6209 6210 /* Verify the request is queued after receiving ENOMEM from push */ 6211 g_io_done = false; 6212 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 32, 14, 1); 6213 ut_expected_io_set_iov(expected_io, 0, iov.iov_base, iov.iov_len); 6214 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 6215 6216 MOCK_SET(spdk_memory_domain_push_data, -ENOMEM); 6217 rc = spdk_bdev_readv_blocks_ext(desc, io_ch, &iov, 1, 32, 14, io_done, NULL, &ext_io_opts); 6218 CU_ASSERT(rc == 0); 6219 CU_ASSERT(g_io_done == false); 6220 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 6221 6222 aux_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 32, 14, 1); 6223 ut_expected_io_set_iov(aux_io, 0, iov.iov_base, iov.iov_len); 6224 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, aux_io, link); 6225 rc = spdk_bdev_writev_blocks(desc, io_ch, &iov, 1, 32, 14, io_done, NULL); 6226 CU_ASSERT(rc == 0); 6227 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 6228 6229 stub_complete_io(1); 6230 /* The IO isn't done yet, it's still waiting on push */ 6231 CU_ASSERT(g_io_done == false); 6232 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 6233 MOCK_CLEAR(spdk_memory_domain_push_data); 6234 g_memory_domain_push_data_called = false; 6235 /* Completing the second IO should also trigger push on the first one */ 6236 stub_complete_io(1); 6237 CU_ASSERT(g_io_done == true); 6238 CU_ASSERT(g_memory_domain_push_data_called == true); 6239 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 6240 6241 spdk_put_io_channel(io_ch); 6242 spdk_bdev_close(desc); 6243 free_bdev(bdev); 6244 ut_fini_bdev(); 6245 } 6246 6247 static void 6248 bdev_register_uuid_alias(void) 6249 { 6250 struct spdk_bdev *bdev, *second; 6251 char uuid[SPDK_UUID_STRING_LEN]; 6252 int rc; 6253 6254 ut_init_bdev(NULL); 6255 bdev = allocate_bdev("bdev0"); 6256 6257 /* Make sure an UUID was generated */ 6258 CU_ASSERT_FALSE(spdk_uuid_is_null(&bdev->uuid)); 6259 6260 /* Check that an UUID alias was registered */ 6261 spdk_uuid_fmt_lower(uuid, sizeof(uuid), &bdev->uuid); 6262 CU_ASSERT_EQUAL(spdk_bdev_get_by_name(uuid), bdev); 6263 6264 /* Unregister the bdev */ 6265 spdk_bdev_unregister(bdev, NULL, NULL); 6266 poll_threads(); 6267 CU_ASSERT_PTR_NULL(spdk_bdev_get_by_name(uuid)); 6268 6269 /* Check the same, but this time register the bdev with non-zero UUID */ 6270 rc = spdk_bdev_register(bdev); 6271 CU_ASSERT_EQUAL(rc, 0); 6272 CU_ASSERT_EQUAL(spdk_bdev_get_by_name(uuid), bdev); 6273 6274 /* Unregister the bdev */ 6275 spdk_bdev_unregister(bdev, NULL, NULL); 6276 poll_threads(); 6277 CU_ASSERT_PTR_NULL(spdk_bdev_get_by_name(uuid)); 6278 6279 /* Register the bdev using UUID as the name */ 6280 bdev->name = uuid; 6281 rc = spdk_bdev_register(bdev); 6282 CU_ASSERT_EQUAL(rc, 0); 6283 CU_ASSERT_EQUAL(spdk_bdev_get_by_name(uuid), bdev); 6284 6285 /* Unregister the bdev */ 6286 spdk_bdev_unregister(bdev, NULL, NULL); 6287 poll_threads(); 6288 CU_ASSERT_PTR_NULL(spdk_bdev_get_by_name(uuid)); 6289 6290 /* Check that it's not possible to register two bdevs with the same UUIDs */ 6291 bdev->name = "bdev0"; 6292 second = allocate_bdev("bdev1"); 6293 spdk_uuid_copy(&bdev->uuid, &second->uuid); 6294 rc = spdk_bdev_register(bdev); 6295 CU_ASSERT_EQUAL(rc, -EEXIST); 6296 6297 /* Regenerate the UUID and re-check */ 6298 spdk_uuid_generate(&bdev->uuid); 6299 rc = spdk_bdev_register(bdev); 6300 CU_ASSERT_EQUAL(rc, 0); 6301 6302 /* And check that both bdevs can be retrieved through their UUIDs */ 6303 spdk_uuid_fmt_lower(uuid, sizeof(uuid), &bdev->uuid); 6304 CU_ASSERT_EQUAL(spdk_bdev_get_by_name(uuid), bdev); 6305 spdk_uuid_fmt_lower(uuid, sizeof(uuid), &second->uuid); 6306 CU_ASSERT_EQUAL(spdk_bdev_get_by_name(uuid), second); 6307 6308 free_bdev(second); 6309 free_bdev(bdev); 6310 ut_fini_bdev(); 6311 } 6312 6313 static void 6314 bdev_unregister_by_name(void) 6315 { 6316 struct spdk_bdev *bdev; 6317 int rc; 6318 6319 bdev = allocate_bdev("bdev"); 6320 6321 g_event_type1 = 0xFF; 6322 g_unregister_arg = NULL; 6323 g_unregister_rc = -1; 6324 6325 rc = spdk_bdev_unregister_by_name("bdev1", &bdev_ut_if, bdev_unregister_cb, (void *)0x12345678); 6326 CU_ASSERT(rc == -ENODEV); 6327 6328 rc = spdk_bdev_unregister_by_name("bdev", &vbdev_ut_if, bdev_unregister_cb, (void *)0x12345678); 6329 CU_ASSERT(rc == -ENODEV); 6330 6331 rc = spdk_bdev_unregister_by_name("bdev", &bdev_ut_if, bdev_unregister_cb, (void *)0x12345678); 6332 CU_ASSERT(rc == 0); 6333 6334 /* Check that unregister callback is delayed */ 6335 CU_ASSERT(g_unregister_arg == NULL); 6336 CU_ASSERT(g_unregister_rc == -1); 6337 6338 poll_threads(); 6339 6340 /* Event callback shall not be issued because device was closed */ 6341 CU_ASSERT(g_event_type1 == 0xFF); 6342 /* Unregister callback is issued */ 6343 CU_ASSERT(g_unregister_arg == (void *)0x12345678); 6344 CU_ASSERT(g_unregister_rc == 0); 6345 6346 free_bdev(bdev); 6347 } 6348 6349 static int 6350 count_bdevs(void *ctx, struct spdk_bdev *bdev) 6351 { 6352 int *count = ctx; 6353 6354 (*count)++; 6355 6356 return 0; 6357 } 6358 6359 static void 6360 for_each_bdev_test(void) 6361 { 6362 struct spdk_bdev *bdev[8]; 6363 int rc, count; 6364 6365 bdev[0] = allocate_bdev("bdev0"); 6366 bdev[0]->internal.status = SPDK_BDEV_STATUS_REMOVING; 6367 6368 bdev[1] = allocate_bdev("bdev1"); 6369 rc = spdk_bdev_module_claim_bdev(bdev[1], NULL, &bdev_ut_if); 6370 CU_ASSERT(rc == 0); 6371 6372 bdev[2] = allocate_bdev("bdev2"); 6373 6374 bdev[3] = allocate_bdev("bdev3"); 6375 rc = spdk_bdev_module_claim_bdev(bdev[3], NULL, &bdev_ut_if); 6376 CU_ASSERT(rc == 0); 6377 6378 bdev[4] = allocate_bdev("bdev4"); 6379 6380 bdev[5] = allocate_bdev("bdev5"); 6381 rc = spdk_bdev_module_claim_bdev(bdev[5], NULL, &bdev_ut_if); 6382 CU_ASSERT(rc == 0); 6383 6384 bdev[6] = allocate_bdev("bdev6"); 6385 6386 bdev[7] = allocate_bdev("bdev7"); 6387 6388 count = 0; 6389 rc = spdk_for_each_bdev(&count, count_bdevs); 6390 CU_ASSERT(rc == 0); 6391 CU_ASSERT(count == 7); 6392 6393 count = 0; 6394 rc = spdk_for_each_bdev_leaf(&count, count_bdevs); 6395 CU_ASSERT(rc == 0); 6396 CU_ASSERT(count == 4); 6397 6398 bdev[0]->internal.status = SPDK_BDEV_STATUS_READY; 6399 free_bdev(bdev[0]); 6400 free_bdev(bdev[1]); 6401 free_bdev(bdev[2]); 6402 free_bdev(bdev[3]); 6403 free_bdev(bdev[4]); 6404 free_bdev(bdev[5]); 6405 free_bdev(bdev[6]); 6406 free_bdev(bdev[7]); 6407 } 6408 6409 static void 6410 bdev_seek_test(void) 6411 { 6412 struct spdk_bdev *bdev; 6413 struct spdk_bdev_desc *desc = NULL; 6414 struct spdk_io_channel *io_ch; 6415 int rc; 6416 6417 ut_init_bdev(NULL); 6418 poll_threads(); 6419 6420 bdev = allocate_bdev("bdev0"); 6421 6422 rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc); 6423 CU_ASSERT(rc == 0); 6424 poll_threads(); 6425 SPDK_CU_ASSERT_FATAL(desc != NULL); 6426 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 6427 io_ch = spdk_bdev_get_io_channel(desc); 6428 CU_ASSERT(io_ch != NULL); 6429 6430 /* Seek data not supported */ 6431 ut_enable_io_type(SPDK_BDEV_IO_TYPE_SEEK_DATA, false); 6432 rc = spdk_bdev_seek_data(desc, io_ch, 0, bdev_seek_cb, NULL); 6433 CU_ASSERT(rc == 0); 6434 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 6435 poll_threads(); 6436 CU_ASSERT(g_seek_offset == 0); 6437 6438 /* Seek hole not supported */ 6439 ut_enable_io_type(SPDK_BDEV_IO_TYPE_SEEK_HOLE, false); 6440 rc = spdk_bdev_seek_hole(desc, io_ch, 0, bdev_seek_cb, NULL); 6441 CU_ASSERT(rc == 0); 6442 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 6443 poll_threads(); 6444 CU_ASSERT(g_seek_offset == UINT64_MAX); 6445 6446 /* Seek data supported */ 6447 g_seek_data_offset = 12345; 6448 ut_enable_io_type(SPDK_BDEV_IO_TYPE_SEEK_DATA, true); 6449 rc = spdk_bdev_seek_data(desc, io_ch, 0, bdev_seek_cb, NULL); 6450 CU_ASSERT(rc == 0); 6451 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 6452 stub_complete_io(1); 6453 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 6454 CU_ASSERT(g_seek_offset == 12345); 6455 6456 /* Seek hole supported */ 6457 g_seek_hole_offset = 67890; 6458 ut_enable_io_type(SPDK_BDEV_IO_TYPE_SEEK_HOLE, true); 6459 rc = spdk_bdev_seek_hole(desc, io_ch, 0, bdev_seek_cb, NULL); 6460 CU_ASSERT(rc == 0); 6461 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 6462 stub_complete_io(1); 6463 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 6464 CU_ASSERT(g_seek_offset == 67890); 6465 6466 spdk_put_io_channel(io_ch); 6467 spdk_bdev_close(desc); 6468 free_bdev(bdev); 6469 ut_fini_bdev(); 6470 } 6471 6472 static void 6473 bdev_copy(void) 6474 { 6475 struct spdk_bdev *bdev; 6476 struct spdk_bdev_desc *desc = NULL; 6477 struct spdk_io_channel *ioch; 6478 struct ut_expected_io *expected_io; 6479 uint64_t src_offset, num_blocks; 6480 uint32_t num_completed; 6481 int rc; 6482 6483 ut_init_bdev(NULL); 6484 bdev = allocate_bdev("bdev"); 6485 6486 rc = spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc); 6487 CU_ASSERT_EQUAL(rc, 0); 6488 SPDK_CU_ASSERT_FATAL(desc != NULL); 6489 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 6490 ioch = spdk_bdev_get_io_channel(desc); 6491 SPDK_CU_ASSERT_FATAL(ioch != NULL); 6492 6493 fn_table.submit_request = stub_submit_request; 6494 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 6495 6496 /* First test that if the bdev supports copy, the request won't be split */ 6497 bdev->md_len = 0; 6498 bdev->blocklen = 512; 6499 num_blocks = 128; 6500 src_offset = bdev->blockcnt - num_blocks; 6501 6502 expected_io = ut_alloc_expected_copy_io(SPDK_BDEV_IO_TYPE_COPY, 0, src_offset, num_blocks); 6503 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 6504 6505 rc = spdk_bdev_copy_blocks(desc, ioch, 0, src_offset, num_blocks, io_done, NULL); 6506 CU_ASSERT_EQUAL(rc, 0); 6507 num_completed = stub_complete_io(1); 6508 CU_ASSERT_EQUAL(num_completed, 1); 6509 6510 /* Check that if copy is not supported it'll still work */ 6511 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, src_offset, num_blocks, 0); 6512 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 6513 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 0, num_blocks, 0); 6514 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 6515 6516 ut_enable_io_type(SPDK_BDEV_IO_TYPE_COPY, false); 6517 6518 rc = spdk_bdev_copy_blocks(desc, ioch, 0, src_offset, num_blocks, io_done, NULL); 6519 CU_ASSERT_EQUAL(rc, 0); 6520 num_completed = stub_complete_io(1); 6521 CU_ASSERT_EQUAL(num_completed, 1); 6522 num_completed = stub_complete_io(1); 6523 CU_ASSERT_EQUAL(num_completed, 1); 6524 6525 ut_enable_io_type(SPDK_BDEV_IO_TYPE_COPY, true); 6526 spdk_put_io_channel(ioch); 6527 spdk_bdev_close(desc); 6528 free_bdev(bdev); 6529 ut_fini_bdev(); 6530 } 6531 6532 static void 6533 bdev_copy_split_test(void) 6534 { 6535 struct spdk_bdev *bdev; 6536 struct spdk_bdev_desc *desc = NULL; 6537 struct spdk_io_channel *ioch; 6538 struct spdk_bdev_channel *bdev_ch; 6539 struct ut_expected_io *expected_io; 6540 struct spdk_bdev_opts bdev_opts = {}; 6541 uint32_t i, num_outstanding; 6542 uint64_t offset, src_offset, num_blocks, max_copy_blocks, num_children; 6543 int rc; 6544 6545 spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts)); 6546 bdev_opts.bdev_io_pool_size = 512; 6547 bdev_opts.bdev_io_cache_size = 64; 6548 rc = spdk_bdev_set_opts(&bdev_opts); 6549 CU_ASSERT(rc == 0); 6550 6551 ut_init_bdev(NULL); 6552 bdev = allocate_bdev("bdev"); 6553 6554 rc = spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc); 6555 CU_ASSERT_EQUAL(rc, 0); 6556 SPDK_CU_ASSERT_FATAL(desc != NULL); 6557 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 6558 ioch = spdk_bdev_get_io_channel(desc); 6559 SPDK_CU_ASSERT_FATAL(ioch != NULL); 6560 bdev_ch = spdk_io_channel_get_ctx(ioch); 6561 CU_ASSERT(TAILQ_EMPTY(&bdev_ch->io_submitted)); 6562 6563 fn_table.submit_request = stub_submit_request; 6564 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 6565 6566 /* Case 1: First test the request won't be split */ 6567 num_blocks = 32; 6568 src_offset = bdev->blockcnt - num_blocks; 6569 6570 g_io_done = false; 6571 expected_io = ut_alloc_expected_copy_io(SPDK_BDEV_IO_TYPE_COPY, 0, src_offset, num_blocks); 6572 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 6573 rc = spdk_bdev_copy_blocks(desc, ioch, 0, src_offset, num_blocks, io_done, NULL); 6574 CU_ASSERT_EQUAL(rc, 0); 6575 CU_ASSERT(g_io_done == false); 6576 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 6577 stub_complete_io(1); 6578 CU_ASSERT(g_io_done == true); 6579 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 6580 6581 /* Case 2: Test the split with 2 children requests */ 6582 max_copy_blocks = 8; 6583 bdev->max_copy = max_copy_blocks; 6584 num_children = 2; 6585 num_blocks = max_copy_blocks * num_children; 6586 offset = 0; 6587 src_offset = bdev->blockcnt - num_blocks; 6588 6589 g_io_done = false; 6590 for (i = 0; i < num_children; i++) { 6591 expected_io = ut_alloc_expected_copy_io(SPDK_BDEV_IO_TYPE_COPY, offset, 6592 src_offset + offset, max_copy_blocks); 6593 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 6594 offset += max_copy_blocks; 6595 } 6596 6597 rc = spdk_bdev_copy_blocks(desc, ioch, 0, src_offset, num_blocks, io_done, NULL); 6598 CU_ASSERT_EQUAL(rc, 0); 6599 CU_ASSERT(g_io_done == false); 6600 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == num_children); 6601 stub_complete_io(num_children); 6602 CU_ASSERT(g_io_done == true); 6603 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 6604 6605 /* Case 3: Test the split with 15 children requests, will finish 8 requests first */ 6606 num_children = 15; 6607 num_blocks = max_copy_blocks * num_children; 6608 offset = 0; 6609 src_offset = bdev->blockcnt - num_blocks; 6610 6611 g_io_done = false; 6612 for (i = 0; i < num_children; i++) { 6613 expected_io = ut_alloc_expected_copy_io(SPDK_BDEV_IO_TYPE_COPY, offset, 6614 src_offset + offset, max_copy_blocks); 6615 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 6616 offset += max_copy_blocks; 6617 } 6618 6619 rc = spdk_bdev_copy_blocks(desc, ioch, 0, src_offset, num_blocks, io_done, NULL); 6620 CU_ASSERT_EQUAL(rc, 0); 6621 CU_ASSERT(g_io_done == false); 6622 6623 while (num_children > 0) { 6624 num_outstanding = spdk_min(num_children, SPDK_BDEV_MAX_CHILDREN_COPY_REQS); 6625 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == num_outstanding); 6626 stub_complete_io(num_outstanding); 6627 num_children -= num_outstanding; 6628 } 6629 CU_ASSERT(g_io_done == true); 6630 6631 /* Case 4: Same test scenario as the case 2 but the configuration is different. 6632 * Copy is not supported. 6633 */ 6634 ut_enable_io_type(SPDK_BDEV_IO_TYPE_COPY, false); 6635 6636 num_children = 2; 6637 max_copy_blocks = spdk_bdev_get_max_copy(bdev); 6638 num_blocks = max_copy_blocks * num_children; 6639 src_offset = bdev->blockcnt - num_blocks; 6640 offset = 0; 6641 6642 g_io_done = false; 6643 for (i = 0; i < num_children; i++) { 6644 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, src_offset, 6645 max_copy_blocks, 0); 6646 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 6647 src_offset += max_copy_blocks; 6648 } 6649 for (i = 0; i < num_children; i++) { 6650 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, offset, 6651 max_copy_blocks, 0); 6652 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 6653 offset += max_copy_blocks; 6654 } 6655 6656 src_offset = bdev->blockcnt - num_blocks; 6657 offset = 0; 6658 6659 rc = spdk_bdev_copy_blocks(desc, ioch, offset, src_offset, num_blocks, io_done, NULL); 6660 CU_ASSERT_EQUAL(rc, 0); 6661 CU_ASSERT(g_io_done == false); 6662 6663 while (num_children > 0) { 6664 num_outstanding = spdk_min(num_children, SPDK_BDEV_MAX_CHILDREN_COPY_REQS); 6665 6666 /* One copy request is split into one read and one write requests. */ 6667 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == num_outstanding); 6668 stub_complete_io(num_outstanding); 6669 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == num_outstanding); 6670 stub_complete_io(num_outstanding); 6671 6672 num_children -= num_outstanding; 6673 } 6674 CU_ASSERT(g_io_done == true); 6675 6676 ut_enable_io_type(SPDK_BDEV_IO_TYPE_COPY, true); 6677 6678 spdk_put_io_channel(ioch); 6679 spdk_bdev_close(desc); 6680 free_bdev(bdev); 6681 ut_fini_bdev(); 6682 } 6683 6684 static void 6685 examine_claim_v1(struct spdk_bdev *bdev) 6686 { 6687 int rc; 6688 6689 rc = spdk_bdev_module_claim_bdev(bdev, NULL, &vbdev_ut_if); 6690 CU_ASSERT(rc == 0); 6691 } 6692 6693 static void 6694 examine_no_lock_held(struct spdk_bdev *bdev) 6695 { 6696 CU_ASSERT(!spdk_spin_held(&g_bdev_mgr.spinlock)); 6697 CU_ASSERT(!spdk_spin_held(&bdev->internal.spinlock)); 6698 } 6699 6700 struct examine_claim_v2_ctx { 6701 struct ut_examine_ctx examine_ctx; 6702 enum spdk_bdev_claim_type claim_type; 6703 struct spdk_bdev_desc *desc; 6704 }; 6705 6706 static void 6707 examine_claim_v2(struct spdk_bdev *bdev) 6708 { 6709 struct examine_claim_v2_ctx *ctx = bdev->ctxt; 6710 int rc; 6711 6712 rc = spdk_bdev_open_ext(bdev->name, false, bdev_ut_event_cb, NULL, &ctx->desc); 6713 CU_ASSERT(rc == 0); 6714 6715 rc = spdk_bdev_module_claim_bdev_desc(ctx->desc, ctx->claim_type, NULL, &vbdev_ut_if); 6716 CU_ASSERT(rc == 0); 6717 } 6718 6719 static void 6720 examine_locks(void) 6721 { 6722 struct spdk_bdev *bdev; 6723 struct ut_examine_ctx ctx = { 0 }; 6724 struct examine_claim_v2_ctx v2_ctx; 6725 6726 /* Without any claims, one code path is taken */ 6727 ctx.examine_config = examine_no_lock_held; 6728 ctx.examine_disk = examine_no_lock_held; 6729 bdev = allocate_bdev_ctx("bdev0", &ctx); 6730 CU_ASSERT(ctx.examine_config_count == 1); 6731 CU_ASSERT(ctx.examine_disk_count == 1); 6732 CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_NONE); 6733 CU_ASSERT(bdev->internal.claim.v1.module == NULL); 6734 free_bdev(bdev); 6735 6736 /* Exercise another path that is taken when examine_config() takes a v1 claim. */ 6737 memset(&ctx, 0, sizeof(ctx)); 6738 ctx.examine_config = examine_claim_v1; 6739 ctx.examine_disk = examine_no_lock_held; 6740 bdev = allocate_bdev_ctx("bdev0", &ctx); 6741 CU_ASSERT(ctx.examine_config_count == 1); 6742 CU_ASSERT(ctx.examine_disk_count == 1); 6743 CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_EXCL_WRITE); 6744 CU_ASSERT(bdev->internal.claim.v1.module == &vbdev_ut_if); 6745 spdk_bdev_module_release_bdev(bdev); 6746 CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_NONE); 6747 CU_ASSERT(bdev->internal.claim.v1.module == NULL); 6748 free_bdev(bdev); 6749 6750 /* Exercise the final path that comes with v2 claims. */ 6751 memset(&v2_ctx, 0, sizeof(v2_ctx)); 6752 v2_ctx.examine_ctx.examine_config = examine_claim_v2; 6753 v2_ctx.examine_ctx.examine_disk = examine_no_lock_held; 6754 v2_ctx.claim_type = SPDK_BDEV_CLAIM_READ_MANY_WRITE_NONE; 6755 bdev = allocate_bdev_ctx("bdev0", &v2_ctx); 6756 CU_ASSERT(v2_ctx.examine_ctx.examine_config_count == 1); 6757 CU_ASSERT(v2_ctx.examine_ctx.examine_disk_count == 1); 6758 CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_READ_MANY_WRITE_NONE); 6759 spdk_bdev_close(v2_ctx.desc); 6760 CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_NONE); 6761 free_bdev(bdev); 6762 } 6763 6764 #define UT_ASSERT_CLAIM_V2_COUNT(bdev, expect) \ 6765 do { \ 6766 uint32_t len = 0; \ 6767 struct spdk_bdev_module_claim *claim; \ 6768 TAILQ_FOREACH(claim, &bdev->internal.claim.v2.claims, link) { \ 6769 len++; \ 6770 } \ 6771 CU_ASSERT(len == expect); \ 6772 } while (0) 6773 6774 static void 6775 claim_v2_rwo(void) 6776 { 6777 struct spdk_bdev *bdev; 6778 struct spdk_bdev_desc *desc; 6779 struct spdk_bdev_desc *desc2; 6780 struct spdk_bdev_claim_opts opts; 6781 int rc; 6782 6783 bdev = allocate_bdev("bdev0"); 6784 6785 /* Claim without options */ 6786 desc = NULL; 6787 rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc); 6788 CU_ASSERT(rc == 0); 6789 SPDK_CU_ASSERT_FATAL(desc != NULL); 6790 rc = spdk_bdev_module_claim_bdev_desc(desc, SPDK_BDEV_CLAIM_READ_MANY_WRITE_ONE, NULL, 6791 &bdev_ut_if); 6792 CU_ASSERT(rc == 0); 6793 CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_READ_MANY_WRITE_ONE); 6794 CU_ASSERT(desc->claim != NULL); 6795 CU_ASSERT(desc->claim->module == &bdev_ut_if); 6796 CU_ASSERT(strcmp(desc->claim->name, "") == 0); 6797 CU_ASSERT(TAILQ_FIRST(&bdev->internal.claim.v2.claims) == desc->claim); 6798 UT_ASSERT_CLAIM_V2_COUNT(bdev, 1); 6799 6800 /* Release the claim by closing the descriptor */ 6801 spdk_bdev_close(desc); 6802 CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_NONE); 6803 CU_ASSERT(TAILQ_EMPTY(&bdev->internal.open_descs)); 6804 UT_ASSERT_CLAIM_V2_COUNT(bdev, 0); 6805 6806 /* Claim with options */ 6807 spdk_bdev_claim_opts_init(&opts, sizeof(opts)); 6808 snprintf(opts.name, sizeof(opts.name), "%s", "claim with options"); 6809 desc = NULL; 6810 rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc); 6811 CU_ASSERT(rc == 0); 6812 SPDK_CU_ASSERT_FATAL(desc != NULL); 6813 rc = spdk_bdev_module_claim_bdev_desc(desc, SPDK_BDEV_CLAIM_READ_MANY_WRITE_ONE, &opts, 6814 &bdev_ut_if); 6815 CU_ASSERT(rc == 0); 6816 CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_READ_MANY_WRITE_ONE); 6817 CU_ASSERT(desc->claim != NULL); 6818 CU_ASSERT(desc->claim->module == &bdev_ut_if); 6819 CU_ASSERT(strcmp(desc->claim->name, "claim with options") == 0); 6820 memset(&opts, 0, sizeof(opts)); 6821 CU_ASSERT(strcmp(desc->claim->name, "claim with options") == 0); 6822 CU_ASSERT(TAILQ_FIRST(&bdev->internal.claim.v2.claims) == desc->claim); 6823 UT_ASSERT_CLAIM_V2_COUNT(bdev, 1); 6824 6825 /* The claim blocks new writers. */ 6826 desc2 = NULL; 6827 rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc2); 6828 CU_ASSERT(rc == -EPERM); 6829 CU_ASSERT(desc2 == NULL); 6830 6831 /* New readers are allowed */ 6832 desc2 = NULL; 6833 rc = spdk_bdev_open_ext("bdev0", false, bdev_ut_event_cb, NULL, &desc2); 6834 CU_ASSERT(rc == 0); 6835 CU_ASSERT(desc2 != NULL); 6836 CU_ASSERT(!desc2->write); 6837 6838 /* No new v2 RWO claims are allowed */ 6839 rc = spdk_bdev_module_claim_bdev_desc(desc2, SPDK_BDEV_CLAIM_READ_MANY_WRITE_ONE, NULL, 6840 &bdev_ut_if); 6841 CU_ASSERT(rc == -EPERM); 6842 6843 /* No new v2 ROM claims are allowed */ 6844 CU_ASSERT(!desc2->write); 6845 rc = spdk_bdev_module_claim_bdev_desc(desc2, SPDK_BDEV_CLAIM_READ_MANY_WRITE_NONE, NULL, 6846 &bdev_ut_if); 6847 CU_ASSERT(rc == -EPERM); 6848 CU_ASSERT(!desc2->write); 6849 6850 /* No new v2 RWM claims are allowed */ 6851 spdk_bdev_claim_opts_init(&opts, sizeof(opts)); 6852 opts.shared_claim_key = (uint64_t)&opts; 6853 rc = spdk_bdev_module_claim_bdev_desc(desc2, SPDK_BDEV_CLAIM_READ_MANY_WRITE_SHARED, &opts, 6854 &bdev_ut_if); 6855 CU_ASSERT(rc == -EPERM); 6856 CU_ASSERT(!desc2->write); 6857 6858 /* No new v1 claims are allowed */ 6859 rc = spdk_bdev_module_claim_bdev(bdev, NULL, &bdev_ut_if); 6860 CU_ASSERT(rc == -EPERM); 6861 6862 /* None of the above changed the existing claim */ 6863 CU_ASSERT(TAILQ_FIRST(&bdev->internal.claim.v2.claims) == desc->claim); 6864 UT_ASSERT_CLAIM_V2_COUNT(bdev, 1); 6865 6866 /* Closing the first descriptor now allows a new claim and it is promoted to rw. */ 6867 spdk_bdev_close(desc); 6868 CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_NONE); 6869 UT_ASSERT_CLAIM_V2_COUNT(bdev, 0); 6870 CU_ASSERT(!desc2->write); 6871 rc = spdk_bdev_module_claim_bdev_desc(desc2, SPDK_BDEV_CLAIM_READ_MANY_WRITE_ONE, NULL, 6872 &bdev_ut_if); 6873 CU_ASSERT(rc == 0); 6874 CU_ASSERT(desc2->claim != NULL); 6875 CU_ASSERT(desc2->write); 6876 CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_READ_MANY_WRITE_ONE); 6877 CU_ASSERT(TAILQ_FIRST(&bdev->internal.claim.v2.claims) == desc2->claim); 6878 UT_ASSERT_CLAIM_V2_COUNT(bdev, 1); 6879 spdk_bdev_close(desc2); 6880 CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_NONE); 6881 UT_ASSERT_CLAIM_V2_COUNT(bdev, 0); 6882 6883 /* Cannot claim with a key */ 6884 desc = NULL; 6885 rc = spdk_bdev_open_ext("bdev0", false, bdev_ut_event_cb, NULL, &desc); 6886 CU_ASSERT(rc == 0); 6887 SPDK_CU_ASSERT_FATAL(desc != NULL); 6888 spdk_bdev_claim_opts_init(&opts, sizeof(opts)); 6889 opts.shared_claim_key = (uint64_t)&opts; 6890 rc = spdk_bdev_module_claim_bdev_desc(desc, SPDK_BDEV_CLAIM_READ_MANY_WRITE_ONE, &opts, 6891 &bdev_ut_if); 6892 CU_ASSERT(rc == -EINVAL); 6893 CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_NONE); 6894 UT_ASSERT_CLAIM_V2_COUNT(bdev, 0); 6895 spdk_bdev_close(desc); 6896 6897 /* Clean up */ 6898 free_bdev(bdev); 6899 } 6900 6901 static void 6902 claim_v2_rom(void) 6903 { 6904 struct spdk_bdev *bdev; 6905 struct spdk_bdev_desc *desc; 6906 struct spdk_bdev_desc *desc2; 6907 struct spdk_bdev_claim_opts opts; 6908 int rc; 6909 6910 bdev = allocate_bdev("bdev0"); 6911 6912 /* Claim without options */ 6913 desc = NULL; 6914 rc = spdk_bdev_open_ext("bdev0", false, bdev_ut_event_cb, NULL, &desc); 6915 CU_ASSERT(rc == 0); 6916 SPDK_CU_ASSERT_FATAL(desc != NULL); 6917 rc = spdk_bdev_module_claim_bdev_desc(desc, SPDK_BDEV_CLAIM_READ_MANY_WRITE_NONE, NULL, 6918 &bdev_ut_if); 6919 CU_ASSERT(rc == 0); 6920 CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_READ_MANY_WRITE_NONE); 6921 CU_ASSERT(desc->claim != NULL); 6922 CU_ASSERT(desc->claim->module == &bdev_ut_if); 6923 CU_ASSERT(strcmp(desc->claim->name, "") == 0); 6924 CU_ASSERT(TAILQ_FIRST(&bdev->internal.claim.v2.claims) == desc->claim); 6925 UT_ASSERT_CLAIM_V2_COUNT(bdev, 1); 6926 6927 /* Release the claim by closing the descriptor */ 6928 spdk_bdev_close(desc); 6929 CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_NONE); 6930 CU_ASSERT(TAILQ_EMPTY(&bdev->internal.open_descs)); 6931 CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_NONE); 6932 UT_ASSERT_CLAIM_V2_COUNT(bdev, 0); 6933 6934 /* Claim with options */ 6935 spdk_bdev_claim_opts_init(&opts, sizeof(opts)); 6936 snprintf(opts.name, sizeof(opts.name), "%s", "claim with options"); 6937 desc = NULL; 6938 rc = spdk_bdev_open_ext("bdev0", false, bdev_ut_event_cb, NULL, &desc); 6939 CU_ASSERT(rc == 0); 6940 SPDK_CU_ASSERT_FATAL(desc != NULL); 6941 rc = spdk_bdev_module_claim_bdev_desc(desc, SPDK_BDEV_CLAIM_READ_MANY_WRITE_NONE, &opts, 6942 &bdev_ut_if); 6943 CU_ASSERT(rc == 0); 6944 CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_READ_MANY_WRITE_NONE); 6945 SPDK_CU_ASSERT_FATAL(desc->claim != NULL); 6946 CU_ASSERT(desc->claim->module == &bdev_ut_if); 6947 CU_ASSERT(strcmp(desc->claim->name, "claim with options") == 0); 6948 memset(&opts, 0, sizeof(opts)); 6949 CU_ASSERT(strcmp(desc->claim->name, "claim with options") == 0); 6950 CU_ASSERT(TAILQ_FIRST(&bdev->internal.claim.v2.claims) == desc->claim); 6951 UT_ASSERT_CLAIM_V2_COUNT(bdev, 1); 6952 6953 /* The claim blocks new writers. */ 6954 desc2 = NULL; 6955 rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc2); 6956 CU_ASSERT(rc == -EPERM); 6957 CU_ASSERT(desc2 == NULL); 6958 6959 /* New readers are allowed */ 6960 desc2 = NULL; 6961 rc = spdk_bdev_open_ext("bdev0", false, bdev_ut_event_cb, NULL, &desc2); 6962 CU_ASSERT(rc == 0); 6963 CU_ASSERT(desc2 != NULL); 6964 CU_ASSERT(!desc2->write); 6965 6966 /* No new v2 RWO claims are allowed */ 6967 rc = spdk_bdev_module_claim_bdev_desc(desc2, SPDK_BDEV_CLAIM_READ_MANY_WRITE_ONE, NULL, 6968 &bdev_ut_if); 6969 CU_ASSERT(rc == -EPERM); 6970 6971 /* No new v2 RWM claims are allowed */ 6972 spdk_bdev_claim_opts_init(&opts, sizeof(opts)); 6973 opts.shared_claim_key = (uint64_t)&opts; 6974 rc = spdk_bdev_module_claim_bdev_desc(desc2, SPDK_BDEV_CLAIM_READ_MANY_WRITE_SHARED, &opts, 6975 &bdev_ut_if); 6976 CU_ASSERT(rc == -EPERM); 6977 CU_ASSERT(!desc2->write); 6978 6979 /* No new v1 claims are allowed */ 6980 rc = spdk_bdev_module_claim_bdev(bdev, NULL, &bdev_ut_if); 6981 CU_ASSERT(rc == -EPERM); 6982 6983 /* None of the above messed up the existing claim */ 6984 CU_ASSERT(TAILQ_FIRST(&bdev->internal.claim.v2.claims) == desc->claim); 6985 UT_ASSERT_CLAIM_V2_COUNT(bdev, 1); 6986 6987 /* New v2 ROM claims are allowed and the descriptor stays read-only. */ 6988 CU_ASSERT(!desc2->write); 6989 rc = spdk_bdev_module_claim_bdev_desc(desc2, SPDK_BDEV_CLAIM_READ_MANY_WRITE_NONE, NULL, 6990 &bdev_ut_if); 6991 CU_ASSERT(rc == 0); 6992 CU_ASSERT(!desc2->write); 6993 CU_ASSERT(TAILQ_FIRST(&bdev->internal.claim.v2.claims) == desc->claim); 6994 CU_ASSERT(TAILQ_NEXT(desc->claim, link) == desc2->claim); 6995 UT_ASSERT_CLAIM_V2_COUNT(bdev, 2); 6996 6997 /* Claim remains when closing the first descriptor */ 6998 spdk_bdev_close(desc); 6999 CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_READ_MANY_WRITE_NONE); 7000 CU_ASSERT(!TAILQ_EMPTY(&bdev->internal.open_descs)); 7001 CU_ASSERT(TAILQ_FIRST(&bdev->internal.claim.v2.claims) == desc2->claim); 7002 UT_ASSERT_CLAIM_V2_COUNT(bdev, 1); 7003 7004 /* Claim removed when closing the other descriptor */ 7005 spdk_bdev_close(desc2); 7006 CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_NONE); 7007 UT_ASSERT_CLAIM_V2_COUNT(bdev, 0); 7008 CU_ASSERT(TAILQ_EMPTY(&bdev->internal.open_descs)); 7009 7010 /* Cannot claim with a key */ 7011 desc = NULL; 7012 rc = spdk_bdev_open_ext("bdev0", false, bdev_ut_event_cb, NULL, &desc); 7013 CU_ASSERT(rc == 0); 7014 SPDK_CU_ASSERT_FATAL(desc != NULL); 7015 spdk_bdev_claim_opts_init(&opts, sizeof(opts)); 7016 opts.shared_claim_key = (uint64_t)&opts; 7017 rc = spdk_bdev_module_claim_bdev_desc(desc, SPDK_BDEV_CLAIM_READ_MANY_WRITE_NONE, &opts, 7018 &bdev_ut_if); 7019 CU_ASSERT(rc == -EINVAL); 7020 CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_NONE); 7021 UT_ASSERT_CLAIM_V2_COUNT(bdev, 0); 7022 spdk_bdev_close(desc); 7023 7024 /* Cannot claim with a read-write descriptor */ 7025 desc = NULL; 7026 rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc); 7027 CU_ASSERT(rc == 0); 7028 SPDK_CU_ASSERT_FATAL(desc != NULL); 7029 rc = spdk_bdev_module_claim_bdev_desc(desc, SPDK_BDEV_CLAIM_READ_MANY_WRITE_NONE, NULL, 7030 &bdev_ut_if); 7031 CU_ASSERT(rc == -EINVAL); 7032 CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_NONE); 7033 UT_ASSERT_CLAIM_V2_COUNT(bdev, 0); 7034 spdk_bdev_close(desc); 7035 CU_ASSERT(TAILQ_EMPTY(&bdev->internal.open_descs)); 7036 7037 /* Clean up */ 7038 free_bdev(bdev); 7039 } 7040 7041 static void 7042 claim_v2_rwm(void) 7043 { 7044 struct spdk_bdev *bdev; 7045 struct spdk_bdev_desc *desc; 7046 struct spdk_bdev_desc *desc2; 7047 struct spdk_bdev_claim_opts opts; 7048 char good_key, bad_key; 7049 int rc; 7050 7051 bdev = allocate_bdev("bdev0"); 7052 7053 /* Claim without options should fail */ 7054 desc = NULL; 7055 rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc); 7056 CU_ASSERT(rc == 0); 7057 SPDK_CU_ASSERT_FATAL(desc != NULL); 7058 rc = spdk_bdev_module_claim_bdev_desc(desc, SPDK_BDEV_CLAIM_READ_MANY_WRITE_SHARED, NULL, 7059 &bdev_ut_if); 7060 CU_ASSERT(rc == -EINVAL); 7061 CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_NONE); 7062 UT_ASSERT_CLAIM_V2_COUNT(bdev, 0); 7063 CU_ASSERT(desc->claim == NULL); 7064 7065 /* Claim with options */ 7066 spdk_bdev_claim_opts_init(&opts, sizeof(opts)); 7067 snprintf(opts.name, sizeof(opts.name), "%s", "claim with options"); 7068 opts.shared_claim_key = (uint64_t)&good_key; 7069 rc = spdk_bdev_module_claim_bdev_desc(desc, SPDK_BDEV_CLAIM_READ_MANY_WRITE_SHARED, &opts, 7070 &bdev_ut_if); 7071 CU_ASSERT(rc == 0); 7072 CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_READ_MANY_WRITE_SHARED); 7073 SPDK_CU_ASSERT_FATAL(desc->claim != NULL); 7074 CU_ASSERT(desc->claim->module == &bdev_ut_if); 7075 CU_ASSERT(strcmp(desc->claim->name, "claim with options") == 0); 7076 memset(&opts, 0, sizeof(opts)); 7077 CU_ASSERT(strcmp(desc->claim->name, "claim with options") == 0); 7078 CU_ASSERT(TAILQ_FIRST(&bdev->internal.claim.v2.claims) == desc->claim); 7079 UT_ASSERT_CLAIM_V2_COUNT(bdev, 1); 7080 7081 /* The claim blocks new writers. */ 7082 desc2 = NULL; 7083 rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc2); 7084 CU_ASSERT(rc == -EPERM); 7085 CU_ASSERT(desc2 == NULL); 7086 7087 /* New readers are allowed */ 7088 desc2 = NULL; 7089 rc = spdk_bdev_open_ext("bdev0", false, bdev_ut_event_cb, NULL, &desc2); 7090 CU_ASSERT(rc == 0); 7091 CU_ASSERT(desc2 != NULL); 7092 CU_ASSERT(!desc2->write); 7093 7094 /* No new v2 RWO claims are allowed */ 7095 rc = spdk_bdev_module_claim_bdev_desc(desc2, SPDK_BDEV_CLAIM_READ_MANY_WRITE_ONE, NULL, 7096 &bdev_ut_if); 7097 CU_ASSERT(rc == -EPERM); 7098 7099 /* No new v2 ROM claims are allowed and the descriptor stays read-only. */ 7100 CU_ASSERT(!desc2->write); 7101 rc = spdk_bdev_module_claim_bdev_desc(desc2, SPDK_BDEV_CLAIM_READ_MANY_WRITE_NONE, NULL, 7102 &bdev_ut_if); 7103 CU_ASSERT(rc == -EPERM); 7104 CU_ASSERT(!desc2->write); 7105 7106 /* No new v1 claims are allowed */ 7107 rc = spdk_bdev_module_claim_bdev(bdev, NULL, &bdev_ut_if); 7108 CU_ASSERT(rc == -EPERM); 7109 7110 /* No new v2 RWM claims are allowed if the key does not match */ 7111 spdk_bdev_claim_opts_init(&opts, sizeof(opts)); 7112 opts.shared_claim_key = (uint64_t)&bad_key; 7113 rc = spdk_bdev_module_claim_bdev_desc(desc2, SPDK_BDEV_CLAIM_READ_MANY_WRITE_SHARED, &opts, 7114 &bdev_ut_if); 7115 CU_ASSERT(rc == -EPERM); 7116 CU_ASSERT(!desc2->write); 7117 7118 /* None of the above messed up the existing claim */ 7119 CU_ASSERT(TAILQ_FIRST(&bdev->internal.claim.v2.claims) == desc->claim); 7120 UT_ASSERT_CLAIM_V2_COUNT(bdev, 1); 7121 7122 /* New v2 RWM claims are allowed and the descriptor is promoted if the key matches. */ 7123 spdk_bdev_claim_opts_init(&opts, sizeof(opts)); 7124 opts.shared_claim_key = (uint64_t)&good_key; 7125 CU_ASSERT(!desc2->write); 7126 rc = spdk_bdev_module_claim_bdev_desc(desc2, SPDK_BDEV_CLAIM_READ_MANY_WRITE_SHARED, &opts, 7127 &bdev_ut_if); 7128 CU_ASSERT(rc == 0); 7129 CU_ASSERT(desc2->write); 7130 CU_ASSERT(TAILQ_NEXT(desc->claim, link) == desc2->claim); 7131 UT_ASSERT_CLAIM_V2_COUNT(bdev, 2); 7132 7133 /* Claim remains when closing the first descriptor */ 7134 spdk_bdev_close(desc); 7135 CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_READ_MANY_WRITE_SHARED); 7136 CU_ASSERT(!TAILQ_EMPTY(&bdev->internal.open_descs)); 7137 CU_ASSERT(TAILQ_FIRST(&bdev->internal.claim.v2.claims) == desc2->claim); 7138 UT_ASSERT_CLAIM_V2_COUNT(bdev, 1); 7139 7140 /* Claim removed when closing the other descriptor */ 7141 spdk_bdev_close(desc2); 7142 CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_NONE); 7143 CU_ASSERT(TAILQ_EMPTY(&bdev->internal.open_descs)); 7144 7145 /* Cannot claim without a key */ 7146 desc = NULL; 7147 rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc); 7148 CU_ASSERT(rc == 0); 7149 SPDK_CU_ASSERT_FATAL(desc != NULL); 7150 spdk_bdev_claim_opts_init(&opts, sizeof(opts)); 7151 rc = spdk_bdev_module_claim_bdev_desc(desc, SPDK_BDEV_CLAIM_READ_MANY_WRITE_SHARED, &opts, 7152 &bdev_ut_if); 7153 CU_ASSERT(rc == -EINVAL); 7154 spdk_bdev_close(desc); 7155 CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_NONE); 7156 CU_ASSERT(TAILQ_EMPTY(&bdev->internal.open_descs)); 7157 7158 /* Clean up */ 7159 free_bdev(bdev); 7160 } 7161 7162 static void 7163 claim_v2_existing_writer(void) 7164 { 7165 struct spdk_bdev *bdev; 7166 struct spdk_bdev_desc *desc; 7167 struct spdk_bdev_desc *desc2; 7168 struct spdk_bdev_claim_opts opts; 7169 enum spdk_bdev_claim_type type; 7170 enum spdk_bdev_claim_type types[] = { 7171 SPDK_BDEV_CLAIM_READ_MANY_WRITE_ONE, 7172 SPDK_BDEV_CLAIM_READ_MANY_WRITE_SHARED, 7173 SPDK_BDEV_CLAIM_READ_MANY_WRITE_NONE 7174 }; 7175 size_t i; 7176 int rc; 7177 7178 bdev = allocate_bdev("bdev0"); 7179 7180 desc = NULL; 7181 rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc); 7182 CU_ASSERT(rc == 0); 7183 SPDK_CU_ASSERT_FATAL(desc != NULL); 7184 desc2 = NULL; 7185 rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc2); 7186 CU_ASSERT(rc == 0); 7187 SPDK_CU_ASSERT_FATAL(desc2 != NULL); 7188 7189 for (i = 0; i < SPDK_COUNTOF(types); i++) { 7190 type = types[i]; 7191 spdk_bdev_claim_opts_init(&opts, sizeof(opts)); 7192 if (type == SPDK_BDEV_CLAIM_READ_MANY_WRITE_SHARED) { 7193 opts.shared_claim_key = (uint64_t)&opts; 7194 } 7195 rc = spdk_bdev_module_claim_bdev_desc(desc, type, &opts, &bdev_ut_if); 7196 if (type == SPDK_BDEV_CLAIM_READ_MANY_WRITE_NONE) { 7197 CU_ASSERT(rc == -EINVAL); 7198 } else { 7199 CU_ASSERT(rc == -EPERM); 7200 } 7201 CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_NONE); 7202 rc = spdk_bdev_module_claim_bdev_desc(desc2, type, &opts, &bdev_ut_if); 7203 if (type == SPDK_BDEV_CLAIM_READ_MANY_WRITE_NONE) { 7204 CU_ASSERT(rc == -EINVAL); 7205 } else { 7206 CU_ASSERT(rc == -EPERM); 7207 } 7208 CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_NONE); 7209 } 7210 7211 spdk_bdev_close(desc); 7212 spdk_bdev_close(desc2); 7213 7214 /* Clean up */ 7215 free_bdev(bdev); 7216 } 7217 7218 static void 7219 claim_v2_existing_v1(void) 7220 { 7221 struct spdk_bdev *bdev; 7222 struct spdk_bdev_desc *desc; 7223 struct spdk_bdev_claim_opts opts; 7224 enum spdk_bdev_claim_type type; 7225 enum spdk_bdev_claim_type types[] = { 7226 SPDK_BDEV_CLAIM_READ_MANY_WRITE_ONE, 7227 SPDK_BDEV_CLAIM_READ_MANY_WRITE_SHARED, 7228 SPDK_BDEV_CLAIM_READ_MANY_WRITE_NONE 7229 }; 7230 size_t i; 7231 int rc; 7232 7233 bdev = allocate_bdev("bdev0"); 7234 7235 rc = spdk_bdev_module_claim_bdev(bdev, NULL, &bdev_ut_if); 7236 CU_ASSERT(rc == 0); 7237 CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_EXCL_WRITE); 7238 7239 desc = NULL; 7240 rc = spdk_bdev_open_ext("bdev0", false, bdev_ut_event_cb, NULL, &desc); 7241 CU_ASSERT(rc == 0); 7242 SPDK_CU_ASSERT_FATAL(desc != NULL); 7243 7244 for (i = 0; i < SPDK_COUNTOF(types); i++) { 7245 type = types[i]; 7246 spdk_bdev_claim_opts_init(&opts, sizeof(opts)); 7247 if (type == SPDK_BDEV_CLAIM_READ_MANY_WRITE_SHARED) { 7248 opts.shared_claim_key = (uint64_t)&opts; 7249 } 7250 rc = spdk_bdev_module_claim_bdev_desc(desc, type, &opts, &bdev_ut_if); 7251 CU_ASSERT(rc == -EPERM); 7252 CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_EXCL_WRITE); 7253 } 7254 7255 spdk_bdev_module_release_bdev(bdev); 7256 spdk_bdev_close(desc); 7257 7258 /* Clean up */ 7259 free_bdev(bdev); 7260 } 7261 7262 static void 7263 claim_v1_existing_v2(void) 7264 { 7265 struct spdk_bdev *bdev; 7266 struct spdk_bdev_desc *desc; 7267 struct spdk_bdev_claim_opts opts; 7268 enum spdk_bdev_claim_type type; 7269 enum spdk_bdev_claim_type types[] = { 7270 SPDK_BDEV_CLAIM_READ_MANY_WRITE_ONE, 7271 SPDK_BDEV_CLAIM_READ_MANY_WRITE_SHARED, 7272 SPDK_BDEV_CLAIM_READ_MANY_WRITE_NONE 7273 }; 7274 size_t i; 7275 int rc; 7276 7277 bdev = allocate_bdev("bdev0"); 7278 7279 for (i = 0; i < SPDK_COUNTOF(types); i++) { 7280 type = types[i]; 7281 7282 desc = NULL; 7283 rc = spdk_bdev_open_ext("bdev0", false, bdev_ut_event_cb, NULL, &desc); 7284 CU_ASSERT(rc == 0); 7285 SPDK_CU_ASSERT_FATAL(desc != NULL); 7286 7287 /* Get a v2 claim */ 7288 spdk_bdev_claim_opts_init(&opts, sizeof(opts)); 7289 if (type == SPDK_BDEV_CLAIM_READ_MANY_WRITE_SHARED) { 7290 opts.shared_claim_key = (uint64_t)&opts; 7291 } 7292 rc = spdk_bdev_module_claim_bdev_desc(desc, type, &opts, &bdev_ut_if); 7293 CU_ASSERT(rc == 0); 7294 7295 /* Fail to get a v1 claim */ 7296 rc = spdk_bdev_module_claim_bdev(bdev, NULL, &bdev_ut_if); 7297 CU_ASSERT(rc == -EPERM); 7298 7299 spdk_bdev_close(desc); 7300 7301 /* Now v1 succeeds */ 7302 rc = spdk_bdev_module_claim_bdev(bdev, NULL, &bdev_ut_if); 7303 CU_ASSERT(rc == 0) 7304 spdk_bdev_module_release_bdev(bdev); 7305 } 7306 7307 /* Clean up */ 7308 free_bdev(bdev); 7309 } 7310 7311 static int ut_examine_claimed_init0(void); 7312 static int ut_examine_claimed_init1(void); 7313 static void ut_examine_claimed_config0(struct spdk_bdev *bdev); 7314 static void ut_examine_claimed_disk0(struct spdk_bdev *bdev); 7315 static void ut_examine_claimed_config1(struct spdk_bdev *bdev); 7316 static void ut_examine_claimed_disk1(struct spdk_bdev *bdev); 7317 7318 #define UT_MAX_EXAMINE_MODS 2 7319 struct spdk_bdev_module examine_claimed_mods[UT_MAX_EXAMINE_MODS] = { 7320 { 7321 .name = "vbdev_ut_examine0", 7322 .module_init = ut_examine_claimed_init0, 7323 .module_fini = vbdev_ut_module_fini, 7324 .examine_config = ut_examine_claimed_config0, 7325 .examine_disk = ut_examine_claimed_disk0, 7326 }, 7327 { 7328 .name = "vbdev_ut_examine1", 7329 .module_init = ut_examine_claimed_init1, 7330 .module_fini = vbdev_ut_module_fini, 7331 .examine_config = ut_examine_claimed_config1, 7332 .examine_disk = ut_examine_claimed_disk1, 7333 } 7334 }; 7335 7336 SPDK_BDEV_MODULE_REGISTER(bdev_ut_claimed0, &examine_claimed_mods[0]) 7337 SPDK_BDEV_MODULE_REGISTER(bdev_ut_claimed1, &examine_claimed_mods[1]) 7338 7339 struct ut_examine_claimed_ctx { 7340 uint32_t examine_config_count; 7341 uint32_t examine_disk_count; 7342 7343 /* Claim type to take, with these options */ 7344 enum spdk_bdev_claim_type claim_type; 7345 struct spdk_bdev_claim_opts claim_opts; 7346 7347 /* Expected return value from spdk_bdev_module_claim_bdev_desc() */ 7348 int expect_claim_err; 7349 7350 /* Descriptor used for a claim */ 7351 struct spdk_bdev_desc *desc; 7352 } examine_claimed_ctx[UT_MAX_EXAMINE_MODS]; 7353 7354 bool ut_testing_examine_claimed; 7355 7356 /* 7357 * Store the order in which the modules were initialized, 7358 * since we have no guarantee on the order of execution of the constructors. 7359 * Modules are examined in reverse order of their initialization. 7360 */ 7361 static int g_ut_examine_claimed_order[UT_MAX_EXAMINE_MODS]; 7362 static int 7363 ut_examine_claimed_init(uint32_t modnum) 7364 { 7365 static int current = UT_MAX_EXAMINE_MODS; 7366 7367 /* Only do this for the first initialization of the bdev framework */ 7368 if (current == 0) { 7369 return 0; 7370 } 7371 g_ut_examine_claimed_order[modnum] = --current; 7372 7373 return 0; 7374 } 7375 7376 static int 7377 ut_examine_claimed_init0(void) 7378 { 7379 return ut_examine_claimed_init(0); 7380 } 7381 7382 static int 7383 ut_examine_claimed_init1(void) 7384 { 7385 return ut_examine_claimed_init(1); 7386 } 7387 7388 static void 7389 reset_examine_claimed_ctx(void) 7390 { 7391 struct ut_examine_claimed_ctx *ctx; 7392 uint32_t i; 7393 7394 for (i = 0; i < SPDK_COUNTOF(examine_claimed_ctx); i++) { 7395 ctx = &examine_claimed_ctx[i]; 7396 if (ctx->desc != NULL) { 7397 spdk_bdev_close(ctx->desc); 7398 } 7399 memset(ctx, 0, sizeof(*ctx)); 7400 spdk_bdev_claim_opts_init(&ctx->claim_opts, sizeof(ctx->claim_opts)); 7401 } 7402 } 7403 7404 static void 7405 examine_claimed_config(struct spdk_bdev *bdev, uint32_t modnum) 7406 { 7407 SPDK_CU_ASSERT_FATAL(modnum < UT_MAX_EXAMINE_MODS); 7408 struct spdk_bdev_module *module = &examine_claimed_mods[modnum]; 7409 struct ut_examine_claimed_ctx *ctx = &examine_claimed_ctx[modnum]; 7410 int rc; 7411 7412 if (!ut_testing_examine_claimed) { 7413 spdk_bdev_module_examine_done(module); 7414 return; 7415 } 7416 7417 ctx->examine_config_count++; 7418 7419 if (ctx->claim_type != SPDK_BDEV_CLAIM_NONE) { 7420 rc = spdk_bdev_open_ext(bdev->name, false, bdev_ut_event_cb, &ctx->claim_opts, 7421 &ctx->desc); 7422 CU_ASSERT(rc == 0); 7423 7424 rc = spdk_bdev_module_claim_bdev_desc(ctx->desc, ctx->claim_type, NULL, module); 7425 CU_ASSERT(rc == ctx->expect_claim_err); 7426 } 7427 spdk_bdev_module_examine_done(module); 7428 } 7429 7430 static void 7431 ut_examine_claimed_config0(struct spdk_bdev *bdev) 7432 { 7433 examine_claimed_config(bdev, g_ut_examine_claimed_order[0]); 7434 } 7435 7436 static void 7437 ut_examine_claimed_config1(struct spdk_bdev *bdev) 7438 { 7439 examine_claimed_config(bdev, g_ut_examine_claimed_order[1]); 7440 } 7441 7442 static void 7443 examine_claimed_disk(struct spdk_bdev *bdev, uint32_t modnum) 7444 { 7445 SPDK_CU_ASSERT_FATAL(modnum < UT_MAX_EXAMINE_MODS); 7446 struct spdk_bdev_module *module = &examine_claimed_mods[modnum]; 7447 struct ut_examine_claimed_ctx *ctx = &examine_claimed_ctx[modnum]; 7448 7449 if (!ut_testing_examine_claimed) { 7450 spdk_bdev_module_examine_done(module); 7451 return; 7452 } 7453 7454 ctx->examine_disk_count++; 7455 7456 spdk_bdev_module_examine_done(module); 7457 } 7458 7459 static void 7460 ut_examine_claimed_disk0(struct spdk_bdev *bdev) 7461 { 7462 examine_claimed_disk(bdev, 0); 7463 } 7464 7465 static void 7466 ut_examine_claimed_disk1(struct spdk_bdev *bdev) 7467 { 7468 examine_claimed_disk(bdev, 1); 7469 } 7470 7471 static bool g_examine_done = false; 7472 7473 static void 7474 ut_examine_done_cb(void *ctx) 7475 { 7476 g_examine_done = true; 7477 } 7478 7479 static void 7480 examine_claimed_common(bool autoexamine) 7481 { 7482 struct spdk_bdev *bdev; 7483 struct spdk_bdev_module *mod = examine_claimed_mods; 7484 struct ut_examine_claimed_ctx *ctx = examine_claimed_ctx; 7485 struct spdk_bdev_opts bdev_opts = {}; 7486 int rc; 7487 7488 spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts)); 7489 bdev_opts.bdev_auto_examine = autoexamine; 7490 ut_init_bdev(&bdev_opts); 7491 7492 ut_testing_examine_claimed = true; 7493 reset_examine_claimed_ctx(); 7494 7495 /* 7496 * With one module claiming, both modules' examine_config should be called, but only the 7497 * claiming module's examine_disk should be called. 7498 */ 7499 ctx[0].claim_type = SPDK_BDEV_CLAIM_READ_MANY_WRITE_NONE; 7500 g_examine_done = false; 7501 bdev = allocate_bdev("bdev0"); 7502 7503 if (!autoexamine) { 7504 rc = spdk_bdev_examine("bdev0"); 7505 CU_ASSERT(rc == 0); 7506 rc = spdk_bdev_wait_for_examine(ut_examine_done_cb, NULL); 7507 CU_ASSERT(rc == 0); 7508 CU_ASSERT(!g_examine_done); 7509 poll_threads(); 7510 CU_ASSERT(g_examine_done); 7511 } 7512 7513 CU_ASSERT(ctx[0].examine_config_count == 1); 7514 CU_ASSERT(ctx[0].examine_disk_count == 1); 7515 SPDK_CU_ASSERT_FATAL(ctx[0].desc != NULL); 7516 CU_ASSERT(ctx[0].desc->claim->module == &mod[0]); 7517 CU_ASSERT(ctx[1].examine_config_count == 1); 7518 CU_ASSERT(ctx[1].examine_disk_count == 0); 7519 CU_ASSERT(ctx[1].desc == NULL); 7520 reset_examine_claimed_ctx(); 7521 free_bdev(bdev); 7522 7523 /* 7524 * With two modules claiming, both modules' examine_config and examine_disk should be 7525 * called. 7526 */ 7527 ctx[0].claim_type = SPDK_BDEV_CLAIM_READ_MANY_WRITE_NONE; 7528 ctx[1].claim_type = SPDK_BDEV_CLAIM_READ_MANY_WRITE_NONE; 7529 g_examine_done = false; 7530 bdev = allocate_bdev("bdev0"); 7531 7532 if (!autoexamine) { 7533 rc = spdk_bdev_examine("bdev0"); 7534 CU_ASSERT(rc == 0); 7535 rc = spdk_bdev_wait_for_examine(ut_examine_done_cb, NULL); 7536 CU_ASSERT(rc == 0); 7537 CU_ASSERT(!g_examine_done); 7538 poll_threads(); 7539 CU_ASSERT(g_examine_done); 7540 } 7541 7542 CU_ASSERT(ctx[0].examine_config_count == 1); 7543 CU_ASSERT(ctx[0].examine_disk_count == 1); 7544 SPDK_CU_ASSERT_FATAL(ctx[0].desc != NULL); 7545 CU_ASSERT(ctx[0].desc->claim->module == &mod[0]); 7546 CU_ASSERT(ctx[1].examine_config_count == 1); 7547 CU_ASSERT(ctx[1].examine_disk_count == 1); 7548 SPDK_CU_ASSERT_FATAL(ctx[1].desc != NULL); 7549 CU_ASSERT(ctx[1].desc->claim->module == &mod[1]); 7550 reset_examine_claimed_ctx(); 7551 free_bdev(bdev); 7552 7553 /* 7554 * If two vbdev modules try to claim with conflicting claim types, the module that was added 7555 * last wins. The winner gets the claim and is the only one that has its examine_disk 7556 * callback invoked. 7557 */ 7558 ctx[0].claim_type = SPDK_BDEV_CLAIM_READ_MANY_WRITE_NONE; 7559 ctx[0].expect_claim_err = -EPERM; 7560 ctx[1].claim_type = SPDK_BDEV_CLAIM_READ_MANY_WRITE_ONE; 7561 g_examine_done = false; 7562 bdev = allocate_bdev("bdev0"); 7563 7564 if (!autoexamine) { 7565 rc = spdk_bdev_examine("bdev0"); 7566 CU_ASSERT(rc == 0); 7567 rc = spdk_bdev_wait_for_examine(ut_examine_done_cb, NULL); 7568 CU_ASSERT(rc == 0); 7569 CU_ASSERT(!g_examine_done); 7570 poll_threads(); 7571 CU_ASSERT(g_examine_done); 7572 } 7573 7574 CU_ASSERT(ctx[0].examine_config_count == 1); 7575 CU_ASSERT(ctx[0].examine_disk_count == 0); 7576 CU_ASSERT(ctx[1].examine_config_count == 1); 7577 CU_ASSERT(ctx[1].examine_disk_count == 1); 7578 SPDK_CU_ASSERT_FATAL(ctx[1].desc != NULL); 7579 CU_ASSERT(ctx[1].desc->claim->module == &mod[1]); 7580 CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_READ_MANY_WRITE_ONE); 7581 reset_examine_claimed_ctx(); 7582 free_bdev(bdev); 7583 7584 ut_testing_examine_claimed = false; 7585 7586 ut_fini_bdev(); 7587 } 7588 7589 static void 7590 examine_claimed(void) 7591 { 7592 examine_claimed_common(true); 7593 } 7594 7595 static void 7596 examine_claimed_manual(void) 7597 { 7598 examine_claimed_common(false); 7599 } 7600 7601 static void 7602 get_numa_id(void) 7603 { 7604 struct spdk_bdev bdev = {}; 7605 7606 bdev.numa.id = 0; 7607 bdev.numa.id_valid = 0; 7608 CU_ASSERT(spdk_bdev_get_numa_id(&bdev) == SPDK_ENV_NUMA_ID_ANY); 7609 7610 bdev.numa.id_valid = 1; 7611 CU_ASSERT(spdk_bdev_get_numa_id(&bdev) == 0); 7612 7613 bdev.numa.id = SPDK_ENV_NUMA_ID_ANY; 7614 CU_ASSERT(spdk_bdev_get_numa_id(&bdev) == SPDK_ENV_NUMA_ID_ANY); 7615 } 7616 7617 static void 7618 get_device_stat_with_reset_cb(struct spdk_bdev *bdev, struct spdk_bdev_io_stat *stat, void *cb_arg, 7619 int rc) 7620 { 7621 *(bool *)cb_arg = true; 7622 } 7623 7624 static void 7625 get_device_stat_with_given_reset(struct spdk_bdev *bdev, struct spdk_bdev_io_stat *stat, 7626 enum spdk_bdev_reset_stat_mode mode) 7627 { 7628 bool done = false; 7629 7630 spdk_bdev_get_device_stat(bdev, stat, mode, get_device_stat_with_reset_cb, &done); 7631 while (!done) { poll_threads(); } 7632 } 7633 7634 static void 7635 get_device_stat_with_reset(void) 7636 { 7637 struct spdk_bdev *bdev; 7638 struct spdk_bdev_desc *desc = NULL; 7639 struct spdk_io_channel *io_ch; 7640 struct spdk_bdev_opts bdev_opts = {}; 7641 struct spdk_bdev_io_stat *stat; 7642 7643 spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts)); 7644 bdev_opts.bdev_io_pool_size = 2; 7645 bdev_opts.bdev_io_cache_size = 1; 7646 ut_init_bdev(&bdev_opts); 7647 bdev = allocate_bdev("bdev0"); 7648 7649 CU_ASSERT(spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc) == 0); 7650 SPDK_CU_ASSERT_FATAL(desc != NULL); 7651 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 7652 io_ch = spdk_bdev_get_io_channel(desc); 7653 CU_ASSERT(io_ch != NULL); 7654 7655 g_io_done = false; 7656 CU_ASSERT(spdk_bdev_read(desc, io_ch, (void *)0x1000, 0, 4096, io_done, NULL) == 0); 7657 spdk_delay_us(10); 7658 stub_complete_io(1); 7659 CU_ASSERT(g_io_done == true); 7660 7661 stat = calloc(1, sizeof(struct spdk_bdev_io_stat)); 7662 SPDK_CU_ASSERT_FATAL(stat != NULL); 7663 7664 /* Get stat without resetting and check that it is correct */ 7665 get_device_stat_with_given_reset(bdev, stat, SPDK_BDEV_RESET_STAT_NONE); 7666 CU_ASSERT(stat->bytes_read == 4096); 7667 CU_ASSERT(stat->max_read_latency_ticks == 10); 7668 7669 /** 7670 * Check that stat was not reseted after previous step, 7671 * send get request with resetting maxmin stats 7672 */ 7673 get_device_stat_with_given_reset(bdev, stat, SPDK_BDEV_RESET_STAT_MAXMIN); 7674 CU_ASSERT(stat->bytes_read == 4096); 7675 CU_ASSERT(stat->max_read_latency_ticks == 10); 7676 7677 /** 7678 * Check that maxmins stats are reseted after previous step, 7679 * send get request with resetting all stats 7680 */ 7681 get_device_stat_with_given_reset(bdev, stat, SPDK_BDEV_RESET_STAT_ALL); 7682 CU_ASSERT(stat->bytes_read == 4096); 7683 CU_ASSERT(stat->max_read_latency_ticks == 0); 7684 7685 /* Check that all stats are reseted after previous step */ 7686 get_device_stat_with_given_reset(bdev, stat, SPDK_BDEV_RESET_STAT_NONE); 7687 CU_ASSERT(stat->bytes_read == 0); 7688 CU_ASSERT(stat->max_read_latency_ticks == 0); 7689 7690 free(stat); 7691 spdk_put_io_channel(io_ch); 7692 spdk_bdev_close(desc); 7693 free_bdev(bdev); 7694 ut_fini_bdev(); 7695 } 7696 7697 static void 7698 open_ext_v2_test(void) 7699 { 7700 struct spdk_bdev_open_opts opts; 7701 struct spdk_bdev *bdev; 7702 struct spdk_bdev_desc *desc; 7703 int rc; 7704 7705 bdev = allocate_bdev("bdev0"); 7706 7707 rc = spdk_bdev_open_ext_v2("bdev0", true, bdev_ut_event_cb, NULL, NULL, &desc); 7708 CU_ASSERT(rc == 0); 7709 SPDK_CU_ASSERT_FATAL(desc != NULL); 7710 CU_ASSERT(desc->write == true); 7711 CU_ASSERT(desc->opts.hide_metadata == false); 7712 7713 spdk_bdev_close(desc); 7714 7715 spdk_bdev_open_opts_init(&opts, sizeof(opts)); 7716 opts.hide_metadata = true; 7717 7718 rc = spdk_bdev_open_ext_v2("bdev0", true, bdev_ut_event_cb, NULL, &opts, &desc); 7719 CU_ASSERT(rc == 0); 7720 CU_ASSERT(desc->write == true); 7721 CU_ASSERT(desc->opts.hide_metadata == true); 7722 7723 spdk_bdev_close(desc); 7724 7725 free_bdev(bdev); 7726 } 7727 7728 static void 7729 bdev_io_init_dif_ctx_test(void) 7730 { 7731 struct spdk_bdev *bdev; 7732 struct spdk_bdev_io bdev_io; 7733 int rc; 7734 7735 bdev = allocate_bdev("bdev0"); 7736 7737 /* This is invalid because md_len should be larger than PI size. */ 7738 bdev->dif_pi_format = SPDK_DIF_PI_FORMAT_32; 7739 bdev->blocklen = 4096 + 8; 7740 bdev->md_len = 8; 7741 bdev->md_interleave = true; 7742 7743 bdev_io.bdev = bdev; 7744 7745 /* Check if initialization detects error. */ 7746 rc = bdev_io_init_dif_ctx(&bdev_io); 7747 CU_ASSERT(rc != 0); 7748 7749 /* Increase md_len to pass initialization check. */ 7750 bdev->blocklen = 4096 + 16; 7751 bdev->md_len = 16; 7752 7753 rc = bdev_io_init_dif_ctx(&bdev_io); 7754 CU_ASSERT(rc == 0); 7755 7756 free_bdev(bdev); 7757 } 7758 7759 int 7760 main(int argc, char **argv) 7761 { 7762 CU_pSuite suite = NULL; 7763 unsigned int num_failures; 7764 7765 CU_initialize_registry(); 7766 7767 suite = CU_add_suite("bdev", ut_bdev_setup, ut_bdev_teardown); 7768 7769 CU_ADD_TEST(suite, bytes_to_blocks_test); 7770 CU_ADD_TEST(suite, num_blocks_test); 7771 CU_ADD_TEST(suite, io_valid_test); 7772 CU_ADD_TEST(suite, open_write_test); 7773 CU_ADD_TEST(suite, claim_test); 7774 CU_ADD_TEST(suite, alias_add_del_test); 7775 CU_ADD_TEST(suite, get_device_stat_test); 7776 CU_ADD_TEST(suite, bdev_io_types_test); 7777 CU_ADD_TEST(suite, bdev_io_wait_test); 7778 CU_ADD_TEST(suite, bdev_io_spans_split_test); 7779 CU_ADD_TEST(suite, bdev_io_boundary_split_test); 7780 CU_ADD_TEST(suite, bdev_io_max_size_and_segment_split_test); 7781 CU_ADD_TEST(suite, bdev_io_mix_split_test); 7782 CU_ADD_TEST(suite, bdev_io_split_with_io_wait); 7783 CU_ADD_TEST(suite, bdev_io_write_unit_split_test); 7784 CU_ADD_TEST(suite, bdev_io_alignment_with_boundary); 7785 CU_ADD_TEST(suite, bdev_io_alignment); 7786 CU_ADD_TEST(suite, bdev_histograms); 7787 CU_ADD_TEST(suite, bdev_write_zeroes); 7788 CU_ADD_TEST(suite, bdev_compare_and_write); 7789 CU_ADD_TEST(suite, bdev_compare); 7790 CU_ADD_TEST(suite, bdev_compare_emulated); 7791 CU_ADD_TEST(suite, bdev_zcopy_write); 7792 CU_ADD_TEST(suite, bdev_zcopy_read); 7793 CU_ADD_TEST(suite, bdev_open_while_hotremove); 7794 CU_ADD_TEST(suite, bdev_close_while_hotremove); 7795 CU_ADD_TEST(suite, bdev_open_ext_test); 7796 CU_ADD_TEST(suite, bdev_open_ext_unregister); 7797 CU_ADD_TEST(suite, bdev_set_io_timeout); 7798 CU_ADD_TEST(suite, bdev_set_qd_sampling); 7799 CU_ADD_TEST(suite, lba_range_overlap); 7800 CU_ADD_TEST(suite, lock_lba_range_check_ranges); 7801 CU_ADD_TEST(suite, lock_lba_range_with_io_outstanding); 7802 CU_ADD_TEST(suite, lock_lba_range_overlapped); 7803 CU_ADD_TEST(suite, bdev_quiesce); 7804 CU_ADD_TEST(suite, bdev_io_abort); 7805 CU_ADD_TEST(suite, bdev_unmap); 7806 CU_ADD_TEST(suite, bdev_write_zeroes_split_test); 7807 CU_ADD_TEST(suite, bdev_set_options_test); 7808 CU_ADD_TEST(suite, bdev_get_memory_domains); 7809 CU_ADD_TEST(suite, bdev_io_ext); 7810 CU_ADD_TEST(suite, bdev_io_ext_no_opts); 7811 CU_ADD_TEST(suite, bdev_io_ext_invalid_opts); 7812 CU_ADD_TEST(suite, bdev_io_ext_split); 7813 CU_ADD_TEST(suite, bdev_io_ext_bounce_buffer); 7814 CU_ADD_TEST(suite, bdev_register_uuid_alias); 7815 CU_ADD_TEST(suite, bdev_unregister_by_name); 7816 CU_ADD_TEST(suite, for_each_bdev_test); 7817 CU_ADD_TEST(suite, bdev_seek_test); 7818 CU_ADD_TEST(suite, bdev_copy); 7819 CU_ADD_TEST(suite, bdev_copy_split_test); 7820 CU_ADD_TEST(suite, examine_locks); 7821 CU_ADD_TEST(suite, claim_v2_rwo); 7822 CU_ADD_TEST(suite, claim_v2_rom); 7823 CU_ADD_TEST(suite, claim_v2_rwm); 7824 CU_ADD_TEST(suite, claim_v2_existing_writer); 7825 CU_ADD_TEST(suite, claim_v2_existing_v1); 7826 CU_ADD_TEST(suite, claim_v1_existing_v2); 7827 CU_ADD_TEST(suite, examine_claimed); 7828 CU_ADD_TEST(suite, examine_claimed_manual); 7829 CU_ADD_TEST(suite, get_numa_id); 7830 CU_ADD_TEST(suite, get_device_stat_with_reset); 7831 CU_ADD_TEST(suite, open_ext_v2_test); 7832 CU_ADD_TEST(suite, bdev_io_init_dif_ctx_test); 7833 7834 allocate_cores(1); 7835 allocate_threads(1); 7836 set_thread(0); 7837 7838 num_failures = spdk_ut_run_tests(argc, argv, NULL); 7839 CU_cleanup_registry(); 7840 7841 free_threads(); 7842 free_cores(); 7843 7844 return num_failures; 7845 } 7846