1 /* SPDX-License-Identifier: BSD-3-Clause 2 * Copyright (C) 2017 Intel Corporation. All rights reserved. 3 * Copyright (c) 2019 Mellanox Technologies LTD. All rights reserved. 4 * Copyright (c) 2021-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 5 */ 6 7 #include "spdk_internal/cunit.h" 8 9 #include "common/lib/ut_multithread.c" 10 #include "unit/lib/json_mock.c" 11 12 #include "spdk/config.h" 13 /* HACK: disable VTune integration so the unit test doesn't need VTune headers and libs to build */ 14 #undef SPDK_CONFIG_VTUNE 15 16 #include "bdev/bdev.c" 17 18 DEFINE_STUB(spdk_notify_send, uint64_t, (const char *type, const char *ctx), 0); 19 DEFINE_STUB(spdk_notify_type_register, struct spdk_notify_type *, (const char *type), NULL); 20 DEFINE_STUB(spdk_memory_domain_get_dma_device_id, const char *, (struct spdk_memory_domain *domain), 21 "test_domain"); 22 DEFINE_STUB(spdk_memory_domain_get_dma_device_type, enum spdk_dma_device_type, 23 (struct spdk_memory_domain *domain), 0); 24 DEFINE_STUB_V(spdk_accel_sequence_finish, 25 (struct spdk_accel_sequence *seq, spdk_accel_completion_cb cb_fn, void *cb_arg)); 26 DEFINE_STUB_V(spdk_accel_sequence_abort, (struct spdk_accel_sequence *seq)); 27 DEFINE_STUB_V(spdk_accel_sequence_reverse, (struct spdk_accel_sequence *seq)); 28 DEFINE_STUB(spdk_accel_append_copy, int, 29 (struct spdk_accel_sequence **seq, struct spdk_io_channel *ch, struct iovec *dst_iovs, 30 uint32_t dst_iovcnt, struct spdk_memory_domain *dst_domain, void *dst_domain_ctx, 31 struct iovec *src_iovs, uint32_t src_iovcnt, struct spdk_memory_domain *src_domain, 32 void *src_domain_ctx, int flags, spdk_accel_step_cb cb_fn, void *cb_arg), 0); 33 DEFINE_STUB(spdk_accel_get_memory_domain, struct spdk_memory_domain *, (void), NULL); 34 35 static bool g_memory_domain_pull_data_called; 36 static bool g_memory_domain_push_data_called; 37 static int g_accel_io_device; 38 39 DEFINE_RETURN_MOCK(spdk_memory_domain_pull_data, int); 40 int 41 spdk_memory_domain_pull_data(struct spdk_memory_domain *src_domain, void *src_domain_ctx, 42 struct iovec *src_iov, uint32_t src_iov_cnt, struct iovec *dst_iov, uint32_t dst_iov_cnt, 43 spdk_memory_domain_data_cpl_cb cpl_cb, void *cpl_cb_arg) 44 { 45 g_memory_domain_pull_data_called = true; 46 HANDLE_RETURN_MOCK(spdk_memory_domain_pull_data); 47 cpl_cb(cpl_cb_arg, 0); 48 return 0; 49 } 50 51 DEFINE_RETURN_MOCK(spdk_memory_domain_push_data, int); 52 int 53 spdk_memory_domain_push_data(struct spdk_memory_domain *dst_domain, void *dst_domain_ctx, 54 struct iovec *dst_iov, uint32_t dst_iovcnt, struct iovec *src_iov, uint32_t src_iovcnt, 55 spdk_memory_domain_data_cpl_cb cpl_cb, void *cpl_cb_arg) 56 { 57 g_memory_domain_push_data_called = true; 58 HANDLE_RETURN_MOCK(spdk_memory_domain_push_data); 59 cpl_cb(cpl_cb_arg, 0); 60 return 0; 61 } 62 63 struct spdk_io_channel * 64 spdk_accel_get_io_channel(void) 65 { 66 return spdk_get_io_channel(&g_accel_io_device); 67 } 68 69 int g_status; 70 int g_count; 71 enum spdk_bdev_event_type g_event_type1; 72 enum spdk_bdev_event_type g_event_type2; 73 enum spdk_bdev_event_type g_event_type3; 74 enum spdk_bdev_event_type g_event_type4; 75 struct spdk_histogram_data *g_histogram; 76 void *g_unregister_arg; 77 int g_unregister_rc; 78 79 void 80 spdk_scsi_nvme_translate(const struct spdk_bdev_io *bdev_io, 81 int *sc, int *sk, int *asc, int *ascq) 82 { 83 } 84 85 static int 86 ut_accel_ch_create_cb(void *io_device, void *ctx) 87 { 88 return 0; 89 } 90 91 static void 92 ut_accel_ch_destroy_cb(void *io_device, void *ctx) 93 { 94 } 95 96 static int 97 ut_bdev_setup(void) 98 { 99 spdk_io_device_register(&g_accel_io_device, ut_accel_ch_create_cb, 100 ut_accel_ch_destroy_cb, 0, NULL); 101 return 0; 102 } 103 104 static int 105 ut_bdev_teardown(void) 106 { 107 spdk_io_device_unregister(&g_accel_io_device, NULL); 108 109 return 0; 110 } 111 112 static int 113 stub_destruct(void *ctx) 114 { 115 return 0; 116 } 117 118 struct ut_expected_io { 119 uint8_t type; 120 uint64_t offset; 121 uint64_t src_offset; 122 uint64_t length; 123 int iovcnt; 124 struct iovec iov[SPDK_BDEV_IO_NUM_CHILD_IOV]; 125 void *md_buf; 126 TAILQ_ENTRY(ut_expected_io) link; 127 }; 128 129 struct bdev_ut_channel { 130 TAILQ_HEAD(, spdk_bdev_io) outstanding_io; 131 uint32_t outstanding_io_count; 132 TAILQ_HEAD(, ut_expected_io) expected_io; 133 }; 134 135 static bool g_io_done; 136 static struct spdk_bdev_io *g_bdev_io; 137 static enum spdk_bdev_io_status g_io_status; 138 static enum spdk_bdev_io_status g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 139 static uint32_t g_bdev_ut_io_device; 140 static struct bdev_ut_channel *g_bdev_ut_channel; 141 static void *g_compare_read_buf; 142 static uint32_t g_compare_read_buf_len; 143 static void *g_compare_write_buf; 144 static uint32_t g_compare_write_buf_len; 145 static void *g_compare_md_buf; 146 static bool g_abort_done; 147 static enum spdk_bdev_io_status g_abort_status; 148 static void *g_zcopy_read_buf; 149 static uint32_t g_zcopy_read_buf_len; 150 static void *g_zcopy_write_buf; 151 static uint32_t g_zcopy_write_buf_len; 152 static struct spdk_bdev_io *g_zcopy_bdev_io; 153 static uint64_t g_seek_data_offset; 154 static uint64_t g_seek_hole_offset; 155 static uint64_t g_seek_offset; 156 157 static struct ut_expected_io * 158 ut_alloc_expected_io(uint8_t type, uint64_t offset, uint64_t length, int iovcnt) 159 { 160 struct ut_expected_io *expected_io; 161 162 expected_io = calloc(1, sizeof(*expected_io)); 163 SPDK_CU_ASSERT_FATAL(expected_io != NULL); 164 165 expected_io->type = type; 166 expected_io->offset = offset; 167 expected_io->length = length; 168 expected_io->iovcnt = iovcnt; 169 170 return expected_io; 171 } 172 173 static struct ut_expected_io * 174 ut_alloc_expected_copy_io(uint8_t type, uint64_t offset, uint64_t src_offset, uint64_t length) 175 { 176 struct ut_expected_io *expected_io; 177 178 expected_io = calloc(1, sizeof(*expected_io)); 179 SPDK_CU_ASSERT_FATAL(expected_io != NULL); 180 181 expected_io->type = type; 182 expected_io->offset = offset; 183 expected_io->src_offset = src_offset; 184 expected_io->length = length; 185 186 return expected_io; 187 } 188 189 static void 190 ut_expected_io_set_iov(struct ut_expected_io *expected_io, int pos, void *base, size_t len) 191 { 192 expected_io->iov[pos].iov_base = base; 193 expected_io->iov[pos].iov_len = len; 194 } 195 196 static void 197 stub_submit_request(struct spdk_io_channel *_ch, struct spdk_bdev_io *bdev_io) 198 { 199 struct bdev_ut_channel *ch = spdk_io_channel_get_ctx(_ch); 200 struct ut_expected_io *expected_io; 201 struct iovec *iov, *expected_iov; 202 struct spdk_bdev_io *bio_to_abort; 203 int i; 204 205 g_bdev_io = bdev_io; 206 207 if (g_compare_read_buf && bdev_io->type == SPDK_BDEV_IO_TYPE_READ) { 208 uint32_t len = bdev_io->u.bdev.iovs[0].iov_len; 209 210 CU_ASSERT(bdev_io->u.bdev.iovcnt == 1); 211 CU_ASSERT(g_compare_read_buf_len == len); 212 memcpy(bdev_io->u.bdev.iovs[0].iov_base, g_compare_read_buf, len); 213 if (bdev_io->bdev->md_len && bdev_io->u.bdev.md_buf && g_compare_md_buf) { 214 memcpy(bdev_io->u.bdev.md_buf, g_compare_md_buf, 215 bdev_io->bdev->md_len * bdev_io->u.bdev.num_blocks); 216 } 217 } 218 219 if (g_compare_write_buf && bdev_io->type == SPDK_BDEV_IO_TYPE_WRITE) { 220 uint32_t len = bdev_io->u.bdev.iovs[0].iov_len; 221 222 CU_ASSERT(bdev_io->u.bdev.iovcnt == 1); 223 CU_ASSERT(g_compare_write_buf_len == len); 224 memcpy(g_compare_write_buf, bdev_io->u.bdev.iovs[0].iov_base, len); 225 } 226 227 if (g_compare_read_buf && bdev_io->type == SPDK_BDEV_IO_TYPE_COMPARE) { 228 uint32_t len = bdev_io->u.bdev.iovs[0].iov_len; 229 230 CU_ASSERT(bdev_io->u.bdev.iovcnt == 1); 231 CU_ASSERT(g_compare_read_buf_len == len); 232 if (memcmp(bdev_io->u.bdev.iovs[0].iov_base, g_compare_read_buf, len)) { 233 g_io_exp_status = SPDK_BDEV_IO_STATUS_MISCOMPARE; 234 } 235 if (bdev_io->u.bdev.md_buf && 236 memcmp(bdev_io->u.bdev.md_buf, g_compare_md_buf, 237 bdev_io->bdev->md_len * bdev_io->u.bdev.num_blocks)) { 238 g_io_exp_status = SPDK_BDEV_IO_STATUS_MISCOMPARE; 239 } 240 } 241 242 if (bdev_io->type == SPDK_BDEV_IO_TYPE_ABORT) { 243 if (g_io_exp_status == SPDK_BDEV_IO_STATUS_SUCCESS) { 244 TAILQ_FOREACH(bio_to_abort, &ch->outstanding_io, module_link) { 245 if (bio_to_abort == bdev_io->u.abort.bio_to_abort) { 246 TAILQ_REMOVE(&ch->outstanding_io, bio_to_abort, module_link); 247 ch->outstanding_io_count--; 248 spdk_bdev_io_complete(bio_to_abort, SPDK_BDEV_IO_STATUS_FAILED); 249 break; 250 } 251 } 252 } 253 } 254 255 if (bdev_io->type == SPDK_BDEV_IO_TYPE_ZCOPY) { 256 if (bdev_io->u.bdev.zcopy.start) { 257 g_zcopy_bdev_io = bdev_io; 258 if (bdev_io->u.bdev.zcopy.populate) { 259 /* Start of a read */ 260 CU_ASSERT(g_zcopy_read_buf != NULL); 261 CU_ASSERT(g_zcopy_read_buf_len > 0); 262 bdev_io->u.bdev.iovs[0].iov_base = g_zcopy_read_buf; 263 bdev_io->u.bdev.iovs[0].iov_len = g_zcopy_read_buf_len; 264 bdev_io->u.bdev.iovcnt = 1; 265 } else { 266 /* Start of a write */ 267 CU_ASSERT(g_zcopy_write_buf != NULL); 268 CU_ASSERT(g_zcopy_write_buf_len > 0); 269 bdev_io->u.bdev.iovs[0].iov_base = g_zcopy_write_buf; 270 bdev_io->u.bdev.iovs[0].iov_len = g_zcopy_write_buf_len; 271 bdev_io->u.bdev.iovcnt = 1; 272 } 273 } else { 274 if (bdev_io->u.bdev.zcopy.commit) { 275 /* End of write */ 276 CU_ASSERT(bdev_io->u.bdev.iovs[0].iov_base == g_zcopy_write_buf); 277 CU_ASSERT(bdev_io->u.bdev.iovs[0].iov_len == g_zcopy_write_buf_len); 278 CU_ASSERT(bdev_io->u.bdev.iovcnt == 1); 279 g_zcopy_write_buf = NULL; 280 g_zcopy_write_buf_len = 0; 281 } else { 282 /* End of read */ 283 CU_ASSERT(bdev_io->u.bdev.iovs[0].iov_base == g_zcopy_read_buf); 284 CU_ASSERT(bdev_io->u.bdev.iovs[0].iov_len == g_zcopy_read_buf_len); 285 CU_ASSERT(bdev_io->u.bdev.iovcnt == 1); 286 g_zcopy_read_buf = NULL; 287 g_zcopy_read_buf_len = 0; 288 } 289 } 290 } 291 292 if (bdev_io->type == SPDK_BDEV_IO_TYPE_SEEK_DATA) { 293 bdev_io->u.bdev.seek.offset = g_seek_data_offset; 294 } 295 296 if (bdev_io->type == SPDK_BDEV_IO_TYPE_SEEK_HOLE) { 297 bdev_io->u.bdev.seek.offset = g_seek_hole_offset; 298 } 299 300 TAILQ_INSERT_TAIL(&ch->outstanding_io, bdev_io, module_link); 301 ch->outstanding_io_count++; 302 303 expected_io = TAILQ_FIRST(&ch->expected_io); 304 if (expected_io == NULL) { 305 return; 306 } 307 TAILQ_REMOVE(&ch->expected_io, expected_io, link); 308 309 if (expected_io->type != SPDK_BDEV_IO_TYPE_INVALID) { 310 CU_ASSERT(bdev_io->type == expected_io->type); 311 } 312 313 if (expected_io->md_buf != NULL) { 314 CU_ASSERT(expected_io->md_buf == bdev_io->u.bdev.md_buf); 315 } 316 317 if (expected_io->length == 0) { 318 free(expected_io); 319 return; 320 } 321 322 CU_ASSERT(expected_io->offset == bdev_io->u.bdev.offset_blocks); 323 CU_ASSERT(expected_io->length = bdev_io->u.bdev.num_blocks); 324 if (expected_io->type == SPDK_BDEV_IO_TYPE_COPY) { 325 CU_ASSERT(expected_io->src_offset == bdev_io->u.bdev.copy.src_offset_blocks); 326 } 327 328 if (expected_io->iovcnt == 0) { 329 free(expected_io); 330 /* UNMAP, WRITE_ZEROES, FLUSH and COPY don't have iovs, so we can just return now. */ 331 return; 332 } 333 334 CU_ASSERT(expected_io->iovcnt == bdev_io->u.bdev.iovcnt); 335 for (i = 0; i < expected_io->iovcnt; i++) { 336 expected_iov = &expected_io->iov[i]; 337 if (bdev_io->internal.orig_iovcnt == 0) { 338 iov = &bdev_io->u.bdev.iovs[i]; 339 } else { 340 iov = bdev_io->internal.orig_iovs; 341 } 342 CU_ASSERT(iov->iov_len == expected_iov->iov_len); 343 CU_ASSERT(iov->iov_base == expected_iov->iov_base); 344 } 345 346 free(expected_io); 347 } 348 349 static void 350 stub_submit_request_get_buf_cb(struct spdk_io_channel *_ch, 351 struct spdk_bdev_io *bdev_io, bool success) 352 { 353 CU_ASSERT(success == true); 354 355 stub_submit_request(_ch, bdev_io); 356 } 357 358 static void 359 stub_submit_request_get_buf(struct spdk_io_channel *_ch, struct spdk_bdev_io *bdev_io) 360 { 361 spdk_bdev_io_get_buf(bdev_io, stub_submit_request_get_buf_cb, 362 bdev_io->u.bdev.num_blocks * bdev_io->bdev->blocklen); 363 } 364 365 static uint32_t 366 stub_complete_io(uint32_t num_to_complete) 367 { 368 struct bdev_ut_channel *ch = g_bdev_ut_channel; 369 struct spdk_bdev_io *bdev_io; 370 static enum spdk_bdev_io_status io_status; 371 uint32_t num_completed = 0; 372 373 while (num_completed < num_to_complete) { 374 if (TAILQ_EMPTY(&ch->outstanding_io)) { 375 break; 376 } 377 bdev_io = TAILQ_FIRST(&ch->outstanding_io); 378 TAILQ_REMOVE(&ch->outstanding_io, bdev_io, module_link); 379 ch->outstanding_io_count--; 380 io_status = g_io_exp_status == SPDK_BDEV_IO_STATUS_SUCCESS ? SPDK_BDEV_IO_STATUS_SUCCESS : 381 g_io_exp_status; 382 spdk_bdev_io_complete(bdev_io, io_status); 383 num_completed++; 384 } 385 386 return num_completed; 387 } 388 389 static struct spdk_io_channel * 390 bdev_ut_get_io_channel(void *ctx) 391 { 392 return spdk_get_io_channel(&g_bdev_ut_io_device); 393 } 394 395 static bool g_io_types_supported[SPDK_BDEV_NUM_IO_TYPES] = { 396 [SPDK_BDEV_IO_TYPE_READ] = true, 397 [SPDK_BDEV_IO_TYPE_WRITE] = true, 398 [SPDK_BDEV_IO_TYPE_COMPARE] = true, 399 [SPDK_BDEV_IO_TYPE_UNMAP] = true, 400 [SPDK_BDEV_IO_TYPE_FLUSH] = true, 401 [SPDK_BDEV_IO_TYPE_RESET] = true, 402 [SPDK_BDEV_IO_TYPE_NVME_ADMIN] = true, 403 [SPDK_BDEV_IO_TYPE_NVME_IO] = true, 404 [SPDK_BDEV_IO_TYPE_NVME_IO_MD] = true, 405 [SPDK_BDEV_IO_TYPE_WRITE_ZEROES] = true, 406 [SPDK_BDEV_IO_TYPE_ZCOPY] = true, 407 [SPDK_BDEV_IO_TYPE_ABORT] = true, 408 [SPDK_BDEV_IO_TYPE_SEEK_HOLE] = true, 409 [SPDK_BDEV_IO_TYPE_SEEK_DATA] = true, 410 [SPDK_BDEV_IO_TYPE_COPY] = true, 411 }; 412 413 static void 414 ut_enable_io_type(enum spdk_bdev_io_type io_type, bool enable) 415 { 416 g_io_types_supported[io_type] = enable; 417 } 418 419 static bool 420 stub_io_type_supported(void *_bdev, enum spdk_bdev_io_type io_type) 421 { 422 return g_io_types_supported[io_type]; 423 } 424 425 static struct spdk_bdev_fn_table fn_table = { 426 .destruct = stub_destruct, 427 .submit_request = stub_submit_request, 428 .get_io_channel = bdev_ut_get_io_channel, 429 .io_type_supported = stub_io_type_supported, 430 }; 431 432 static int 433 bdev_ut_create_ch(void *io_device, void *ctx_buf) 434 { 435 struct bdev_ut_channel *ch = ctx_buf; 436 437 CU_ASSERT(g_bdev_ut_channel == NULL); 438 g_bdev_ut_channel = ch; 439 440 TAILQ_INIT(&ch->outstanding_io); 441 ch->outstanding_io_count = 0; 442 TAILQ_INIT(&ch->expected_io); 443 return 0; 444 } 445 446 static void 447 bdev_ut_destroy_ch(void *io_device, void *ctx_buf) 448 { 449 CU_ASSERT(g_bdev_ut_channel != NULL); 450 g_bdev_ut_channel = NULL; 451 } 452 453 struct spdk_bdev_module bdev_ut_if; 454 455 static int 456 bdev_ut_module_init(void) 457 { 458 spdk_io_device_register(&g_bdev_ut_io_device, bdev_ut_create_ch, bdev_ut_destroy_ch, 459 sizeof(struct bdev_ut_channel), NULL); 460 spdk_bdev_module_init_done(&bdev_ut_if); 461 return 0; 462 } 463 464 static void 465 bdev_ut_module_fini(void) 466 { 467 spdk_io_device_unregister(&g_bdev_ut_io_device, NULL); 468 } 469 470 struct spdk_bdev_module bdev_ut_if = { 471 .name = "bdev_ut", 472 .module_init = bdev_ut_module_init, 473 .module_fini = bdev_ut_module_fini, 474 .async_init = true, 475 }; 476 477 static void vbdev_ut_examine_config(struct spdk_bdev *bdev); 478 static void vbdev_ut_examine_disk(struct spdk_bdev *bdev); 479 480 static int 481 vbdev_ut_module_init(void) 482 { 483 return 0; 484 } 485 486 static void 487 vbdev_ut_module_fini(void) 488 { 489 } 490 491 struct spdk_bdev_module vbdev_ut_if = { 492 .name = "vbdev_ut", 493 .module_init = vbdev_ut_module_init, 494 .module_fini = vbdev_ut_module_fini, 495 .examine_config = vbdev_ut_examine_config, 496 .examine_disk = vbdev_ut_examine_disk, 497 }; 498 499 SPDK_BDEV_MODULE_REGISTER(bdev_ut, &bdev_ut_if) 500 SPDK_BDEV_MODULE_REGISTER(vbdev_ut, &vbdev_ut_if) 501 502 struct ut_examine_ctx { 503 void (*examine_config)(struct spdk_bdev *bdev); 504 void (*examine_disk)(struct spdk_bdev *bdev); 505 uint32_t examine_config_count; 506 uint32_t examine_disk_count; 507 }; 508 509 static void 510 vbdev_ut_examine_config(struct spdk_bdev *bdev) 511 { 512 struct ut_examine_ctx *ctx = bdev->ctxt; 513 514 if (ctx != NULL) { 515 ctx->examine_config_count++; 516 if (ctx->examine_config != NULL) { 517 ctx->examine_config(bdev); 518 } 519 } 520 521 spdk_bdev_module_examine_done(&vbdev_ut_if); 522 } 523 524 static void 525 vbdev_ut_examine_disk(struct spdk_bdev *bdev) 526 { 527 struct ut_examine_ctx *ctx = bdev->ctxt; 528 529 if (ctx != NULL) { 530 ctx->examine_disk_count++; 531 if (ctx->examine_disk != NULL) { 532 ctx->examine_disk(bdev); 533 } 534 } 535 536 spdk_bdev_module_examine_done(&vbdev_ut_if); 537 } 538 539 static void 540 bdev_init_cb(void *arg, int rc) 541 { 542 CU_ASSERT(rc == 0); 543 } 544 545 static void 546 bdev_fini_cb(void *arg) 547 { 548 } 549 550 static void 551 ut_init_bdev(struct spdk_bdev_opts *opts) 552 { 553 int rc; 554 555 if (opts != NULL) { 556 rc = spdk_bdev_set_opts(opts); 557 CU_ASSERT(rc == 0); 558 } 559 rc = spdk_iobuf_initialize(); 560 CU_ASSERT(rc == 0); 561 spdk_bdev_initialize(bdev_init_cb, NULL); 562 poll_threads(); 563 } 564 565 static void 566 ut_fini_bdev(void) 567 { 568 spdk_bdev_finish(bdev_fini_cb, NULL); 569 spdk_iobuf_finish(bdev_fini_cb, NULL); 570 poll_threads(); 571 } 572 573 static struct spdk_bdev * 574 allocate_bdev_ctx(char *name, void *ctx) 575 { 576 struct spdk_bdev *bdev; 577 int rc; 578 579 bdev = calloc(1, sizeof(*bdev)); 580 SPDK_CU_ASSERT_FATAL(bdev != NULL); 581 582 bdev->ctxt = ctx; 583 bdev->name = name; 584 bdev->fn_table = &fn_table; 585 bdev->module = &bdev_ut_if; 586 bdev->blockcnt = 1024; 587 bdev->blocklen = 512; 588 589 spdk_uuid_generate(&bdev->uuid); 590 591 rc = spdk_bdev_register(bdev); 592 poll_threads(); 593 CU_ASSERT(rc == 0); 594 595 return bdev; 596 } 597 598 static struct spdk_bdev * 599 allocate_bdev(char *name) 600 { 601 return allocate_bdev_ctx(name, NULL); 602 } 603 604 static struct spdk_bdev * 605 allocate_vbdev(char *name) 606 { 607 struct spdk_bdev *bdev; 608 int rc; 609 610 bdev = calloc(1, sizeof(*bdev)); 611 SPDK_CU_ASSERT_FATAL(bdev != NULL); 612 613 bdev->name = name; 614 bdev->fn_table = &fn_table; 615 bdev->module = &vbdev_ut_if; 616 bdev->blockcnt = 1024; 617 bdev->blocklen = 512; 618 619 rc = spdk_bdev_register(bdev); 620 poll_threads(); 621 CU_ASSERT(rc == 0); 622 623 return bdev; 624 } 625 626 static void 627 free_bdev(struct spdk_bdev *bdev) 628 { 629 spdk_bdev_unregister(bdev, NULL, NULL); 630 poll_threads(); 631 memset(bdev, 0xFF, sizeof(*bdev)); 632 free(bdev); 633 } 634 635 static void 636 free_vbdev(struct spdk_bdev *bdev) 637 { 638 spdk_bdev_unregister(bdev, NULL, NULL); 639 poll_threads(); 640 memset(bdev, 0xFF, sizeof(*bdev)); 641 free(bdev); 642 } 643 644 static void 645 get_device_stat_cb(struct spdk_bdev *bdev, struct spdk_bdev_io_stat *stat, void *cb_arg, int rc) 646 { 647 const char *bdev_name; 648 649 CU_ASSERT(bdev != NULL); 650 CU_ASSERT(rc == 0); 651 bdev_name = spdk_bdev_get_name(bdev); 652 CU_ASSERT_STRING_EQUAL(bdev_name, "bdev0"); 653 654 free(stat); 655 656 *(bool *)cb_arg = true; 657 } 658 659 static void 660 bdev_unregister_cb(void *cb_arg, int rc) 661 { 662 g_unregister_arg = cb_arg; 663 g_unregister_rc = rc; 664 } 665 666 static void 667 bdev_ut_event_cb(enum spdk_bdev_event_type type, struct spdk_bdev *bdev, void *event_ctx) 668 { 669 } 670 671 static void 672 bdev_open_cb1(enum spdk_bdev_event_type type, struct spdk_bdev *bdev, void *event_ctx) 673 { 674 struct spdk_bdev_desc *desc = *(struct spdk_bdev_desc **)event_ctx; 675 676 g_event_type1 = type; 677 if (SPDK_BDEV_EVENT_REMOVE == type) { 678 spdk_bdev_close(desc); 679 } 680 } 681 682 static void 683 bdev_open_cb2(enum spdk_bdev_event_type type, struct spdk_bdev *bdev, void *event_ctx) 684 { 685 struct spdk_bdev_desc *desc = *(struct spdk_bdev_desc **)event_ctx; 686 687 g_event_type2 = type; 688 if (SPDK_BDEV_EVENT_REMOVE == type) { 689 spdk_bdev_close(desc); 690 } 691 } 692 693 static void 694 bdev_open_cb3(enum spdk_bdev_event_type type, struct spdk_bdev *bdev, void *event_ctx) 695 { 696 g_event_type3 = type; 697 } 698 699 static void 700 bdev_open_cb4(enum spdk_bdev_event_type type, struct spdk_bdev *bdev, void *event_ctx) 701 { 702 g_event_type4 = type; 703 } 704 705 static void 706 bdev_seek_cb(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg) 707 { 708 g_seek_offset = spdk_bdev_io_get_seek_offset(bdev_io); 709 spdk_bdev_free_io(bdev_io); 710 } 711 712 static void 713 get_device_stat_test(void) 714 { 715 struct spdk_bdev *bdev; 716 struct spdk_bdev_io_stat *stat; 717 bool done; 718 719 bdev = allocate_bdev("bdev0"); 720 stat = calloc(1, sizeof(struct spdk_bdev_io_stat)); 721 if (stat == NULL) { 722 free_bdev(bdev); 723 return; 724 } 725 726 done = false; 727 spdk_bdev_get_device_stat(bdev, stat, get_device_stat_cb, &done); 728 while (!done) { poll_threads(); } 729 730 free_bdev(bdev); 731 } 732 733 static void 734 open_write_test(void) 735 { 736 struct spdk_bdev *bdev[9]; 737 struct spdk_bdev_desc *desc[9] = {}; 738 int rc; 739 740 ut_init_bdev(NULL); 741 742 /* 743 * Create a tree of bdevs to test various open w/ write cases. 744 * 745 * bdev0 through bdev3 are physical block devices, such as NVMe 746 * namespaces or Ceph block devices. 747 * 748 * bdev4 is a virtual bdev with multiple base bdevs. This models 749 * caching or RAID use cases. 750 * 751 * bdev5 through bdev7 are all virtual bdevs with the same base 752 * bdev (except bdev7). This models partitioning or logical volume 753 * use cases. 754 * 755 * bdev7 is a virtual bdev with multiple base bdevs. One of base bdevs 756 * (bdev2) is shared with other virtual bdevs: bdev5 and bdev6. This 757 * models caching, RAID, partitioning or logical volumes use cases. 758 * 759 * bdev8 is a virtual bdev with multiple base bdevs, but these 760 * base bdevs are themselves virtual bdevs. 761 * 762 * bdev8 763 * | 764 * +----------+ 765 * | | 766 * bdev4 bdev5 bdev6 bdev7 767 * | | | | 768 * +---+---+ +---+ + +---+---+ 769 * | | \ | / \ 770 * bdev0 bdev1 bdev2 bdev3 771 */ 772 773 bdev[0] = allocate_bdev("bdev0"); 774 rc = spdk_bdev_module_claim_bdev(bdev[0], NULL, &bdev_ut_if); 775 CU_ASSERT(rc == 0); 776 777 bdev[1] = allocate_bdev("bdev1"); 778 rc = spdk_bdev_module_claim_bdev(bdev[1], NULL, &bdev_ut_if); 779 CU_ASSERT(rc == 0); 780 781 bdev[2] = allocate_bdev("bdev2"); 782 rc = spdk_bdev_module_claim_bdev(bdev[2], NULL, &bdev_ut_if); 783 CU_ASSERT(rc == 0); 784 785 bdev[3] = allocate_bdev("bdev3"); 786 rc = spdk_bdev_module_claim_bdev(bdev[3], NULL, &bdev_ut_if); 787 CU_ASSERT(rc == 0); 788 789 bdev[4] = allocate_vbdev("bdev4"); 790 rc = spdk_bdev_module_claim_bdev(bdev[4], NULL, &bdev_ut_if); 791 CU_ASSERT(rc == 0); 792 793 bdev[5] = allocate_vbdev("bdev5"); 794 rc = spdk_bdev_module_claim_bdev(bdev[5], NULL, &bdev_ut_if); 795 CU_ASSERT(rc == 0); 796 797 bdev[6] = allocate_vbdev("bdev6"); 798 799 bdev[7] = allocate_vbdev("bdev7"); 800 801 bdev[8] = allocate_vbdev("bdev8"); 802 803 /* Open bdev0 read-only. This should succeed. */ 804 rc = spdk_bdev_open_ext("bdev0", false, bdev_ut_event_cb, NULL, &desc[0]); 805 CU_ASSERT(rc == 0); 806 SPDK_CU_ASSERT_FATAL(desc[0] != NULL); 807 CU_ASSERT(bdev[0] == spdk_bdev_desc_get_bdev(desc[0])); 808 spdk_bdev_close(desc[0]); 809 810 /* 811 * Open bdev1 read/write. This should fail since bdev1 has been claimed 812 * by a vbdev module. 813 */ 814 rc = spdk_bdev_open_ext("bdev1", true, bdev_ut_event_cb, NULL, &desc[1]); 815 CU_ASSERT(rc == -EPERM); 816 817 /* 818 * Open bdev4 read/write. This should fail since bdev3 has been claimed 819 * by a vbdev module. 820 */ 821 rc = spdk_bdev_open_ext("bdev4", true, bdev_ut_event_cb, NULL, &desc[4]); 822 CU_ASSERT(rc == -EPERM); 823 824 /* Open bdev4 read-only. This should succeed. */ 825 rc = spdk_bdev_open_ext("bdev4", false, bdev_ut_event_cb, NULL, &desc[4]); 826 CU_ASSERT(rc == 0); 827 SPDK_CU_ASSERT_FATAL(desc[4] != NULL); 828 CU_ASSERT(bdev[4] == spdk_bdev_desc_get_bdev(desc[4])); 829 spdk_bdev_close(desc[4]); 830 831 /* 832 * Open bdev8 read/write. This should succeed since it is a leaf 833 * bdev. 834 */ 835 rc = spdk_bdev_open_ext("bdev8", true, bdev_ut_event_cb, NULL, &desc[8]); 836 CU_ASSERT(rc == 0); 837 SPDK_CU_ASSERT_FATAL(desc[8] != NULL); 838 CU_ASSERT(bdev[8] == spdk_bdev_desc_get_bdev(desc[8])); 839 spdk_bdev_close(desc[8]); 840 841 /* 842 * Open bdev5 read/write. This should fail since bdev4 has been claimed 843 * by a vbdev module. 844 */ 845 rc = spdk_bdev_open_ext("bdev5", true, bdev_ut_event_cb, NULL, &desc[5]); 846 CU_ASSERT(rc == -EPERM); 847 848 /* Open bdev4 read-only. This should succeed. */ 849 rc = spdk_bdev_open_ext("bdev5", false, bdev_ut_event_cb, NULL, &desc[5]); 850 CU_ASSERT(rc == 0); 851 SPDK_CU_ASSERT_FATAL(desc[5] != NULL); 852 CU_ASSERT(bdev[5] == spdk_bdev_desc_get_bdev(desc[5])); 853 spdk_bdev_close(desc[5]); 854 855 free_vbdev(bdev[8]); 856 857 free_vbdev(bdev[5]); 858 free_vbdev(bdev[6]); 859 free_vbdev(bdev[7]); 860 861 free_vbdev(bdev[4]); 862 863 free_bdev(bdev[0]); 864 free_bdev(bdev[1]); 865 free_bdev(bdev[2]); 866 free_bdev(bdev[3]); 867 868 ut_fini_bdev(); 869 } 870 871 static void 872 claim_test(void) 873 { 874 struct spdk_bdev *bdev; 875 struct spdk_bdev_desc *desc, *open_desc; 876 int rc; 877 uint32_t count; 878 879 ut_init_bdev(NULL); 880 881 /* 882 * A vbdev that uses a read-only bdev may need it to remain read-only. 883 * To do so, it opens the bdev read-only, then claims it without 884 * passing a spdk_bdev_desc. 885 */ 886 bdev = allocate_bdev("bdev0"); 887 rc = spdk_bdev_open_ext("bdev0", false, bdev_ut_event_cb, NULL, &desc); 888 CU_ASSERT(rc == 0); 889 CU_ASSERT(desc->write == false); 890 891 rc = spdk_bdev_module_claim_bdev(bdev, NULL, &bdev_ut_if); 892 CU_ASSERT(rc == 0); 893 CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_EXCL_WRITE); 894 CU_ASSERT(bdev->internal.claim.v1.module == &bdev_ut_if); 895 896 /* There should be only one open descriptor and it should still be ro */ 897 count = 0; 898 TAILQ_FOREACH(open_desc, &bdev->internal.open_descs, link) { 899 CU_ASSERT(open_desc == desc); 900 CU_ASSERT(!open_desc->write); 901 count++; 902 } 903 CU_ASSERT(count == 1); 904 905 /* A read-only bdev is upgraded to read-write if desc is passed. */ 906 spdk_bdev_module_release_bdev(bdev); 907 rc = spdk_bdev_module_claim_bdev(bdev, desc, &bdev_ut_if); 908 CU_ASSERT(rc == 0); 909 CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_EXCL_WRITE); 910 CU_ASSERT(bdev->internal.claim.v1.module == &bdev_ut_if); 911 912 /* There should be only one open descriptor and it should be rw */ 913 count = 0; 914 TAILQ_FOREACH(open_desc, &bdev->internal.open_descs, link) { 915 CU_ASSERT(open_desc == desc); 916 CU_ASSERT(open_desc->write); 917 count++; 918 } 919 CU_ASSERT(count == 1); 920 921 spdk_bdev_close(desc); 922 free_bdev(bdev); 923 ut_fini_bdev(); 924 } 925 926 static void 927 bytes_to_blocks_test(void) 928 { 929 struct spdk_bdev bdev; 930 uint64_t offset_blocks, num_blocks; 931 932 memset(&bdev, 0, sizeof(bdev)); 933 934 bdev.blocklen = 512; 935 936 /* All parameters valid */ 937 offset_blocks = 0; 938 num_blocks = 0; 939 CU_ASSERT(bdev_bytes_to_blocks(&bdev, 512, &offset_blocks, 1024, &num_blocks) == 0); 940 CU_ASSERT(offset_blocks == 1); 941 CU_ASSERT(num_blocks == 2); 942 943 /* Offset not a block multiple */ 944 CU_ASSERT(bdev_bytes_to_blocks(&bdev, 3, &offset_blocks, 512, &num_blocks) != 0); 945 946 /* Length not a block multiple */ 947 CU_ASSERT(bdev_bytes_to_blocks(&bdev, 512, &offset_blocks, 3, &num_blocks) != 0); 948 949 /* In case blocklen not the power of two */ 950 bdev.blocklen = 100; 951 CU_ASSERT(bdev_bytes_to_blocks(&bdev, 100, &offset_blocks, 200, &num_blocks) == 0); 952 CU_ASSERT(offset_blocks == 1); 953 CU_ASSERT(num_blocks == 2); 954 955 /* Offset not a block multiple */ 956 CU_ASSERT(bdev_bytes_to_blocks(&bdev, 3, &offset_blocks, 100, &num_blocks) != 0); 957 958 /* Length not a block multiple */ 959 CU_ASSERT(bdev_bytes_to_blocks(&bdev, 100, &offset_blocks, 3, &num_blocks) != 0); 960 } 961 962 static void 963 num_blocks_test(void) 964 { 965 struct spdk_bdev *bdev; 966 struct spdk_bdev_desc *desc = NULL; 967 int rc; 968 969 ut_init_bdev(NULL); 970 bdev = allocate_bdev("num_blocks"); 971 972 spdk_bdev_notify_blockcnt_change(bdev, 50); 973 974 /* Growing block number */ 975 CU_ASSERT(spdk_bdev_notify_blockcnt_change(bdev, 70) == 0); 976 /* Shrinking block number */ 977 CU_ASSERT(spdk_bdev_notify_blockcnt_change(bdev, 30) == 0); 978 979 rc = spdk_bdev_open_ext("num_blocks", false, bdev_open_cb1, &desc, &desc); 980 CU_ASSERT(rc == 0); 981 SPDK_CU_ASSERT_FATAL(desc != NULL); 982 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 983 984 /* Growing block number */ 985 CU_ASSERT(spdk_bdev_notify_blockcnt_change(bdev, 80) == 0); 986 /* Shrinking block number */ 987 CU_ASSERT(spdk_bdev_notify_blockcnt_change(bdev, 20) != 0); 988 989 g_event_type1 = 0xFF; 990 /* Growing block number */ 991 CU_ASSERT(spdk_bdev_notify_blockcnt_change(bdev, 90) == 0); 992 993 poll_threads(); 994 CU_ASSERT_EQUAL(g_event_type1, SPDK_BDEV_EVENT_RESIZE); 995 996 g_event_type1 = 0xFF; 997 /* Growing block number and closing */ 998 CU_ASSERT(spdk_bdev_notify_blockcnt_change(bdev, 100) == 0); 999 1000 spdk_bdev_close(desc); 1001 free_bdev(bdev); 1002 ut_fini_bdev(); 1003 1004 poll_threads(); 1005 1006 /* Callback is not called for closed device */ 1007 CU_ASSERT_EQUAL(g_event_type1, 0xFF); 1008 } 1009 1010 static void 1011 io_valid_test(void) 1012 { 1013 struct spdk_bdev bdev; 1014 1015 memset(&bdev, 0, sizeof(bdev)); 1016 1017 bdev.blocklen = 512; 1018 spdk_spin_init(&bdev.internal.spinlock); 1019 1020 spdk_bdev_notify_blockcnt_change(&bdev, 100); 1021 1022 /* All parameters valid */ 1023 CU_ASSERT(bdev_io_valid_blocks(&bdev, 1, 2) == true); 1024 1025 /* Last valid block */ 1026 CU_ASSERT(bdev_io_valid_blocks(&bdev, 99, 1) == true); 1027 1028 /* Offset past end of bdev */ 1029 CU_ASSERT(bdev_io_valid_blocks(&bdev, 100, 1) == false); 1030 1031 /* Offset + length past end of bdev */ 1032 CU_ASSERT(bdev_io_valid_blocks(&bdev, 99, 2) == false); 1033 1034 /* Offset near end of uint64_t range (2^64 - 1) */ 1035 CU_ASSERT(bdev_io_valid_blocks(&bdev, 18446744073709551615ULL, 1) == false); 1036 1037 spdk_spin_destroy(&bdev.internal.spinlock); 1038 } 1039 1040 static void 1041 alias_add_del_test(void) 1042 { 1043 struct spdk_bdev *bdev[3]; 1044 int rc; 1045 1046 ut_init_bdev(NULL); 1047 1048 /* Creating and registering bdevs */ 1049 bdev[0] = allocate_bdev("bdev0"); 1050 SPDK_CU_ASSERT_FATAL(bdev[0] != 0); 1051 1052 bdev[1] = allocate_bdev("bdev1"); 1053 SPDK_CU_ASSERT_FATAL(bdev[1] != 0); 1054 1055 bdev[2] = allocate_bdev("bdev2"); 1056 SPDK_CU_ASSERT_FATAL(bdev[2] != 0); 1057 1058 poll_threads(); 1059 1060 /* 1061 * Trying adding an alias identical to name. 1062 * Alias is identical to name, so it can not be added to aliases list 1063 */ 1064 rc = spdk_bdev_alias_add(bdev[0], bdev[0]->name); 1065 CU_ASSERT(rc == -EEXIST); 1066 1067 /* 1068 * Trying to add empty alias, 1069 * this one should fail 1070 */ 1071 rc = spdk_bdev_alias_add(bdev[0], NULL); 1072 CU_ASSERT(rc == -EINVAL); 1073 1074 /* Trying adding same alias to two different registered bdevs */ 1075 1076 /* Alias is used first time, so this one should pass */ 1077 rc = spdk_bdev_alias_add(bdev[0], "proper alias 0"); 1078 CU_ASSERT(rc == 0); 1079 1080 /* Alias was added to another bdev, so this one should fail */ 1081 rc = spdk_bdev_alias_add(bdev[1], "proper alias 0"); 1082 CU_ASSERT(rc == -EEXIST); 1083 1084 /* Alias is used first time, so this one should pass */ 1085 rc = spdk_bdev_alias_add(bdev[1], "proper alias 1"); 1086 CU_ASSERT(rc == 0); 1087 1088 /* Trying removing an alias from registered bdevs */ 1089 1090 /* Alias is not on a bdev aliases list, so this one should fail */ 1091 rc = spdk_bdev_alias_del(bdev[0], "not existing"); 1092 CU_ASSERT(rc == -ENOENT); 1093 1094 /* Alias is present on a bdev aliases list, so this one should pass */ 1095 rc = spdk_bdev_alias_del(bdev[0], "proper alias 0"); 1096 CU_ASSERT(rc == 0); 1097 1098 /* Alias is present on a bdev aliases list, so this one should pass */ 1099 rc = spdk_bdev_alias_del(bdev[1], "proper alias 1"); 1100 CU_ASSERT(rc == 0); 1101 1102 /* Trying to remove name instead of alias, so this one should fail, name cannot be changed or removed */ 1103 rc = spdk_bdev_alias_del(bdev[0], bdev[0]->name); 1104 CU_ASSERT(rc != 0); 1105 1106 /* Trying to del all alias from empty alias list */ 1107 spdk_bdev_alias_del_all(bdev[2]); 1108 SPDK_CU_ASSERT_FATAL(TAILQ_EMPTY(&bdev[2]->aliases)); 1109 1110 /* Trying to del all alias from non-empty alias list */ 1111 rc = spdk_bdev_alias_add(bdev[2], "alias0"); 1112 CU_ASSERT(rc == 0); 1113 rc = spdk_bdev_alias_add(bdev[2], "alias1"); 1114 CU_ASSERT(rc == 0); 1115 spdk_bdev_alias_del_all(bdev[2]); 1116 CU_ASSERT(TAILQ_EMPTY(&bdev[2]->aliases)); 1117 1118 /* Unregister and free bdevs */ 1119 spdk_bdev_unregister(bdev[0], NULL, NULL); 1120 spdk_bdev_unregister(bdev[1], NULL, NULL); 1121 spdk_bdev_unregister(bdev[2], NULL, NULL); 1122 1123 poll_threads(); 1124 1125 free(bdev[0]); 1126 free(bdev[1]); 1127 free(bdev[2]); 1128 1129 ut_fini_bdev(); 1130 } 1131 1132 static void 1133 io_done(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg) 1134 { 1135 g_io_done = true; 1136 g_io_status = bdev_io->internal.status; 1137 if ((bdev_io->type == SPDK_BDEV_IO_TYPE_ZCOPY) && 1138 (bdev_io->u.bdev.zcopy.start)) { 1139 g_zcopy_bdev_io = bdev_io; 1140 } else { 1141 spdk_bdev_free_io(bdev_io); 1142 g_zcopy_bdev_io = NULL; 1143 } 1144 } 1145 1146 struct bdev_ut_io_wait_entry { 1147 struct spdk_bdev_io_wait_entry entry; 1148 struct spdk_io_channel *io_ch; 1149 struct spdk_bdev_desc *desc; 1150 bool submitted; 1151 }; 1152 1153 static void 1154 io_wait_cb(void *arg) 1155 { 1156 struct bdev_ut_io_wait_entry *entry = arg; 1157 int rc; 1158 1159 rc = spdk_bdev_read_blocks(entry->desc, entry->io_ch, NULL, 0, 1, io_done, NULL); 1160 CU_ASSERT(rc == 0); 1161 entry->submitted = true; 1162 } 1163 1164 static void 1165 bdev_io_types_test(void) 1166 { 1167 struct spdk_bdev *bdev; 1168 struct spdk_bdev_desc *desc = NULL; 1169 struct spdk_io_channel *io_ch; 1170 struct spdk_bdev_opts bdev_opts = {}; 1171 int rc; 1172 1173 spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts)); 1174 bdev_opts.bdev_io_pool_size = 4; 1175 bdev_opts.bdev_io_cache_size = 2; 1176 ut_init_bdev(&bdev_opts); 1177 1178 bdev = allocate_bdev("bdev0"); 1179 1180 rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc); 1181 CU_ASSERT(rc == 0); 1182 poll_threads(); 1183 SPDK_CU_ASSERT_FATAL(desc != NULL); 1184 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 1185 io_ch = spdk_bdev_get_io_channel(desc); 1186 CU_ASSERT(io_ch != NULL); 1187 1188 /* WRITE and WRITE ZEROES are not supported */ 1189 ut_enable_io_type(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, false); 1190 ut_enable_io_type(SPDK_BDEV_IO_TYPE_WRITE, false); 1191 rc = spdk_bdev_write_zeroes_blocks(desc, io_ch, 0, 128, io_done, NULL); 1192 CU_ASSERT(rc == -ENOTSUP); 1193 ut_enable_io_type(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, true); 1194 ut_enable_io_type(SPDK_BDEV_IO_TYPE_WRITE, true); 1195 1196 /* NVME_IO, NVME_IO_MD and NVME_ADMIN are not supported */ 1197 ut_enable_io_type(SPDK_BDEV_IO_TYPE_NVME_IO, false); 1198 ut_enable_io_type(SPDK_BDEV_IO_TYPE_NVME_IO_MD, false); 1199 ut_enable_io_type(SPDK_BDEV_IO_TYPE_NVME_ADMIN, false); 1200 rc = spdk_bdev_nvme_io_passthru(desc, io_ch, NULL, NULL, 0, NULL, NULL); 1201 CU_ASSERT(rc == -ENOTSUP); 1202 rc = spdk_bdev_nvme_io_passthru_md(desc, io_ch, NULL, NULL, 0, NULL, 0, NULL, NULL); 1203 CU_ASSERT(rc == -ENOTSUP); 1204 rc = spdk_bdev_nvme_admin_passthru(desc, io_ch, NULL, NULL, 0, NULL, NULL); 1205 CU_ASSERT(rc == -ENOTSUP); 1206 ut_enable_io_type(SPDK_BDEV_IO_TYPE_NVME_IO, true); 1207 ut_enable_io_type(SPDK_BDEV_IO_TYPE_NVME_IO_MD, true); 1208 ut_enable_io_type(SPDK_BDEV_IO_TYPE_NVME_ADMIN, true); 1209 1210 spdk_put_io_channel(io_ch); 1211 spdk_bdev_close(desc); 1212 free_bdev(bdev); 1213 ut_fini_bdev(); 1214 } 1215 1216 static void 1217 bdev_io_wait_test(void) 1218 { 1219 struct spdk_bdev *bdev; 1220 struct spdk_bdev_desc *desc = NULL; 1221 struct spdk_io_channel *io_ch; 1222 struct spdk_bdev_opts bdev_opts = {}; 1223 struct bdev_ut_io_wait_entry io_wait_entry; 1224 struct bdev_ut_io_wait_entry io_wait_entry2; 1225 int rc; 1226 1227 spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts)); 1228 bdev_opts.bdev_io_pool_size = 4; 1229 bdev_opts.bdev_io_cache_size = 2; 1230 ut_init_bdev(&bdev_opts); 1231 1232 bdev = allocate_bdev("bdev0"); 1233 1234 rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc); 1235 CU_ASSERT(rc == 0); 1236 poll_threads(); 1237 SPDK_CU_ASSERT_FATAL(desc != NULL); 1238 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 1239 io_ch = spdk_bdev_get_io_channel(desc); 1240 CU_ASSERT(io_ch != NULL); 1241 1242 rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, NULL); 1243 CU_ASSERT(rc == 0); 1244 rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, NULL); 1245 CU_ASSERT(rc == 0); 1246 rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, NULL); 1247 CU_ASSERT(rc == 0); 1248 rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, NULL); 1249 CU_ASSERT(rc == 0); 1250 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 4); 1251 1252 rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, NULL); 1253 CU_ASSERT(rc == -ENOMEM); 1254 1255 io_wait_entry.entry.bdev = bdev; 1256 io_wait_entry.entry.cb_fn = io_wait_cb; 1257 io_wait_entry.entry.cb_arg = &io_wait_entry; 1258 io_wait_entry.io_ch = io_ch; 1259 io_wait_entry.desc = desc; 1260 io_wait_entry.submitted = false; 1261 /* Cannot use the same io_wait_entry for two different calls. */ 1262 memcpy(&io_wait_entry2, &io_wait_entry, sizeof(io_wait_entry)); 1263 io_wait_entry2.entry.cb_arg = &io_wait_entry2; 1264 1265 /* Queue two I/O waits. */ 1266 rc = spdk_bdev_queue_io_wait(bdev, io_ch, &io_wait_entry.entry); 1267 CU_ASSERT(rc == 0); 1268 CU_ASSERT(io_wait_entry.submitted == false); 1269 rc = spdk_bdev_queue_io_wait(bdev, io_ch, &io_wait_entry2.entry); 1270 CU_ASSERT(rc == 0); 1271 CU_ASSERT(io_wait_entry2.submitted == false); 1272 1273 stub_complete_io(1); 1274 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 4); 1275 CU_ASSERT(io_wait_entry.submitted == true); 1276 CU_ASSERT(io_wait_entry2.submitted == false); 1277 1278 stub_complete_io(1); 1279 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 4); 1280 CU_ASSERT(io_wait_entry2.submitted == true); 1281 1282 stub_complete_io(4); 1283 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 1284 1285 spdk_put_io_channel(io_ch); 1286 spdk_bdev_close(desc); 1287 free_bdev(bdev); 1288 ut_fini_bdev(); 1289 } 1290 1291 static void 1292 bdev_io_spans_split_test(void) 1293 { 1294 struct spdk_bdev bdev; 1295 struct spdk_bdev_io bdev_io; 1296 struct iovec iov[SPDK_BDEV_IO_NUM_CHILD_IOV]; 1297 1298 memset(&bdev, 0, sizeof(bdev)); 1299 bdev_io.u.bdev.iovs = iov; 1300 1301 bdev_io.type = SPDK_BDEV_IO_TYPE_READ; 1302 bdev.optimal_io_boundary = 0; 1303 bdev.max_segment_size = 0; 1304 bdev.max_num_segments = 0; 1305 bdev_io.bdev = &bdev; 1306 1307 /* bdev has no optimal_io_boundary and max_size set - so this should return false. */ 1308 CU_ASSERT(bdev_io_should_split(&bdev_io) == false); 1309 1310 bdev.split_on_optimal_io_boundary = true; 1311 bdev.optimal_io_boundary = 32; 1312 bdev_io.type = SPDK_BDEV_IO_TYPE_RESET; 1313 1314 /* RESETs are not based on LBAs - so this should return false. */ 1315 CU_ASSERT(bdev_io_should_split(&bdev_io) == false); 1316 1317 bdev_io.type = SPDK_BDEV_IO_TYPE_READ; 1318 bdev_io.u.bdev.offset_blocks = 0; 1319 bdev_io.u.bdev.num_blocks = 32; 1320 1321 /* This I/O run right up to, but does not cross, the boundary - so this should return false. */ 1322 CU_ASSERT(bdev_io_should_split(&bdev_io) == false); 1323 1324 bdev_io.u.bdev.num_blocks = 33; 1325 1326 /* This I/O spans a boundary. */ 1327 CU_ASSERT(bdev_io_should_split(&bdev_io) == true); 1328 1329 bdev_io.u.bdev.num_blocks = 32; 1330 bdev.max_segment_size = 512 * 32; 1331 bdev.max_num_segments = 1; 1332 bdev_io.u.bdev.iovcnt = 1; 1333 iov[0].iov_len = 512; 1334 1335 /* Does not cross and exceed max_size or max_segs */ 1336 CU_ASSERT(bdev_io_should_split(&bdev_io) == false); 1337 1338 bdev.split_on_optimal_io_boundary = false; 1339 bdev.max_segment_size = 512; 1340 bdev.max_num_segments = 1; 1341 bdev_io.u.bdev.iovcnt = 2; 1342 1343 /* Exceed max_segs */ 1344 CU_ASSERT(bdev_io_should_split(&bdev_io) == true); 1345 1346 bdev.max_num_segments = 2; 1347 iov[0].iov_len = 513; 1348 iov[1].iov_len = 512; 1349 1350 /* Exceed max_sizes */ 1351 CU_ASSERT(bdev_io_should_split(&bdev_io) == true); 1352 1353 bdev.max_segment_size = 0; 1354 bdev.write_unit_size = 32; 1355 bdev.split_on_write_unit = true; 1356 bdev_io.type = SPDK_BDEV_IO_TYPE_WRITE; 1357 1358 /* This I/O is one write unit */ 1359 CU_ASSERT(bdev_io_should_split(&bdev_io) == false); 1360 1361 bdev_io.u.bdev.num_blocks = 32 * 2; 1362 1363 /* This I/O is more than one write unit */ 1364 CU_ASSERT(bdev_io_should_split(&bdev_io) == true); 1365 1366 bdev_io.u.bdev.offset_blocks = 1; 1367 bdev_io.u.bdev.num_blocks = 32; 1368 1369 /* This I/O is not aligned to write unit size */ 1370 CU_ASSERT(bdev_io_should_split(&bdev_io) == true); 1371 } 1372 1373 static void 1374 bdev_io_boundary_split_test(void) 1375 { 1376 struct spdk_bdev *bdev; 1377 struct spdk_bdev_desc *desc = NULL; 1378 struct spdk_io_channel *io_ch; 1379 struct spdk_bdev_opts bdev_opts = {}; 1380 struct iovec iov[SPDK_BDEV_IO_NUM_CHILD_IOV * 2]; 1381 struct ut_expected_io *expected_io; 1382 void *md_buf = (void *)0xFF000000; 1383 uint64_t i; 1384 int rc; 1385 1386 spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts)); 1387 bdev_opts.bdev_io_pool_size = 512; 1388 bdev_opts.bdev_io_cache_size = 64; 1389 ut_init_bdev(&bdev_opts); 1390 1391 bdev = allocate_bdev("bdev0"); 1392 1393 rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc); 1394 CU_ASSERT(rc == 0); 1395 SPDK_CU_ASSERT_FATAL(desc != NULL); 1396 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 1397 io_ch = spdk_bdev_get_io_channel(desc); 1398 CU_ASSERT(io_ch != NULL); 1399 1400 bdev->optimal_io_boundary = 16; 1401 bdev->split_on_optimal_io_boundary = false; 1402 1403 g_io_done = false; 1404 1405 /* First test that the I/O does not get split if split_on_optimal_io_boundary == false. */ 1406 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 14, 8, 1); 1407 ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 8 * 512); 1408 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1409 1410 rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 8, io_done, NULL); 1411 CU_ASSERT(rc == 0); 1412 CU_ASSERT(g_io_done == false); 1413 1414 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 1415 stub_complete_io(1); 1416 CU_ASSERT(g_io_done == true); 1417 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 1418 1419 bdev->split_on_optimal_io_boundary = true; 1420 bdev->md_interleave = false; 1421 bdev->md_len = 8; 1422 1423 /* Now test that a single-vector command is split correctly. 1424 * Offset 14, length 8, payload 0xF000 1425 * Child - Offset 14, length 2, payload 0xF000 1426 * Child - Offset 16, length 6, payload 0xF000 + 2 * 512 1427 * 1428 * Set up the expected values before calling spdk_bdev_read_blocks 1429 */ 1430 g_io_done = false; 1431 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 14, 2, 1); 1432 expected_io->md_buf = md_buf; 1433 ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 2 * 512); 1434 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1435 1436 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 16, 6, 1); 1437 expected_io->md_buf = md_buf + 2 * 8; 1438 ut_expected_io_set_iov(expected_io, 0, (void *)(0xF000 + 2 * 512), 6 * 512); 1439 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1440 1441 /* spdk_bdev_read_blocks will submit the first child immediately. */ 1442 rc = spdk_bdev_read_blocks_with_md(desc, io_ch, (void *)0xF000, md_buf, 1443 14, 8, io_done, NULL); 1444 CU_ASSERT(rc == 0); 1445 CU_ASSERT(g_io_done == false); 1446 1447 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 1448 stub_complete_io(2); 1449 CU_ASSERT(g_io_done == true); 1450 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 1451 1452 /* Now set up a more complex, multi-vector command that needs to be split, 1453 * including splitting iovecs. 1454 */ 1455 iov[0].iov_base = (void *)0x10000; 1456 iov[0].iov_len = 512; 1457 iov[1].iov_base = (void *)0x20000; 1458 iov[1].iov_len = 20 * 512; 1459 iov[2].iov_base = (void *)0x30000; 1460 iov[2].iov_len = 11 * 512; 1461 1462 g_io_done = false; 1463 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 14, 2, 2); 1464 expected_io->md_buf = md_buf; 1465 ut_expected_io_set_iov(expected_io, 0, (void *)0x10000, 512); 1466 ut_expected_io_set_iov(expected_io, 1, (void *)0x20000, 512); 1467 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1468 1469 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 16, 16, 1); 1470 expected_io->md_buf = md_buf + 2 * 8; 1471 ut_expected_io_set_iov(expected_io, 0, (void *)(0x20000 + 512), 16 * 512); 1472 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1473 1474 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 32, 14, 2); 1475 expected_io->md_buf = md_buf + 18 * 8; 1476 ut_expected_io_set_iov(expected_io, 0, (void *)(0x20000 + 17 * 512), 3 * 512); 1477 ut_expected_io_set_iov(expected_io, 1, (void *)0x30000, 11 * 512); 1478 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1479 1480 rc = spdk_bdev_writev_blocks_with_md(desc, io_ch, iov, 3, md_buf, 1481 14, 32, io_done, NULL); 1482 CU_ASSERT(rc == 0); 1483 CU_ASSERT(g_io_done == false); 1484 1485 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 3); 1486 stub_complete_io(3); 1487 CU_ASSERT(g_io_done == true); 1488 1489 /* Test multi vector command that needs to be split by strip and then needs to be 1490 * split further due to the capacity of child iovs. 1491 */ 1492 for (i = 0; i < SPDK_BDEV_IO_NUM_CHILD_IOV * 2; i++) { 1493 iov[i].iov_base = (void *)((i + 1) * 0x10000); 1494 iov[i].iov_len = 512; 1495 } 1496 1497 bdev->optimal_io_boundary = SPDK_BDEV_IO_NUM_CHILD_IOV; 1498 g_io_done = false; 1499 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0, SPDK_BDEV_IO_NUM_CHILD_IOV, 1500 SPDK_BDEV_IO_NUM_CHILD_IOV); 1501 expected_io->md_buf = md_buf; 1502 for (i = 0; i < SPDK_BDEV_IO_NUM_CHILD_IOV; i++) { 1503 ut_expected_io_set_iov(expected_io, i, (void *)((i + 1) * 0x10000), 512); 1504 } 1505 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1506 1507 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, SPDK_BDEV_IO_NUM_CHILD_IOV, 1508 SPDK_BDEV_IO_NUM_CHILD_IOV, SPDK_BDEV_IO_NUM_CHILD_IOV); 1509 expected_io->md_buf = md_buf + SPDK_BDEV_IO_NUM_CHILD_IOV * 8; 1510 for (i = 0; i < SPDK_BDEV_IO_NUM_CHILD_IOV; i++) { 1511 ut_expected_io_set_iov(expected_io, i, 1512 (void *)((i + 1 + SPDK_BDEV_IO_NUM_CHILD_IOV) * 0x10000), 512); 1513 } 1514 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1515 1516 rc = spdk_bdev_readv_blocks_with_md(desc, io_ch, iov, SPDK_BDEV_IO_NUM_CHILD_IOV * 2, md_buf, 1517 0, SPDK_BDEV_IO_NUM_CHILD_IOV * 2, io_done, NULL); 1518 CU_ASSERT(rc == 0); 1519 CU_ASSERT(g_io_done == false); 1520 1521 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 1522 stub_complete_io(1); 1523 CU_ASSERT(g_io_done == false); 1524 1525 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 1526 stub_complete_io(1); 1527 CU_ASSERT(g_io_done == true); 1528 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 1529 1530 /* Test multi vector command that needs to be split by strip and then needs to be 1531 * split further due to the capacity of child iovs. In this case, the length of 1532 * the rest of iovec array with an I/O boundary is the multiple of block size. 1533 */ 1534 1535 /* Fill iovec array for exactly one boundary. The iovec cnt for this boundary 1536 * is SPDK_BDEV_IO_NUM_CHILD_IOV + 1, which exceeds the capacity of child iovs. 1537 */ 1538 for (i = 0; i < SPDK_BDEV_IO_NUM_CHILD_IOV - 2; i++) { 1539 iov[i].iov_base = (void *)((i + 1) * 0x10000); 1540 iov[i].iov_len = 512; 1541 } 1542 for (i = SPDK_BDEV_IO_NUM_CHILD_IOV - 2; i < SPDK_BDEV_IO_NUM_CHILD_IOV; i++) { 1543 iov[i].iov_base = (void *)((i + 1) * 0x10000); 1544 iov[i].iov_len = 256; 1545 } 1546 iov[SPDK_BDEV_IO_NUM_CHILD_IOV].iov_base = (void *)((SPDK_BDEV_IO_NUM_CHILD_IOV + 1) * 0x10000); 1547 iov[SPDK_BDEV_IO_NUM_CHILD_IOV].iov_len = 512; 1548 1549 /* Add an extra iovec to trigger split */ 1550 iov[SPDK_BDEV_IO_NUM_CHILD_IOV + 1].iov_base = (void *)((SPDK_BDEV_IO_NUM_CHILD_IOV + 2) * 0x10000); 1551 iov[SPDK_BDEV_IO_NUM_CHILD_IOV + 1].iov_len = 512; 1552 1553 bdev->optimal_io_boundary = SPDK_BDEV_IO_NUM_CHILD_IOV; 1554 g_io_done = false; 1555 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0, 1556 SPDK_BDEV_IO_NUM_CHILD_IOV - 1, SPDK_BDEV_IO_NUM_CHILD_IOV); 1557 expected_io->md_buf = md_buf; 1558 for (i = 0; i < SPDK_BDEV_IO_NUM_CHILD_IOV - 2; i++) { 1559 ut_expected_io_set_iov(expected_io, i, 1560 (void *)((i + 1) * 0x10000), 512); 1561 } 1562 for (i = SPDK_BDEV_IO_NUM_CHILD_IOV - 2; i < SPDK_BDEV_IO_NUM_CHILD_IOV; i++) { 1563 ut_expected_io_set_iov(expected_io, i, 1564 (void *)((i + 1) * 0x10000), 256); 1565 } 1566 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1567 1568 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, SPDK_BDEV_IO_NUM_CHILD_IOV - 1, 1569 1, 1); 1570 expected_io->md_buf = md_buf + (SPDK_BDEV_IO_NUM_CHILD_IOV - 1) * 8; 1571 ut_expected_io_set_iov(expected_io, 0, 1572 (void *)((SPDK_BDEV_IO_NUM_CHILD_IOV + 1) * 0x10000), 512); 1573 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1574 1575 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, SPDK_BDEV_IO_NUM_CHILD_IOV, 1576 1, 1); 1577 expected_io->md_buf = md_buf + SPDK_BDEV_IO_NUM_CHILD_IOV * 8; 1578 ut_expected_io_set_iov(expected_io, 0, 1579 (void *)((SPDK_BDEV_IO_NUM_CHILD_IOV + 2) * 0x10000), 512); 1580 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1581 1582 rc = spdk_bdev_readv_blocks_with_md(desc, io_ch, iov, SPDK_BDEV_IO_NUM_CHILD_IOV + 2, md_buf, 1583 0, SPDK_BDEV_IO_NUM_CHILD_IOV + 1, io_done, NULL); 1584 CU_ASSERT(rc == 0); 1585 CU_ASSERT(g_io_done == false); 1586 1587 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 1588 stub_complete_io(1); 1589 CU_ASSERT(g_io_done == false); 1590 1591 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 1592 stub_complete_io(2); 1593 CU_ASSERT(g_io_done == true); 1594 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 1595 1596 /* Test multi vector command that needs to be split by strip and then needs to be 1597 * split further due to the capacity of child iovs, the child request offset should 1598 * be rewind to last aligned offset and go success without error. 1599 */ 1600 for (i = 0; i < SPDK_BDEV_IO_NUM_CHILD_IOV - 1; i++) { 1601 iov[i].iov_base = (void *)((i + 1) * 0x10000); 1602 iov[i].iov_len = 512; 1603 } 1604 iov[SPDK_BDEV_IO_NUM_CHILD_IOV - 1].iov_base = (void *)(SPDK_BDEV_IO_NUM_CHILD_IOV * 0x10000); 1605 iov[SPDK_BDEV_IO_NUM_CHILD_IOV - 1].iov_len = 256; 1606 1607 iov[SPDK_BDEV_IO_NUM_CHILD_IOV].iov_base = (void *)((SPDK_BDEV_IO_NUM_CHILD_IOV + 1) * 0x10000); 1608 iov[SPDK_BDEV_IO_NUM_CHILD_IOV].iov_len = 256; 1609 1610 iov[SPDK_BDEV_IO_NUM_CHILD_IOV + 1].iov_base = (void *)((SPDK_BDEV_IO_NUM_CHILD_IOV + 2) * 0x10000); 1611 iov[SPDK_BDEV_IO_NUM_CHILD_IOV + 1].iov_len = 512; 1612 1613 bdev->optimal_io_boundary = SPDK_BDEV_IO_NUM_CHILD_IOV; 1614 g_io_done = false; 1615 g_io_status = 0; 1616 /* The first expected io should be start from offset 0 to SPDK_BDEV_IO_NUM_CHILD_IOV - 1 */ 1617 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0, 1618 SPDK_BDEV_IO_NUM_CHILD_IOV - 1, SPDK_BDEV_IO_NUM_CHILD_IOV - 1); 1619 expected_io->md_buf = md_buf; 1620 for (i = 0; i < SPDK_BDEV_IO_NUM_CHILD_IOV - 1; i++) { 1621 ut_expected_io_set_iov(expected_io, i, 1622 (void *)((i + 1) * 0x10000), 512); 1623 } 1624 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1625 /* The second expected io should be start from offset SPDK_BDEV_IO_NUM_CHILD_IOV - 1 to SPDK_BDEV_IO_NUM_CHILD_IOV */ 1626 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, SPDK_BDEV_IO_NUM_CHILD_IOV - 1, 1627 1, 2); 1628 expected_io->md_buf = md_buf + (SPDK_BDEV_IO_NUM_CHILD_IOV - 1) * 8; 1629 ut_expected_io_set_iov(expected_io, 0, 1630 (void *)(SPDK_BDEV_IO_NUM_CHILD_IOV * 0x10000), 256); 1631 ut_expected_io_set_iov(expected_io, 1, 1632 (void *)((SPDK_BDEV_IO_NUM_CHILD_IOV + 1) * 0x10000), 256); 1633 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1634 /* The third expected io should be start from offset SPDK_BDEV_IO_NUM_CHILD_IOV to SPDK_BDEV_IO_NUM_CHILD_IOV + 1 */ 1635 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, SPDK_BDEV_IO_NUM_CHILD_IOV, 1636 1, 1); 1637 expected_io->md_buf = md_buf + SPDK_BDEV_IO_NUM_CHILD_IOV * 8; 1638 ut_expected_io_set_iov(expected_io, 0, 1639 (void *)((SPDK_BDEV_IO_NUM_CHILD_IOV + 2) * 0x10000), 512); 1640 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1641 1642 rc = spdk_bdev_readv_blocks_with_md(desc, io_ch, iov, SPDK_BDEV_IO_NUM_CHILD_IOV * 2, md_buf, 1643 0, SPDK_BDEV_IO_NUM_CHILD_IOV + 1, io_done, NULL); 1644 CU_ASSERT(rc == 0); 1645 CU_ASSERT(g_io_done == false); 1646 1647 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 1648 stub_complete_io(1); 1649 CU_ASSERT(g_io_done == false); 1650 1651 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 1652 stub_complete_io(2); 1653 CU_ASSERT(g_io_done == true); 1654 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 1655 1656 /* Test multi vector command that needs to be split due to the IO boundary and 1657 * the capacity of child iovs. Especially test the case when the command is 1658 * split due to the capacity of child iovs, the tail address is not aligned with 1659 * block size and is rewinded to the aligned address. 1660 * 1661 * The iovecs used in read request is complex but is based on the data 1662 * collected in the real issue. We change the base addresses but keep the lengths 1663 * not to loose the credibility of the test. 1664 */ 1665 bdev->optimal_io_boundary = 128; 1666 g_io_done = false; 1667 g_io_status = 0; 1668 1669 for (i = 0; i < 31; i++) { 1670 iov[i].iov_base = (void *)(0xFEED0000000 + (i << 20)); 1671 iov[i].iov_len = 1024; 1672 } 1673 iov[31].iov_base = (void *)0xFEED1F00000; 1674 iov[31].iov_len = 32768; 1675 iov[32].iov_base = (void *)0xFEED2000000; 1676 iov[32].iov_len = 160; 1677 iov[33].iov_base = (void *)0xFEED2100000; 1678 iov[33].iov_len = 4096; 1679 iov[34].iov_base = (void *)0xFEED2200000; 1680 iov[34].iov_len = 4096; 1681 iov[35].iov_base = (void *)0xFEED2300000; 1682 iov[35].iov_len = 4096; 1683 iov[36].iov_base = (void *)0xFEED2400000; 1684 iov[36].iov_len = 4096; 1685 iov[37].iov_base = (void *)0xFEED2500000; 1686 iov[37].iov_len = 4096; 1687 iov[38].iov_base = (void *)0xFEED2600000; 1688 iov[38].iov_len = 4096; 1689 iov[39].iov_base = (void *)0xFEED2700000; 1690 iov[39].iov_len = 4096; 1691 iov[40].iov_base = (void *)0xFEED2800000; 1692 iov[40].iov_len = 4096; 1693 iov[41].iov_base = (void *)0xFEED2900000; 1694 iov[41].iov_len = 4096; 1695 iov[42].iov_base = (void *)0xFEED2A00000; 1696 iov[42].iov_len = 4096; 1697 iov[43].iov_base = (void *)0xFEED2B00000; 1698 iov[43].iov_len = 12288; 1699 iov[44].iov_base = (void *)0xFEED2C00000; 1700 iov[44].iov_len = 8192; 1701 iov[45].iov_base = (void *)0xFEED2F00000; 1702 iov[45].iov_len = 4096; 1703 iov[46].iov_base = (void *)0xFEED3000000; 1704 iov[46].iov_len = 4096; 1705 iov[47].iov_base = (void *)0xFEED3100000; 1706 iov[47].iov_len = 4096; 1707 iov[48].iov_base = (void *)0xFEED3200000; 1708 iov[48].iov_len = 24576; 1709 iov[49].iov_base = (void *)0xFEED3300000; 1710 iov[49].iov_len = 16384; 1711 iov[50].iov_base = (void *)0xFEED3400000; 1712 iov[50].iov_len = 12288; 1713 iov[51].iov_base = (void *)0xFEED3500000; 1714 iov[51].iov_len = 4096; 1715 iov[52].iov_base = (void *)0xFEED3600000; 1716 iov[52].iov_len = 4096; 1717 iov[53].iov_base = (void *)0xFEED3700000; 1718 iov[53].iov_len = 4096; 1719 iov[54].iov_base = (void *)0xFEED3800000; 1720 iov[54].iov_len = 28672; 1721 iov[55].iov_base = (void *)0xFEED3900000; 1722 iov[55].iov_len = 20480; 1723 iov[56].iov_base = (void *)0xFEED3A00000; 1724 iov[56].iov_len = 4096; 1725 iov[57].iov_base = (void *)0xFEED3B00000; 1726 iov[57].iov_len = 12288; 1727 iov[58].iov_base = (void *)0xFEED3C00000; 1728 iov[58].iov_len = 4096; 1729 iov[59].iov_base = (void *)0xFEED3D00000; 1730 iov[59].iov_len = 4096; 1731 iov[60].iov_base = (void *)0xFEED3E00000; 1732 iov[60].iov_len = 352; 1733 1734 /* The 1st child IO must be from iov[0] to iov[31] split by the capacity 1735 * of child iovs, 1736 */ 1737 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0, 126, 32); 1738 expected_io->md_buf = md_buf; 1739 for (i = 0; i < 32; i++) { 1740 ut_expected_io_set_iov(expected_io, i, iov[i].iov_base, iov[i].iov_len); 1741 } 1742 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1743 1744 /* The 2nd child IO must be from iov[32] to the first 864 bytes of iov[33] 1745 * split by the IO boundary requirement. 1746 */ 1747 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 126, 2, 2); 1748 expected_io->md_buf = md_buf + 126 * 8; 1749 ut_expected_io_set_iov(expected_io, 0, iov[32].iov_base, iov[32].iov_len); 1750 ut_expected_io_set_iov(expected_io, 1, iov[33].iov_base, 864); 1751 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1752 1753 /* The 3rd child IO must be from the remaining 3232 bytes of iov[33] to 1754 * the first 864 bytes of iov[46] split by the IO boundary requirement. 1755 */ 1756 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 128, 128, 14); 1757 expected_io->md_buf = md_buf + 128 * 8; 1758 ut_expected_io_set_iov(expected_io, 0, (void *)((uintptr_t)iov[33].iov_base + 864), 1759 iov[33].iov_len - 864); 1760 ut_expected_io_set_iov(expected_io, 1, iov[34].iov_base, iov[34].iov_len); 1761 ut_expected_io_set_iov(expected_io, 2, iov[35].iov_base, iov[35].iov_len); 1762 ut_expected_io_set_iov(expected_io, 3, iov[36].iov_base, iov[36].iov_len); 1763 ut_expected_io_set_iov(expected_io, 4, iov[37].iov_base, iov[37].iov_len); 1764 ut_expected_io_set_iov(expected_io, 5, iov[38].iov_base, iov[38].iov_len); 1765 ut_expected_io_set_iov(expected_io, 6, iov[39].iov_base, iov[39].iov_len); 1766 ut_expected_io_set_iov(expected_io, 7, iov[40].iov_base, iov[40].iov_len); 1767 ut_expected_io_set_iov(expected_io, 8, iov[41].iov_base, iov[41].iov_len); 1768 ut_expected_io_set_iov(expected_io, 9, iov[42].iov_base, iov[42].iov_len); 1769 ut_expected_io_set_iov(expected_io, 10, iov[43].iov_base, iov[43].iov_len); 1770 ut_expected_io_set_iov(expected_io, 11, iov[44].iov_base, iov[44].iov_len); 1771 ut_expected_io_set_iov(expected_io, 12, iov[45].iov_base, iov[45].iov_len); 1772 ut_expected_io_set_iov(expected_io, 13, iov[46].iov_base, 864); 1773 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1774 1775 /* The 4th child IO must be from the remaining 3232 bytes of iov[46] to the 1776 * first 864 bytes of iov[52] split by the IO boundary requirement. 1777 */ 1778 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 256, 128, 7); 1779 expected_io->md_buf = md_buf + 256 * 8; 1780 ut_expected_io_set_iov(expected_io, 0, (void *)((uintptr_t)iov[46].iov_base + 864), 1781 iov[46].iov_len - 864); 1782 ut_expected_io_set_iov(expected_io, 1, iov[47].iov_base, iov[47].iov_len); 1783 ut_expected_io_set_iov(expected_io, 2, iov[48].iov_base, iov[48].iov_len); 1784 ut_expected_io_set_iov(expected_io, 3, iov[49].iov_base, iov[49].iov_len); 1785 ut_expected_io_set_iov(expected_io, 4, iov[50].iov_base, iov[50].iov_len); 1786 ut_expected_io_set_iov(expected_io, 5, iov[51].iov_base, iov[51].iov_len); 1787 ut_expected_io_set_iov(expected_io, 6, iov[52].iov_base, 864); 1788 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1789 1790 /* The 5th child IO must be from the remaining 3232 bytes of iov[52] to 1791 * the first 4096 bytes of iov[57] split by the IO boundary requirement. 1792 */ 1793 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 384, 128, 6); 1794 expected_io->md_buf = md_buf + 384 * 8; 1795 ut_expected_io_set_iov(expected_io, 0, (void *)((uintptr_t)iov[52].iov_base + 864), 1796 iov[52].iov_len - 864); 1797 ut_expected_io_set_iov(expected_io, 1, iov[53].iov_base, iov[53].iov_len); 1798 ut_expected_io_set_iov(expected_io, 2, iov[54].iov_base, iov[54].iov_len); 1799 ut_expected_io_set_iov(expected_io, 3, iov[55].iov_base, iov[55].iov_len); 1800 ut_expected_io_set_iov(expected_io, 4, iov[56].iov_base, iov[56].iov_len); 1801 ut_expected_io_set_iov(expected_io, 5, iov[57].iov_base, 4960); 1802 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1803 1804 /* The 6th child IO must be from the remaining 7328 bytes of iov[57] 1805 * to the first 3936 bytes of iov[58] split by the capacity of child iovs. 1806 */ 1807 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 512, 30, 3); 1808 expected_io->md_buf = md_buf + 512 * 8; 1809 ut_expected_io_set_iov(expected_io, 0, (void *)((uintptr_t)iov[57].iov_base + 4960), 1810 iov[57].iov_len - 4960); 1811 ut_expected_io_set_iov(expected_io, 1, iov[58].iov_base, iov[58].iov_len); 1812 ut_expected_io_set_iov(expected_io, 2, iov[59].iov_base, 3936); 1813 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1814 1815 /* The 7th child IO is from the remaining 160 bytes of iov[59] and iov[60]. */ 1816 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 542, 1, 2); 1817 expected_io->md_buf = md_buf + 542 * 8; 1818 ut_expected_io_set_iov(expected_io, 0, (void *)((uintptr_t)iov[59].iov_base + 3936), 1819 iov[59].iov_len - 3936); 1820 ut_expected_io_set_iov(expected_io, 1, iov[60].iov_base, iov[60].iov_len); 1821 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1822 1823 rc = spdk_bdev_readv_blocks_with_md(desc, io_ch, iov, 61, md_buf, 1824 0, 543, io_done, NULL); 1825 CU_ASSERT(rc == 0); 1826 CU_ASSERT(g_io_done == false); 1827 1828 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 1829 stub_complete_io(1); 1830 CU_ASSERT(g_io_done == false); 1831 1832 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 5); 1833 stub_complete_io(5); 1834 CU_ASSERT(g_io_done == false); 1835 1836 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 1837 stub_complete_io(1); 1838 CU_ASSERT(g_io_done == true); 1839 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 1840 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS); 1841 1842 /* Test a WRITE_ZEROES that would span an I/O boundary. WRITE_ZEROES should not be 1843 * split, so test that. 1844 */ 1845 bdev->optimal_io_boundary = 15; 1846 g_io_done = false; 1847 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, 9, 36, 0); 1848 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1849 1850 rc = spdk_bdev_write_zeroes_blocks(desc, io_ch, 9, 36, io_done, NULL); 1851 CU_ASSERT(rc == 0); 1852 CU_ASSERT(g_io_done == false); 1853 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 1854 stub_complete_io(1); 1855 CU_ASSERT(g_io_done == true); 1856 1857 /* Test an UNMAP. This should also not be split. */ 1858 bdev->optimal_io_boundary = 16; 1859 g_io_done = false; 1860 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_UNMAP, 15, 2, 0); 1861 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1862 1863 rc = spdk_bdev_unmap_blocks(desc, io_ch, 15, 2, io_done, NULL); 1864 CU_ASSERT(rc == 0); 1865 CU_ASSERT(g_io_done == false); 1866 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 1867 stub_complete_io(1); 1868 CU_ASSERT(g_io_done == true); 1869 1870 /* Test a FLUSH. This should also not be split. */ 1871 bdev->optimal_io_boundary = 16; 1872 g_io_done = false; 1873 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_FLUSH, 15, 2, 0); 1874 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1875 1876 rc = spdk_bdev_flush_blocks(desc, io_ch, 15, 2, io_done, NULL); 1877 CU_ASSERT(rc == 0); 1878 CU_ASSERT(g_io_done == false); 1879 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 1880 stub_complete_io(1); 1881 CU_ASSERT(g_io_done == true); 1882 1883 /* Test a COPY. This should also not be split. */ 1884 bdev->optimal_io_boundary = 15; 1885 g_io_done = false; 1886 expected_io = ut_alloc_expected_copy_io(SPDK_BDEV_IO_TYPE_COPY, 9, 45, 36); 1887 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1888 1889 rc = spdk_bdev_copy_blocks(desc, io_ch, 9, 45, 36, io_done, NULL); 1890 CU_ASSERT(rc == 0); 1891 CU_ASSERT(g_io_done == false); 1892 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 1893 stub_complete_io(1); 1894 CU_ASSERT(g_io_done == true); 1895 1896 CU_ASSERT(TAILQ_EMPTY(&g_bdev_ut_channel->expected_io)); 1897 1898 /* Children requests return an error status */ 1899 bdev->optimal_io_boundary = 16; 1900 iov[0].iov_base = (void *)0x10000; 1901 iov[0].iov_len = 512 * 64; 1902 g_io_exp_status = SPDK_BDEV_IO_STATUS_FAILED; 1903 g_io_done = false; 1904 g_io_status = SPDK_BDEV_IO_STATUS_SUCCESS; 1905 1906 rc = spdk_bdev_readv_blocks(desc, io_ch, iov, 1, 1, 64, io_done, NULL); 1907 CU_ASSERT(rc == 0); 1908 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 5); 1909 stub_complete_io(4); 1910 CU_ASSERT(g_io_done == false); 1911 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS); 1912 stub_complete_io(1); 1913 CU_ASSERT(g_io_done == true); 1914 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED); 1915 1916 /* Test if a multi vector command terminated with failure before continuing 1917 * splitting process when one of child I/O failed. 1918 * The multi vector command is as same as the above that needs to be split by strip 1919 * and then needs to be split further due to the capacity of child iovs. 1920 */ 1921 for (i = 0; i < SPDK_BDEV_IO_NUM_CHILD_IOV - 1; i++) { 1922 iov[i].iov_base = (void *)((i + 1) * 0x10000); 1923 iov[i].iov_len = 512; 1924 } 1925 iov[SPDK_BDEV_IO_NUM_CHILD_IOV - 1].iov_base = (void *)(SPDK_BDEV_IO_NUM_CHILD_IOV * 0x10000); 1926 iov[SPDK_BDEV_IO_NUM_CHILD_IOV - 1].iov_len = 256; 1927 1928 iov[SPDK_BDEV_IO_NUM_CHILD_IOV].iov_base = (void *)((SPDK_BDEV_IO_NUM_CHILD_IOV + 1) * 0x10000); 1929 iov[SPDK_BDEV_IO_NUM_CHILD_IOV].iov_len = 256; 1930 1931 iov[SPDK_BDEV_IO_NUM_CHILD_IOV + 1].iov_base = (void *)((SPDK_BDEV_IO_NUM_CHILD_IOV + 2) * 0x10000); 1932 iov[SPDK_BDEV_IO_NUM_CHILD_IOV + 1].iov_len = 512; 1933 1934 bdev->optimal_io_boundary = SPDK_BDEV_IO_NUM_CHILD_IOV; 1935 1936 g_io_exp_status = SPDK_BDEV_IO_STATUS_FAILED; 1937 g_io_done = false; 1938 g_io_status = SPDK_BDEV_IO_STATUS_SUCCESS; 1939 1940 rc = spdk_bdev_readv_blocks(desc, io_ch, iov, SPDK_BDEV_IO_NUM_CHILD_IOV * 2, 0, 1941 SPDK_BDEV_IO_NUM_CHILD_IOV + 1, io_done, NULL); 1942 CU_ASSERT(rc == 0); 1943 CU_ASSERT(g_io_done == false); 1944 1945 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 1946 stub_complete_io(1); 1947 CU_ASSERT(g_io_done == true); 1948 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED); 1949 1950 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 1951 1952 /* for this test we will create the following conditions to hit the code path where 1953 * we are trying to send and IO following a split that has no iovs because we had to 1954 * trim them for alignment reasons. 1955 * 1956 * - 16K boundary, our IO will start at offset 0 with a length of 0x4200 1957 * - Our IOVs are 0x212 in size so that we run into the 16K boundary at child IOV 1958 * position 30 and overshoot by 0x2e. 1959 * - That means we'll send the IO and loop back to pick up the remaining bytes at 1960 * child IOV index 31. When we do, we find that we have to shorten index 31 by 0x2e 1961 * which eliniates that vector so we just send the first split IO with 30 vectors 1962 * and let the completion pick up the last 2 vectors. 1963 */ 1964 bdev->optimal_io_boundary = 32; 1965 bdev->split_on_optimal_io_boundary = true; 1966 g_io_done = false; 1967 1968 /* Init all parent IOVs to 0x212 */ 1969 for (i = 0; i < SPDK_BDEV_IO_NUM_CHILD_IOV + 2; i++) { 1970 iov[i].iov_base = (void *)((i + 1) * 0x10000); 1971 iov[i].iov_len = 0x212; 1972 } 1973 1974 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0, SPDK_BDEV_IO_NUM_CHILD_IOV, 1975 SPDK_BDEV_IO_NUM_CHILD_IOV - 1); 1976 /* expect 0-29 to be 1:1 with the parent iov */ 1977 for (i = 0; i < SPDK_BDEV_IO_NUM_CHILD_IOV - 2; i++) { 1978 ut_expected_io_set_iov(expected_io, i, iov[i].iov_base, iov[i].iov_len); 1979 } 1980 1981 /* expect index 30 to be shortened to 0x1e4 (0x212 - 0x1e) because of the alignment 1982 * where 0x1e is the amount we overshot the 16K boundary 1983 */ 1984 ut_expected_io_set_iov(expected_io, SPDK_BDEV_IO_NUM_CHILD_IOV - 2, 1985 (void *)(iov[SPDK_BDEV_IO_NUM_CHILD_IOV - 2].iov_base), 0x1e4); 1986 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1987 1988 /* 2nd child IO will have 2 remaining vectors, one to pick up from the one that was 1989 * shortened that take it to the next boundary and then a final one to get us to 1990 * 0x4200 bytes for the IO. 1991 */ 1992 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, SPDK_BDEV_IO_NUM_CHILD_IOV, 1993 SPDK_BDEV_IO_NUM_CHILD_IOV, 2); 1994 /* position 30 picked up the remaining bytes to the next boundary */ 1995 ut_expected_io_set_iov(expected_io, 0, 1996 (void *)(iov[SPDK_BDEV_IO_NUM_CHILD_IOV - 2].iov_base + 0x1e4), 0x2e); 1997 1998 /* position 31 picked the the rest of the transfer to get us to 0x4200 */ 1999 ut_expected_io_set_iov(expected_io, 1, 2000 (void *)(iov[SPDK_BDEV_IO_NUM_CHILD_IOV - 1].iov_base), 0x1d2); 2001 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2002 2003 rc = spdk_bdev_readv_blocks(desc, io_ch, iov, SPDK_BDEV_IO_NUM_CHILD_IOV + 1, 0, 2004 SPDK_BDEV_IO_NUM_CHILD_IOV + 1, io_done, NULL); 2005 CU_ASSERT(rc == 0); 2006 CU_ASSERT(g_io_done == false); 2007 2008 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 2009 stub_complete_io(1); 2010 CU_ASSERT(g_io_done == false); 2011 2012 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 2013 stub_complete_io(1); 2014 CU_ASSERT(g_io_done == true); 2015 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 2016 2017 spdk_put_io_channel(io_ch); 2018 spdk_bdev_close(desc); 2019 free_bdev(bdev); 2020 ut_fini_bdev(); 2021 } 2022 2023 static void 2024 bdev_io_max_size_and_segment_split_test(void) 2025 { 2026 struct spdk_bdev *bdev; 2027 struct spdk_bdev_desc *desc = NULL; 2028 struct spdk_io_channel *io_ch; 2029 struct spdk_bdev_opts bdev_opts = {}; 2030 struct iovec iov[SPDK_BDEV_IO_NUM_CHILD_IOV * 2]; 2031 struct ut_expected_io *expected_io; 2032 uint64_t i; 2033 int rc; 2034 2035 spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts)); 2036 bdev_opts.bdev_io_pool_size = 512; 2037 bdev_opts.bdev_io_cache_size = 64; 2038 bdev_opts.opts_size = sizeof(bdev_opts); 2039 ut_init_bdev(&bdev_opts); 2040 2041 bdev = allocate_bdev("bdev0"); 2042 2043 rc = spdk_bdev_open_ext(bdev->name, true, bdev_ut_event_cb, NULL, &desc); 2044 CU_ASSERT(rc == 0); 2045 SPDK_CU_ASSERT_FATAL(desc != NULL); 2046 io_ch = spdk_bdev_get_io_channel(desc); 2047 CU_ASSERT(io_ch != NULL); 2048 2049 bdev->split_on_optimal_io_boundary = false; 2050 bdev->optimal_io_boundary = 0; 2051 2052 /* Case 0 max_num_segments == 0. 2053 * but segment size 2 * 512 > 512 2054 */ 2055 bdev->max_segment_size = 512; 2056 bdev->max_num_segments = 0; 2057 g_io_done = false; 2058 2059 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 14, 2, 2); 2060 ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 512); 2061 ut_expected_io_set_iov(expected_io, 1, (void *)(0xF000 + 512), 512); 2062 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2063 2064 rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 2, io_done, NULL); 2065 CU_ASSERT(rc == 0); 2066 CU_ASSERT(g_io_done == false); 2067 2068 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 2069 stub_complete_io(1); 2070 CU_ASSERT(g_io_done == true); 2071 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 2072 2073 /* Case 1 max_segment_size == 0 2074 * but iov num 2 > 1. 2075 */ 2076 bdev->max_segment_size = 0; 2077 bdev->max_num_segments = 1; 2078 g_io_done = false; 2079 2080 iov[0].iov_base = (void *)0x10000; 2081 iov[0].iov_len = 512; 2082 iov[1].iov_base = (void *)0x20000; 2083 iov[1].iov_len = 8 * 512; 2084 2085 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 14, 1, 1); 2086 ut_expected_io_set_iov(expected_io, 0, iov[0].iov_base, iov[0].iov_len); 2087 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2088 2089 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 15, 8, 1); 2090 ut_expected_io_set_iov(expected_io, 0, iov[1].iov_base, iov[1].iov_len); 2091 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2092 2093 rc = spdk_bdev_readv_blocks(desc, io_ch, iov, 2, 14, 9, io_done, NULL); 2094 CU_ASSERT(rc == 0); 2095 CU_ASSERT(g_io_done == false); 2096 2097 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 2098 stub_complete_io(2); 2099 CU_ASSERT(g_io_done == true); 2100 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 2101 2102 /* Test that a non-vector command is split correctly. 2103 * Set up the expected values before calling spdk_bdev_read_blocks 2104 */ 2105 bdev->max_segment_size = 512; 2106 bdev->max_num_segments = 1; 2107 g_io_done = false; 2108 2109 /* Child IO 0 */ 2110 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 14, 1, 1); 2111 ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 512); 2112 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2113 2114 /* Child IO 1 */ 2115 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 15, 1, 1); 2116 ut_expected_io_set_iov(expected_io, 0, (void *)(0xF000 + 1 * 512), 512); 2117 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2118 2119 /* spdk_bdev_read_blocks will submit the first child immediately. */ 2120 rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 2, io_done, NULL); 2121 CU_ASSERT(rc == 0); 2122 CU_ASSERT(g_io_done == false); 2123 2124 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 2125 stub_complete_io(2); 2126 CU_ASSERT(g_io_done == true); 2127 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 2128 2129 /* Now set up a more complex, multi-vector command that needs to be split, 2130 * including splitting iovecs. 2131 */ 2132 bdev->max_segment_size = 2 * 512; 2133 bdev->max_num_segments = 1; 2134 g_io_done = false; 2135 2136 iov[0].iov_base = (void *)0x10000; 2137 iov[0].iov_len = 2 * 512; 2138 iov[1].iov_base = (void *)0x20000; 2139 iov[1].iov_len = 4 * 512; 2140 iov[2].iov_base = (void *)0x30000; 2141 iov[2].iov_len = 6 * 512; 2142 2143 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 14, 2, 1); 2144 ut_expected_io_set_iov(expected_io, 0, iov[0].iov_base, 512 * 2); 2145 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2146 2147 /* Split iov[1].size to 2 iov entries then split the segments */ 2148 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 16, 2, 1); 2149 ut_expected_io_set_iov(expected_io, 0, iov[1].iov_base, 512 * 2); 2150 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2151 2152 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 18, 2, 1); 2153 ut_expected_io_set_iov(expected_io, 0, iov[1].iov_base + 512 * 2, 512 * 2); 2154 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2155 2156 /* Split iov[2].size to 3 iov entries then split the segments */ 2157 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 20, 2, 1); 2158 ut_expected_io_set_iov(expected_io, 0, iov[2].iov_base, 512 * 2); 2159 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2160 2161 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 22, 2, 1); 2162 ut_expected_io_set_iov(expected_io, 0, iov[2].iov_base + 512 * 2, 512 * 2); 2163 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2164 2165 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 24, 2, 1); 2166 ut_expected_io_set_iov(expected_io, 0, iov[2].iov_base + 512 * 4, 512 * 2); 2167 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2168 2169 rc = spdk_bdev_writev_blocks(desc, io_ch, iov, 3, 14, 12, io_done, NULL); 2170 CU_ASSERT(rc == 0); 2171 CU_ASSERT(g_io_done == false); 2172 2173 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 6); 2174 stub_complete_io(6); 2175 CU_ASSERT(g_io_done == true); 2176 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 2177 2178 /* Test multi vector command that needs to be split by strip and then needs to be 2179 * split further due to the capacity of parent IO child iovs. 2180 */ 2181 bdev->max_segment_size = 512; 2182 bdev->max_num_segments = 1; 2183 g_io_done = false; 2184 2185 for (i = 0; i < SPDK_BDEV_IO_NUM_CHILD_IOV; i++) { 2186 iov[i].iov_base = (void *)((i + 1) * 0x10000); 2187 iov[i].iov_len = 512 * 2; 2188 } 2189 2190 /* Each input iov.size is split into 2 iovs, 2191 * half of the input iov can fill all child iov entries of a single IO. 2192 */ 2193 for (i = 0; i < SPDK_BDEV_IO_NUM_CHILD_IOV / 2; i++) { 2194 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 2 * i, 1, 1); 2195 ut_expected_io_set_iov(expected_io, 0, iov[i].iov_base, 512); 2196 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2197 2198 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 2 * i + 1, 1, 1); 2199 ut_expected_io_set_iov(expected_io, 0, iov[i].iov_base + 512, 512); 2200 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2201 } 2202 2203 /* The remaining iov is split in the second round */ 2204 for (i = SPDK_BDEV_IO_NUM_CHILD_IOV / 2; i < SPDK_BDEV_IO_NUM_CHILD_IOV; i++) { 2205 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, i * 2, 1, 1); 2206 ut_expected_io_set_iov(expected_io, 0, iov[i].iov_base, 512); 2207 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2208 2209 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, i * 2 + 1, 1, 1); 2210 ut_expected_io_set_iov(expected_io, 0, iov[i].iov_base + 512, 512); 2211 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2212 } 2213 2214 rc = spdk_bdev_readv_blocks(desc, io_ch, iov, SPDK_BDEV_IO_NUM_CHILD_IOV, 0, 2215 SPDK_BDEV_IO_NUM_CHILD_IOV * 2, io_done, NULL); 2216 CU_ASSERT(rc == 0); 2217 CU_ASSERT(g_io_done == false); 2218 2219 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == SPDK_BDEV_IO_NUM_CHILD_IOV); 2220 stub_complete_io(SPDK_BDEV_IO_NUM_CHILD_IOV); 2221 CU_ASSERT(g_io_done == false); 2222 2223 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == SPDK_BDEV_IO_NUM_CHILD_IOV); 2224 stub_complete_io(SPDK_BDEV_IO_NUM_CHILD_IOV); 2225 CU_ASSERT(g_io_done == true); 2226 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 2227 2228 /* A wrong case, a child IO that is divided does 2229 * not meet the principle of multiples of block size, 2230 * and exits with error 2231 */ 2232 bdev->max_segment_size = 512; 2233 bdev->max_num_segments = 1; 2234 g_io_done = false; 2235 2236 iov[0].iov_base = (void *)0x10000; 2237 iov[0].iov_len = 512 + 256; 2238 iov[1].iov_base = (void *)0x20000; 2239 iov[1].iov_len = 256; 2240 2241 /* iov[0] is split to 512 and 256. 2242 * 256 is less than a block size, and it is found 2243 * in the next round of split that it is the first child IO smaller than 2244 * the block size, so the error exit 2245 */ 2246 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0, 1, 1); 2247 ut_expected_io_set_iov(expected_io, 0, iov[0].iov_base, 512); 2248 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2249 2250 rc = spdk_bdev_readv_blocks(desc, io_ch, iov, 2, 0, 2, io_done, NULL); 2251 CU_ASSERT(rc == 0); 2252 CU_ASSERT(g_io_done == false); 2253 2254 /* First child IO is OK */ 2255 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 2256 stub_complete_io(1); 2257 CU_ASSERT(g_io_done == true); 2258 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 2259 2260 /* error exit */ 2261 stub_complete_io(1); 2262 CU_ASSERT(g_io_done == true); 2263 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED); 2264 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 2265 2266 /* Test multi vector command that needs to be split by strip and then needs to be 2267 * split further due to the capacity of child iovs. 2268 * 2269 * In this case, the last two iovs need to be split, but it will exceed the capacity 2270 * of child iovs, so it needs to wait until the first batch completed. 2271 */ 2272 bdev->max_segment_size = 512; 2273 bdev->max_num_segments = SPDK_BDEV_IO_NUM_CHILD_IOV; 2274 g_io_done = false; 2275 2276 for (i = 0; i < SPDK_BDEV_IO_NUM_CHILD_IOV - 2; i++) { 2277 iov[i].iov_base = (void *)((i + 1) * 0x10000); 2278 iov[i].iov_len = 512; 2279 } 2280 for (i = SPDK_BDEV_IO_NUM_CHILD_IOV - 2; i < SPDK_BDEV_IO_NUM_CHILD_IOV; i++) { 2281 iov[i].iov_base = (void *)((i + 1) * 0x10000); 2282 iov[i].iov_len = 512 * 2; 2283 } 2284 2285 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0, 2286 SPDK_BDEV_IO_NUM_CHILD_IOV, SPDK_BDEV_IO_NUM_CHILD_IOV); 2287 /* 0 ~ (SPDK_BDEV_IO_NUM_CHILD_IOV - 2) Will not be split */ 2288 for (i = 0; i < SPDK_BDEV_IO_NUM_CHILD_IOV - 2; i++) { 2289 ut_expected_io_set_iov(expected_io, i, iov[i].iov_base, iov[i].iov_len); 2290 } 2291 /* (SPDK_BDEV_IO_NUM_CHILD_IOV - 2) is split */ 2292 ut_expected_io_set_iov(expected_io, i, iov[i].iov_base, 512); 2293 ut_expected_io_set_iov(expected_io, i + 1, iov[i].iov_base + 512, 512); 2294 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2295 2296 /* Child iov entries exceed the max num of parent IO so split it in next round */ 2297 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, SPDK_BDEV_IO_NUM_CHILD_IOV, 2, 2); 2298 ut_expected_io_set_iov(expected_io, 0, iov[i + 1].iov_base, 512); 2299 ut_expected_io_set_iov(expected_io, 1, iov[i + 1].iov_base + 512, 512); 2300 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2301 2302 rc = spdk_bdev_readv_blocks(desc, io_ch, iov, SPDK_BDEV_IO_NUM_CHILD_IOV, 0, 2303 SPDK_BDEV_IO_NUM_CHILD_IOV + 2, io_done, NULL); 2304 CU_ASSERT(rc == 0); 2305 CU_ASSERT(g_io_done == false); 2306 2307 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 2308 stub_complete_io(1); 2309 CU_ASSERT(g_io_done == false); 2310 2311 /* Next round */ 2312 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 2313 stub_complete_io(1); 2314 CU_ASSERT(g_io_done == true); 2315 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 2316 2317 /* This case is similar to the previous one, but the io composed of 2318 * the last few entries of child iov is not enough for a blocklen, so they 2319 * cannot be put into this IO, but wait until the next time. 2320 */ 2321 bdev->max_segment_size = 512; 2322 bdev->max_num_segments = SPDK_BDEV_IO_NUM_CHILD_IOV; 2323 g_io_done = false; 2324 2325 for (i = 0; i < SPDK_BDEV_IO_NUM_CHILD_IOV - 2; i++) { 2326 iov[i].iov_base = (void *)((i + 1) * 0x10000); 2327 iov[i].iov_len = 512; 2328 } 2329 2330 for (i = SPDK_BDEV_IO_NUM_CHILD_IOV - 2; i < SPDK_BDEV_IO_NUM_CHILD_IOV + 2; i++) { 2331 iov[i].iov_base = (void *)((i + 1) * 0x10000); 2332 iov[i].iov_len = 128; 2333 } 2334 2335 /* First child iovcnt is't SPDK_BDEV_IO_NUM_CHILD_IOV but SPDK_BDEV_IO_NUM_CHILD_IOV - 2. 2336 * Because the left 2 iov is not enough for a blocklen. 2337 */ 2338 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0, 2339 SPDK_BDEV_IO_NUM_CHILD_IOV - 2, SPDK_BDEV_IO_NUM_CHILD_IOV - 2); 2340 for (i = 0; i < SPDK_BDEV_IO_NUM_CHILD_IOV - 2; i++) { 2341 ut_expected_io_set_iov(expected_io, i, iov[i].iov_base, iov[i].iov_len); 2342 } 2343 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2344 2345 /* The second child io waits until the end of the first child io before executing. 2346 * Because the iovcnt of the two IOs exceeds the child iovcnt of the parent IO. 2347 * SPDK_BDEV_IO_NUM_CHILD_IOV - 2 to SPDK_BDEV_IO_NUM_CHILD_IOV + 2 2348 */ 2349 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, SPDK_BDEV_IO_NUM_CHILD_IOV - 2, 2350 1, 4); 2351 ut_expected_io_set_iov(expected_io, 0, iov[i].iov_base, iov[i].iov_len); 2352 ut_expected_io_set_iov(expected_io, 1, iov[i + 1].iov_base, iov[i + 1].iov_len); 2353 ut_expected_io_set_iov(expected_io, 2, iov[i + 2].iov_base, iov[i + 2].iov_len); 2354 ut_expected_io_set_iov(expected_io, 3, iov[i + 3].iov_base, iov[i + 3].iov_len); 2355 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2356 2357 rc = spdk_bdev_readv_blocks(desc, io_ch, iov, SPDK_BDEV_IO_NUM_CHILD_IOV + 2, 0, 2358 SPDK_BDEV_IO_NUM_CHILD_IOV - 1, io_done, NULL); 2359 CU_ASSERT(rc == 0); 2360 CU_ASSERT(g_io_done == false); 2361 2362 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 2363 stub_complete_io(1); 2364 CU_ASSERT(g_io_done == false); 2365 2366 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 2367 stub_complete_io(1); 2368 CU_ASSERT(g_io_done == true); 2369 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 2370 2371 /* A very complicated case. Each sg entry exceeds max_segment_size and 2372 * needs to be split. At the same time, child io must be a multiple of blocklen. 2373 * At the same time, child iovcnt exceeds parent iovcnt. 2374 */ 2375 bdev->max_segment_size = 512 + 128; 2376 bdev->max_num_segments = 3; 2377 g_io_done = false; 2378 2379 for (i = 0; i < SPDK_BDEV_IO_NUM_CHILD_IOV - 2; i++) { 2380 iov[i].iov_base = (void *)((i + 1) * 0x10000); 2381 iov[i].iov_len = 512 + 256; 2382 } 2383 2384 for (i = SPDK_BDEV_IO_NUM_CHILD_IOV - 2; i < SPDK_BDEV_IO_NUM_CHILD_IOV + 2; i++) { 2385 iov[i].iov_base = (void *)((i + 1) * 0x10000); 2386 iov[i].iov_len = 512 + 128; 2387 } 2388 2389 /* Child IOs use 9 entries per for() round and 3 * 9 = 27 child iov entries. 2390 * Consume 4 parent IO iov entries per for() round and 6 block size. 2391 * Generate 9 child IOs. 2392 */ 2393 for (i = 0; i < 3; i++) { 2394 uint32_t j = i * 4; 2395 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, i * 6, 2, 3); 2396 ut_expected_io_set_iov(expected_io, 0, iov[j].iov_base, 640); 2397 ut_expected_io_set_iov(expected_io, 1, iov[j].iov_base + 640, 128); 2398 ut_expected_io_set_iov(expected_io, 2, iov[j + 1].iov_base, 256); 2399 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2400 2401 /* Child io must be a multiple of blocklen 2402 * iov[j + 2] must be split. If the third entry is also added, 2403 * the multiple of blocklen cannot be guaranteed. But it still 2404 * occupies one iov entry of the parent child iov. 2405 */ 2406 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, i * 6 + 2, 2, 2); 2407 ut_expected_io_set_iov(expected_io, 0, iov[j + 1].iov_base + 256, 512); 2408 ut_expected_io_set_iov(expected_io, 1, iov[j + 2].iov_base, 512); 2409 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2410 2411 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, i * 6 + 4, 2, 3); 2412 ut_expected_io_set_iov(expected_io, 0, iov[j + 2].iov_base + 512, 256); 2413 ut_expected_io_set_iov(expected_io, 1, iov[j + 3].iov_base, 640); 2414 ut_expected_io_set_iov(expected_io, 2, iov[j + 3].iov_base + 640, 128); 2415 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2416 } 2417 2418 /* Child iov position at 27, the 10th child IO 2419 * iov entry index is 3 * 4 and offset is 3 * 6 2420 */ 2421 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 18, 2, 3); 2422 ut_expected_io_set_iov(expected_io, 0, iov[12].iov_base, 640); 2423 ut_expected_io_set_iov(expected_io, 1, iov[12].iov_base + 640, 128); 2424 ut_expected_io_set_iov(expected_io, 2, iov[13].iov_base, 256); 2425 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2426 2427 /* Child iov position at 30, the 11th child IO */ 2428 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 20, 2, 2); 2429 ut_expected_io_set_iov(expected_io, 0, iov[13].iov_base + 256, 512); 2430 ut_expected_io_set_iov(expected_io, 1, iov[14].iov_base, 512); 2431 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2432 2433 /* The 2nd split round and iovpos is 0, the 12th child IO */ 2434 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 22, 2, 3); 2435 ut_expected_io_set_iov(expected_io, 0, iov[14].iov_base + 512, 256); 2436 ut_expected_io_set_iov(expected_io, 1, iov[15].iov_base, 640); 2437 ut_expected_io_set_iov(expected_io, 2, iov[15].iov_base + 640, 128); 2438 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2439 2440 /* Consume 9 child IOs and 27 child iov entries. 2441 * Consume 4 parent IO iov entries per for() round and 6 block size. 2442 * Parent IO iov index start from 16 and block offset start from 24 2443 */ 2444 for (i = 0; i < 3; i++) { 2445 uint32_t j = i * 4 + 16; 2446 uint32_t offset = i * 6 + 24; 2447 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, offset, 2, 3); 2448 ut_expected_io_set_iov(expected_io, 0, iov[j].iov_base, 640); 2449 ut_expected_io_set_iov(expected_io, 1, iov[j].iov_base + 640, 128); 2450 ut_expected_io_set_iov(expected_io, 2, iov[j + 1].iov_base, 256); 2451 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2452 2453 /* Child io must be a multiple of blocklen 2454 * iov[j + 2] must be split. If the third entry is also added, 2455 * the multiple of blocklen cannot be guaranteed. But it still 2456 * occupies one iov entry of the parent child iov. 2457 */ 2458 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, offset + 2, 2, 2); 2459 ut_expected_io_set_iov(expected_io, 0, iov[j + 1].iov_base + 256, 512); 2460 ut_expected_io_set_iov(expected_io, 1, iov[j + 2].iov_base, 512); 2461 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2462 2463 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, offset + 4, 2, 3); 2464 ut_expected_io_set_iov(expected_io, 0, iov[j + 2].iov_base + 512, 256); 2465 ut_expected_io_set_iov(expected_io, 1, iov[j + 3].iov_base, 640); 2466 ut_expected_io_set_iov(expected_io, 2, iov[j + 3].iov_base + 640, 128); 2467 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2468 } 2469 2470 /* The 22th child IO, child iov position at 30 */ 2471 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 42, 1, 1); 2472 ut_expected_io_set_iov(expected_io, 0, iov[28].iov_base, 512); 2473 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2474 2475 /* The third round */ 2476 /* Here is the 23nd child IO and child iovpos is 0 */ 2477 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 43, 2, 3); 2478 ut_expected_io_set_iov(expected_io, 0, iov[28].iov_base + 512, 256); 2479 ut_expected_io_set_iov(expected_io, 1, iov[29].iov_base, 640); 2480 ut_expected_io_set_iov(expected_io, 2, iov[29].iov_base + 640, 128); 2481 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2482 2483 /* The 24th child IO */ 2484 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 45, 3, 3); 2485 ut_expected_io_set_iov(expected_io, 0, iov[30].iov_base, 640); 2486 ut_expected_io_set_iov(expected_io, 1, iov[31].iov_base, 640); 2487 ut_expected_io_set_iov(expected_io, 2, iov[32].iov_base, 256); 2488 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2489 2490 /* The 25th child IO */ 2491 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 48, 2, 2); 2492 ut_expected_io_set_iov(expected_io, 0, iov[32].iov_base + 256, 384); 2493 ut_expected_io_set_iov(expected_io, 1, iov[33].iov_base, 640); 2494 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2495 2496 rc = spdk_bdev_readv_blocks(desc, io_ch, iov, SPDK_BDEV_IO_NUM_CHILD_IOV + 2, 0, 2497 50, io_done, NULL); 2498 CU_ASSERT(rc == 0); 2499 CU_ASSERT(g_io_done == false); 2500 2501 /* Parent IO supports up to 32 child iovs, so it is calculated that 2502 * a maximum of 11 IOs can be split at a time, and the 2503 * splitting will continue after the first batch is over. 2504 */ 2505 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 11); 2506 stub_complete_io(11); 2507 CU_ASSERT(g_io_done == false); 2508 2509 /* The 2nd round */ 2510 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 11); 2511 stub_complete_io(11); 2512 CU_ASSERT(g_io_done == false); 2513 2514 /* The last round */ 2515 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 3); 2516 stub_complete_io(3); 2517 CU_ASSERT(g_io_done == true); 2518 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 2519 2520 /* Test an WRITE_ZEROES. This should also not be split. */ 2521 bdev->max_segment_size = 512; 2522 bdev->max_num_segments = 1; 2523 g_io_done = false; 2524 2525 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, 9, 36, 0); 2526 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2527 2528 rc = spdk_bdev_write_zeroes_blocks(desc, io_ch, 9, 36, io_done, NULL); 2529 CU_ASSERT(rc == 0); 2530 CU_ASSERT(g_io_done == false); 2531 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 2532 stub_complete_io(1); 2533 CU_ASSERT(g_io_done == true); 2534 2535 /* Test an UNMAP. This should also not be split. */ 2536 g_io_done = false; 2537 2538 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_UNMAP, 15, 4, 0); 2539 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2540 2541 rc = spdk_bdev_unmap_blocks(desc, io_ch, 15, 4, io_done, NULL); 2542 CU_ASSERT(rc == 0); 2543 CU_ASSERT(g_io_done == false); 2544 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 2545 stub_complete_io(1); 2546 CU_ASSERT(g_io_done == true); 2547 2548 /* Test a FLUSH. This should also not be split. */ 2549 g_io_done = false; 2550 2551 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_FLUSH, 15, 4, 0); 2552 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2553 2554 rc = spdk_bdev_flush_blocks(desc, io_ch, 15, 2, io_done, NULL); 2555 CU_ASSERT(rc == 0); 2556 CU_ASSERT(g_io_done == false); 2557 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 2558 stub_complete_io(1); 2559 CU_ASSERT(g_io_done == true); 2560 2561 /* Test a COPY. This should also not be split. */ 2562 g_io_done = false; 2563 2564 expected_io = ut_alloc_expected_copy_io(SPDK_BDEV_IO_TYPE_COPY, 9, 45, 36); 2565 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2566 2567 rc = spdk_bdev_copy_blocks(desc, io_ch, 9, 45, 36, io_done, NULL); 2568 CU_ASSERT(rc == 0); 2569 CU_ASSERT(g_io_done == false); 2570 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 2571 stub_complete_io(1); 2572 CU_ASSERT(g_io_done == true); 2573 2574 /* Test that IOs are split on max_rw_size */ 2575 bdev->max_rw_size = 2; 2576 bdev->max_segment_size = 0; 2577 bdev->max_num_segments = 0; 2578 g_io_done = false; 2579 2580 /* 5 blocks in a contiguous buffer */ 2581 iov[0].iov_base = (void *)0x10000; 2582 iov[0].iov_len = 5 * 512; 2583 2584 /* First: offset=0, num_blocks=2 */ 2585 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0, 2, 1); 2586 ut_expected_io_set_iov(expected_io, 0, (void *)0x10000, 2 * 512); 2587 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2588 /* Second: offset=2, num_blocks=2 */ 2589 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 2, 2, 1); 2590 ut_expected_io_set_iov(expected_io, 0, (void *)0x10000 + 2 * 512, 2 * 512); 2591 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2592 /* Third: offset=4, num_blocks=1 */ 2593 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 4, 1, 1); 2594 ut_expected_io_set_iov(expected_io, 0, (void *)0x10000 + 4 * 512, 512); 2595 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2596 2597 rc = spdk_bdev_readv_blocks(desc, io_ch, iov, 1, 0, 5, io_done, NULL); 2598 CU_ASSERT(rc == 0); 2599 CU_ASSERT(g_io_done == false); 2600 2601 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 3); 2602 stub_complete_io(3); 2603 CU_ASSERT(g_io_done == true); 2604 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 2605 2606 /* Check splitting on both max_rw_size + max_num_segments */ 2607 bdev->max_rw_size = 2; 2608 bdev->max_num_segments = 2; 2609 bdev->max_segment_size = 0; 2610 g_io_done = false; 2611 2612 /* 5 blocks split across 4 iovs */ 2613 iov[0].iov_base = (void *)0x10000; 2614 iov[0].iov_len = 3 * 512; 2615 iov[1].iov_base = (void *)0x20000; 2616 iov[1].iov_len = 256; 2617 iov[2].iov_base = (void *)0x30000; 2618 iov[2].iov_len = 256; 2619 iov[3].iov_base = (void *)0x40000; 2620 iov[3].iov_len = 512; 2621 2622 /* First: offset=0, num_blocks=2, iovcnt=1 */ 2623 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0, 2, 1); 2624 ut_expected_io_set_iov(expected_io, 0, (void *)0x10000, 2 * 512); 2625 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2626 /* Second: offset=2, num_blocks=1, iovcnt=1 (max_segment_size prevents from submitting 2627 * the rest of iov[0], and iov[1]+iov[2]) 2628 */ 2629 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 2, 1, 1); 2630 ut_expected_io_set_iov(expected_io, 0, (void *)0x10000 + 2 * 512, 512); 2631 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2632 /* Third: offset=3, num_blocks=1, iovcnt=2 (iov[1]+iov[2]) */ 2633 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 3, 1, 2); 2634 ut_expected_io_set_iov(expected_io, 0, (void *)0x20000, 256); 2635 ut_expected_io_set_iov(expected_io, 1, (void *)0x30000, 256); 2636 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2637 /* Fourth: offset=4, num_blocks=1, iovcnt=1 (iov[3]) */ 2638 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 4, 1, 1); 2639 ut_expected_io_set_iov(expected_io, 0, (void *)0x40000, 512); 2640 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2641 2642 rc = spdk_bdev_readv_blocks(desc, io_ch, iov, 4, 0, 5, io_done, NULL); 2643 CU_ASSERT(rc == 0); 2644 CU_ASSERT(g_io_done == false); 2645 2646 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 4); 2647 stub_complete_io(4); 2648 CU_ASSERT(g_io_done == true); 2649 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 2650 2651 /* Check splitting on both max_rw_size + max_segment_size */ 2652 bdev->max_rw_size = 2; 2653 bdev->max_segment_size = 512; 2654 bdev->max_num_segments = 0; 2655 g_io_done = false; 2656 2657 /* 6 blocks in a contiguous buffer */ 2658 iov[0].iov_base = (void *)0x10000; 2659 iov[0].iov_len = 6 * 512; 2660 2661 /* We expect 3 IOs each with 2 blocks and 2 iovs */ 2662 for (i = 0; i < 3; ++i) { 2663 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, i * 2, 2, 2); 2664 ut_expected_io_set_iov(expected_io, 0, (void *)0x10000 + i * 2 * 512, 512); 2665 ut_expected_io_set_iov(expected_io, 1, (void *)0x10000 + i * 2 * 512 + 512, 512); 2666 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2667 } 2668 2669 rc = spdk_bdev_readv_blocks(desc, io_ch, iov, 1, 0, 6, io_done, NULL); 2670 CU_ASSERT(rc == 0); 2671 CU_ASSERT(g_io_done == false); 2672 2673 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 3); 2674 stub_complete_io(3); 2675 CU_ASSERT(g_io_done == true); 2676 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 2677 2678 /* Check splitting on max_rw_size limited by SPDK_BDEV_IO_NUM_CHILD_IOV */ 2679 bdev->max_rw_size = 1; 2680 bdev->max_segment_size = 0; 2681 bdev->max_num_segments = 0; 2682 g_io_done = false; 2683 2684 /* SPDK_BDEV_IO_NUM_CHILD_IOV + 1 blocks */ 2685 iov[0].iov_base = (void *)0x10000; 2686 iov[0].iov_len = (SPDK_BDEV_IO_NUM_CHILD_IOV + 1) * 512; 2687 2688 /* We expect SPDK_BDEV_IO_NUM_CHILD_IOV + 1 IOs each with a single iov */ 2689 for (i = 0; i < 3; ++i) { 2690 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, i, 1, 1); 2691 ut_expected_io_set_iov(expected_io, 0, (void *)0x10000 + i * 512, 512); 2692 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2693 } 2694 2695 rc = spdk_bdev_readv_blocks(desc, io_ch, iov, 1, 0, SPDK_BDEV_IO_NUM_CHILD_IOV + 1, io_done, NULL); 2696 CU_ASSERT(rc == 0); 2697 CU_ASSERT(g_io_done == false); 2698 2699 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == SPDK_BDEV_IO_NUM_CHILD_IOV); 2700 stub_complete_io(SPDK_BDEV_IO_NUM_CHILD_IOV); 2701 CU_ASSERT(g_io_done == false); 2702 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 2703 stub_complete_io(1); 2704 CU_ASSERT(g_io_done == true); 2705 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 2706 2707 spdk_put_io_channel(io_ch); 2708 spdk_bdev_close(desc); 2709 free_bdev(bdev); 2710 ut_fini_bdev(); 2711 } 2712 2713 static void 2714 bdev_io_mix_split_test(void) 2715 { 2716 struct spdk_bdev *bdev; 2717 struct spdk_bdev_desc *desc = NULL; 2718 struct spdk_io_channel *io_ch; 2719 struct spdk_bdev_opts bdev_opts = {}; 2720 struct iovec iov[SPDK_BDEV_IO_NUM_CHILD_IOV * 2]; 2721 struct ut_expected_io *expected_io; 2722 uint64_t i; 2723 int rc; 2724 2725 spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts)); 2726 bdev_opts.bdev_io_pool_size = 512; 2727 bdev_opts.bdev_io_cache_size = 64; 2728 ut_init_bdev(&bdev_opts); 2729 2730 bdev = allocate_bdev("bdev0"); 2731 2732 rc = spdk_bdev_open_ext(bdev->name, true, bdev_ut_event_cb, NULL, &desc); 2733 CU_ASSERT(rc == 0); 2734 SPDK_CU_ASSERT_FATAL(desc != NULL); 2735 io_ch = spdk_bdev_get_io_channel(desc); 2736 CU_ASSERT(io_ch != NULL); 2737 2738 /* First case optimal_io_boundary == max_segment_size * max_num_segments */ 2739 bdev->split_on_optimal_io_boundary = true; 2740 bdev->optimal_io_boundary = 16; 2741 2742 bdev->max_segment_size = 512; 2743 bdev->max_num_segments = 16; 2744 g_io_done = false; 2745 2746 /* IO crossing the IO boundary requires split 2747 * Total 2 child IOs. 2748 */ 2749 2750 /* The 1st child IO split the segment_size to multiple segment entry */ 2751 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 14, 2, 2); 2752 ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 512); 2753 ut_expected_io_set_iov(expected_io, 1, (void *)(0xF000 + 512), 512); 2754 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2755 2756 /* The 2nd child IO split the segment_size to multiple segment entry */ 2757 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 16, 2, 2); 2758 ut_expected_io_set_iov(expected_io, 0, (void *)(0xF000 + 2 * 512), 512); 2759 ut_expected_io_set_iov(expected_io, 1, (void *)(0xF000 + 3 * 512), 512); 2760 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2761 2762 rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 4, io_done, NULL); 2763 CU_ASSERT(rc == 0); 2764 CU_ASSERT(g_io_done == false); 2765 2766 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 2767 stub_complete_io(2); 2768 CU_ASSERT(g_io_done == true); 2769 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 2770 2771 /* Second case optimal_io_boundary > max_segment_size * max_num_segments */ 2772 bdev->max_segment_size = 15 * 512; 2773 bdev->max_num_segments = 1; 2774 g_io_done = false; 2775 2776 /* IO crossing the IO boundary requires split. 2777 * The 1st child IO segment size exceeds the max_segment_size, 2778 * So 1st child IO will be split to multiple segment entry. 2779 * Then it split to 2 child IOs because of the max_num_segments. 2780 * Total 3 child IOs. 2781 */ 2782 2783 /* The first 2 IOs are in an IO boundary. 2784 * Because the optimal_io_boundary > max_segment_size * max_num_segments 2785 * So it split to the first 2 IOs. 2786 */ 2787 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0, 15, 1); 2788 ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 512 * 15); 2789 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2790 2791 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 15, 1, 1); 2792 ut_expected_io_set_iov(expected_io, 0, (void *)(0xF000 + 512 * 15), 512); 2793 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2794 2795 /* The 3rd Child IO is because of the io boundary */ 2796 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 16, 2, 1); 2797 ut_expected_io_set_iov(expected_io, 0, (void *)(0xF000 + 512 * 16), 512 * 2); 2798 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2799 2800 rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 0, 18, io_done, NULL); 2801 CU_ASSERT(rc == 0); 2802 CU_ASSERT(g_io_done == false); 2803 2804 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 3); 2805 stub_complete_io(3); 2806 CU_ASSERT(g_io_done == true); 2807 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 2808 2809 /* Third case optimal_io_boundary < max_segment_size * max_num_segments */ 2810 bdev->max_segment_size = 17 * 512; 2811 bdev->max_num_segments = 1; 2812 g_io_done = false; 2813 2814 /* IO crossing the IO boundary requires split. 2815 * Child IO does not split. 2816 * Total 2 child IOs. 2817 */ 2818 2819 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0, 16, 1); 2820 ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 512 * 16); 2821 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2822 2823 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 16, 2, 1); 2824 ut_expected_io_set_iov(expected_io, 0, (void *)(0xF000 + 512 * 16), 512 * 2); 2825 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2826 2827 rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 0, 18, io_done, NULL); 2828 CU_ASSERT(rc == 0); 2829 CU_ASSERT(g_io_done == false); 2830 2831 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 2832 stub_complete_io(2); 2833 CU_ASSERT(g_io_done == true); 2834 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 2835 2836 /* Now set up a more complex, multi-vector command that needs to be split, 2837 * including splitting iovecs. 2838 * optimal_io_boundary < max_segment_size * max_num_segments 2839 */ 2840 bdev->max_segment_size = 3 * 512; 2841 bdev->max_num_segments = 6; 2842 g_io_done = false; 2843 2844 iov[0].iov_base = (void *)0x10000; 2845 iov[0].iov_len = 4 * 512; 2846 iov[1].iov_base = (void *)0x20000; 2847 iov[1].iov_len = 4 * 512; 2848 iov[2].iov_base = (void *)0x30000; 2849 iov[2].iov_len = 10 * 512; 2850 2851 /* IO crossing the IO boundary requires split. 2852 * The 1st child IO segment size exceeds the max_segment_size and after 2853 * splitting segment_size, the num_segments exceeds max_num_segments. 2854 * So 1st child IO will be split to 2 child IOs. 2855 * Total 3 child IOs. 2856 */ 2857 2858 /* The first 2 IOs are in an IO boundary. 2859 * After splitting segment size the segment num exceeds. 2860 * So it splits to 2 child IOs. 2861 */ 2862 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 0, 14, 6); 2863 ut_expected_io_set_iov(expected_io, 0, iov[0].iov_base, 512 * 3); 2864 ut_expected_io_set_iov(expected_io, 1, iov[0].iov_base + 512 * 3, 512); 2865 ut_expected_io_set_iov(expected_io, 2, iov[1].iov_base, 512 * 3); 2866 ut_expected_io_set_iov(expected_io, 3, iov[1].iov_base + 512 * 3, 512); 2867 ut_expected_io_set_iov(expected_io, 4, iov[2].iov_base, 512 * 3); 2868 ut_expected_io_set_iov(expected_io, 5, iov[2].iov_base + 512 * 3, 512 * 3); 2869 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2870 2871 /* The 2nd child IO has the left segment entry */ 2872 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 14, 2, 1); 2873 ut_expected_io_set_iov(expected_io, 0, iov[2].iov_base + 512 * 6, 512 * 2); 2874 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2875 2876 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 16, 2, 1); 2877 ut_expected_io_set_iov(expected_io, 0, iov[2].iov_base + 512 * 8, 512 * 2); 2878 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2879 2880 rc = spdk_bdev_writev_blocks(desc, io_ch, iov, 3, 0, 18, io_done, NULL); 2881 CU_ASSERT(rc == 0); 2882 CU_ASSERT(g_io_done == false); 2883 2884 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 3); 2885 stub_complete_io(3); 2886 CU_ASSERT(g_io_done == true); 2887 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 2888 2889 /* A very complicated case. Each sg entry exceeds max_segment_size 2890 * and split on io boundary. 2891 * optimal_io_boundary < max_segment_size * max_num_segments 2892 */ 2893 bdev->max_segment_size = 3 * 512; 2894 bdev->max_num_segments = SPDK_BDEV_IO_NUM_CHILD_IOV; 2895 g_io_done = false; 2896 2897 for (i = 0; i < 20; i++) { 2898 iov[i].iov_base = (void *)((i + 1) * 0x10000); 2899 iov[i].iov_len = 512 * 4; 2900 } 2901 2902 /* IO crossing the IO boundary requires split. 2903 * 80 block length can split 5 child IOs base on offset and IO boundary. 2904 * Each iov entry needs to be split to 2 entries because of max_segment_size 2905 * Total 5 child IOs. 2906 */ 2907 2908 /* 4 iov entries are in an IO boundary and each iov entry splits to 2. 2909 * So each child IO occupies 8 child iov entries. 2910 */ 2911 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 0, 16, 8); 2912 for (i = 0; i < 4; i++) { 2913 int iovcnt = i * 2; 2914 ut_expected_io_set_iov(expected_io, iovcnt, iov[i].iov_base, 512 * 3); 2915 ut_expected_io_set_iov(expected_io, iovcnt + 1, iov[i].iov_base + 512 * 3, 512); 2916 } 2917 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2918 2919 /* 2nd child IO and total 16 child iov entries of parent IO */ 2920 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 16, 16, 8); 2921 for (i = 4; i < 8; i++) { 2922 int iovcnt = (i - 4) * 2; 2923 ut_expected_io_set_iov(expected_io, iovcnt, iov[i].iov_base, 512 * 3); 2924 ut_expected_io_set_iov(expected_io, iovcnt + 1, iov[i].iov_base + 512 * 3, 512); 2925 } 2926 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2927 2928 /* 3rd child IO and total 24 child iov entries of parent IO */ 2929 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 32, 16, 8); 2930 for (i = 8; i < 12; i++) { 2931 int iovcnt = (i - 8) * 2; 2932 ut_expected_io_set_iov(expected_io, iovcnt, iov[i].iov_base, 512 * 3); 2933 ut_expected_io_set_iov(expected_io, iovcnt + 1, iov[i].iov_base + 512 * 3, 512); 2934 } 2935 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2936 2937 /* 4th child IO and total 32 child iov entries of parent IO */ 2938 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 48, 16, 8); 2939 for (i = 12; i < 16; i++) { 2940 int iovcnt = (i - 12) * 2; 2941 ut_expected_io_set_iov(expected_io, iovcnt, iov[i].iov_base, 512 * 3); 2942 ut_expected_io_set_iov(expected_io, iovcnt + 1, iov[i].iov_base + 512 * 3, 512); 2943 } 2944 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2945 2946 /* 5th child IO and because of the child iov entry it should be split 2947 * in next round. 2948 */ 2949 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 64, 16, 8); 2950 for (i = 16; i < 20; i++) { 2951 int iovcnt = (i - 16) * 2; 2952 ut_expected_io_set_iov(expected_io, iovcnt, iov[i].iov_base, 512 * 3); 2953 ut_expected_io_set_iov(expected_io, iovcnt + 1, iov[i].iov_base + 512 * 3, 512); 2954 } 2955 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2956 2957 rc = spdk_bdev_writev_blocks(desc, io_ch, iov, 20, 0, 80, io_done, NULL); 2958 CU_ASSERT(rc == 0); 2959 CU_ASSERT(g_io_done == false); 2960 2961 /* First split round */ 2962 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 4); 2963 stub_complete_io(4); 2964 CU_ASSERT(g_io_done == false); 2965 2966 /* Second split round */ 2967 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 2968 stub_complete_io(1); 2969 CU_ASSERT(g_io_done == true); 2970 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 2971 2972 spdk_put_io_channel(io_ch); 2973 spdk_bdev_close(desc); 2974 free_bdev(bdev); 2975 ut_fini_bdev(); 2976 } 2977 2978 static void 2979 bdev_io_split_with_io_wait(void) 2980 { 2981 struct spdk_bdev *bdev; 2982 struct spdk_bdev_desc *desc = NULL; 2983 struct spdk_io_channel *io_ch; 2984 struct spdk_bdev_channel *channel; 2985 struct spdk_bdev_mgmt_channel *mgmt_ch; 2986 struct spdk_bdev_opts bdev_opts = {}; 2987 struct iovec iov[3]; 2988 struct ut_expected_io *expected_io; 2989 int rc; 2990 2991 spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts)); 2992 bdev_opts.bdev_io_pool_size = 2; 2993 bdev_opts.bdev_io_cache_size = 1; 2994 ut_init_bdev(&bdev_opts); 2995 2996 bdev = allocate_bdev("bdev0"); 2997 2998 rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc); 2999 CU_ASSERT(rc == 0); 3000 CU_ASSERT(desc != NULL); 3001 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 3002 io_ch = spdk_bdev_get_io_channel(desc); 3003 CU_ASSERT(io_ch != NULL); 3004 channel = spdk_io_channel_get_ctx(io_ch); 3005 mgmt_ch = channel->shared_resource->mgmt_ch; 3006 3007 bdev->optimal_io_boundary = 16; 3008 bdev->split_on_optimal_io_boundary = true; 3009 3010 rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, NULL); 3011 CU_ASSERT(rc == 0); 3012 3013 /* Now test that a single-vector command is split correctly. 3014 * Offset 14, length 8, payload 0xF000 3015 * Child - Offset 14, length 2, payload 0xF000 3016 * Child - Offset 16, length 6, payload 0xF000 + 2 * 512 3017 * 3018 * Set up the expected values before calling spdk_bdev_read_blocks 3019 */ 3020 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 14, 2, 1); 3021 ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 2 * 512); 3022 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 3023 3024 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 16, 6, 1); 3025 ut_expected_io_set_iov(expected_io, 0, (void *)(0xF000 + 2 * 512), 6 * 512); 3026 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 3027 3028 /* The following children will be submitted sequentially due to the capacity of 3029 * spdk_bdev_io. 3030 */ 3031 3032 /* The first child I/O will be queued to wait until an spdk_bdev_io becomes available */ 3033 rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 8, io_done, NULL); 3034 CU_ASSERT(rc == 0); 3035 CU_ASSERT(!TAILQ_EMPTY(&mgmt_ch->io_wait_queue)); 3036 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 3037 3038 /* Completing the first read I/O will submit the first child */ 3039 stub_complete_io(1); 3040 CU_ASSERT(TAILQ_EMPTY(&mgmt_ch->io_wait_queue)); 3041 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 3042 3043 /* Completing the first child will submit the second child */ 3044 stub_complete_io(1); 3045 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 3046 3047 /* Complete the second child I/O. This should result in our callback getting 3048 * invoked since the parent I/O is now complete. 3049 */ 3050 stub_complete_io(1); 3051 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 3052 3053 /* Now set up a more complex, multi-vector command that needs to be split, 3054 * including splitting iovecs. 3055 */ 3056 iov[0].iov_base = (void *)0x10000; 3057 iov[0].iov_len = 512; 3058 iov[1].iov_base = (void *)0x20000; 3059 iov[1].iov_len = 20 * 512; 3060 iov[2].iov_base = (void *)0x30000; 3061 iov[2].iov_len = 11 * 512; 3062 3063 g_io_done = false; 3064 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 14, 2, 2); 3065 ut_expected_io_set_iov(expected_io, 0, (void *)0x10000, 512); 3066 ut_expected_io_set_iov(expected_io, 1, (void *)0x20000, 512); 3067 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 3068 3069 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 16, 16, 1); 3070 ut_expected_io_set_iov(expected_io, 0, (void *)(0x20000 + 512), 16 * 512); 3071 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 3072 3073 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 32, 14, 2); 3074 ut_expected_io_set_iov(expected_io, 0, (void *)(0x20000 + 17 * 512), 3 * 512); 3075 ut_expected_io_set_iov(expected_io, 1, (void *)0x30000, 11 * 512); 3076 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 3077 3078 rc = spdk_bdev_writev_blocks(desc, io_ch, iov, 3, 14, 32, io_done, NULL); 3079 CU_ASSERT(rc == 0); 3080 CU_ASSERT(g_io_done == false); 3081 3082 /* The following children will be submitted sequentially due to the capacity of 3083 * spdk_bdev_io. 3084 */ 3085 3086 /* Completing the first child will submit the second child */ 3087 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 3088 stub_complete_io(1); 3089 CU_ASSERT(g_io_done == false); 3090 3091 /* Completing the second child will submit the third child */ 3092 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 3093 stub_complete_io(1); 3094 CU_ASSERT(g_io_done == false); 3095 3096 /* Completing the third child will result in our callback getting invoked 3097 * since the parent I/O is now complete. 3098 */ 3099 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 3100 stub_complete_io(1); 3101 CU_ASSERT(g_io_done == true); 3102 3103 CU_ASSERT(TAILQ_EMPTY(&g_bdev_ut_channel->expected_io)); 3104 3105 spdk_put_io_channel(io_ch); 3106 spdk_bdev_close(desc); 3107 free_bdev(bdev); 3108 ut_fini_bdev(); 3109 } 3110 3111 static void 3112 bdev_io_write_unit_split_test(void) 3113 { 3114 struct spdk_bdev *bdev; 3115 struct spdk_bdev_desc *desc = NULL; 3116 struct spdk_io_channel *io_ch; 3117 struct spdk_bdev_opts bdev_opts = {}; 3118 struct iovec iov[SPDK_BDEV_IO_NUM_CHILD_IOV * 4]; 3119 struct ut_expected_io *expected_io; 3120 uint64_t i; 3121 int rc; 3122 3123 spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts)); 3124 bdev_opts.bdev_io_pool_size = 512; 3125 bdev_opts.bdev_io_cache_size = 64; 3126 ut_init_bdev(&bdev_opts); 3127 3128 bdev = allocate_bdev("bdev0"); 3129 3130 rc = spdk_bdev_open_ext(bdev->name, true, bdev_ut_event_cb, NULL, &desc); 3131 CU_ASSERT(rc == 0); 3132 SPDK_CU_ASSERT_FATAL(desc != NULL); 3133 io_ch = spdk_bdev_get_io_channel(desc); 3134 CU_ASSERT(io_ch != NULL); 3135 3136 /* Write I/O 2x larger than write_unit_size should get split into 2 I/Os */ 3137 bdev->write_unit_size = 32; 3138 bdev->split_on_write_unit = true; 3139 g_io_done = false; 3140 3141 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 0, 32, 1); 3142 ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 32 * 512); 3143 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 3144 3145 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 32, 32, 1); 3146 ut_expected_io_set_iov(expected_io, 0, (void *)(0xF000 + 32 * 512), 32 * 512); 3147 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 3148 3149 rc = spdk_bdev_write_blocks(desc, io_ch, (void *)0xF000, 0, 64, io_done, NULL); 3150 CU_ASSERT(rc == 0); 3151 CU_ASSERT(g_io_done == false); 3152 3153 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 3154 stub_complete_io(2); 3155 CU_ASSERT(g_io_done == true); 3156 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 3157 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS); 3158 3159 /* Same as above but with optimal_io_boundary < write_unit_size - the I/O should be split 3160 * based on write_unit_size, not optimal_io_boundary */ 3161 bdev->split_on_optimal_io_boundary = true; 3162 bdev->optimal_io_boundary = 16; 3163 g_io_done = false; 3164 3165 rc = spdk_bdev_write_blocks(desc, io_ch, (void *)0xF000, 0, 64, io_done, NULL); 3166 CU_ASSERT(rc == 0); 3167 CU_ASSERT(g_io_done == false); 3168 3169 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 3170 stub_complete_io(2); 3171 CU_ASSERT(g_io_done == true); 3172 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 3173 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS); 3174 3175 /* Write I/O should fail if it is smaller than write_unit_size */ 3176 g_io_done = false; 3177 3178 rc = spdk_bdev_write_blocks(desc, io_ch, (void *)0xF000, 0, 31, io_done, NULL); 3179 CU_ASSERT(rc == 0); 3180 CU_ASSERT(g_io_done == false); 3181 3182 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 3183 poll_threads(); 3184 CU_ASSERT(g_io_done == true); 3185 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 3186 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED); 3187 3188 /* Same for I/O not aligned to write_unit_size */ 3189 g_io_done = false; 3190 3191 rc = spdk_bdev_write_blocks(desc, io_ch, (void *)0xF000, 1, 32, io_done, NULL); 3192 CU_ASSERT(rc == 0); 3193 CU_ASSERT(g_io_done == false); 3194 3195 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 3196 poll_threads(); 3197 CU_ASSERT(g_io_done == true); 3198 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 3199 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED); 3200 3201 /* Write should fail if it needs to be split but there are not enough iovs to submit 3202 * an entire write unit */ 3203 bdev->write_unit_size = SPDK_COUNTOF(iov) / 2; 3204 g_io_done = false; 3205 3206 for (i = 0; i < SPDK_COUNTOF(iov); i++) { 3207 iov[i].iov_base = (void *)(0x1000 + 512 * i); 3208 iov[i].iov_len = 512; 3209 } 3210 3211 rc = spdk_bdev_writev_blocks(desc, io_ch, iov, SPDK_COUNTOF(iov), 0, SPDK_COUNTOF(iov), 3212 io_done, NULL); 3213 CU_ASSERT(rc == 0); 3214 CU_ASSERT(g_io_done == false); 3215 3216 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 3217 poll_threads(); 3218 CU_ASSERT(g_io_done == true); 3219 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 3220 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED); 3221 3222 spdk_put_io_channel(io_ch); 3223 spdk_bdev_close(desc); 3224 free_bdev(bdev); 3225 ut_fini_bdev(); 3226 } 3227 3228 static void 3229 bdev_io_alignment(void) 3230 { 3231 struct spdk_bdev *bdev; 3232 struct spdk_bdev_desc *desc = NULL; 3233 struct spdk_io_channel *io_ch; 3234 struct spdk_bdev_opts bdev_opts = {}; 3235 int rc; 3236 void *buf = NULL; 3237 struct iovec iovs[2]; 3238 int iovcnt; 3239 uint64_t alignment; 3240 3241 spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts)); 3242 bdev_opts.bdev_io_pool_size = 20; 3243 bdev_opts.bdev_io_cache_size = 2; 3244 ut_init_bdev(&bdev_opts); 3245 3246 fn_table.submit_request = stub_submit_request_get_buf; 3247 bdev = allocate_bdev("bdev0"); 3248 3249 rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc); 3250 CU_ASSERT(rc == 0); 3251 CU_ASSERT(desc != NULL); 3252 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 3253 io_ch = spdk_bdev_get_io_channel(desc); 3254 CU_ASSERT(io_ch != NULL); 3255 3256 /* Create aligned buffer */ 3257 rc = posix_memalign(&buf, 4096, 8192); 3258 SPDK_CU_ASSERT_FATAL(rc == 0); 3259 3260 /* Pass aligned single buffer with no alignment required */ 3261 alignment = 1; 3262 bdev->required_alignment = spdk_u32log2(alignment); 3263 3264 rc = spdk_bdev_write_blocks(desc, io_ch, buf, 0, 1, io_done, NULL); 3265 CU_ASSERT(rc == 0); 3266 stub_complete_io(1); 3267 CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt, 3268 alignment)); 3269 3270 rc = spdk_bdev_read_blocks(desc, io_ch, buf, 0, 1, io_done, NULL); 3271 CU_ASSERT(rc == 0); 3272 stub_complete_io(1); 3273 CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt, 3274 alignment)); 3275 3276 /* Pass unaligned single buffer with no alignment required */ 3277 alignment = 1; 3278 bdev->required_alignment = spdk_u32log2(alignment); 3279 3280 rc = spdk_bdev_write_blocks(desc, io_ch, buf + 4, 0, 1, io_done, NULL); 3281 CU_ASSERT(rc == 0); 3282 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0); 3283 CU_ASSERT(g_bdev_io->u.bdev.iovs[0].iov_base == buf + 4); 3284 stub_complete_io(1); 3285 3286 rc = spdk_bdev_read_blocks(desc, io_ch, buf + 4, 0, 1, io_done, NULL); 3287 CU_ASSERT(rc == 0); 3288 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0); 3289 CU_ASSERT(g_bdev_io->u.bdev.iovs[0].iov_base == buf + 4); 3290 stub_complete_io(1); 3291 3292 /* Pass unaligned single buffer with 512 alignment required */ 3293 alignment = 512; 3294 bdev->required_alignment = spdk_u32log2(alignment); 3295 3296 rc = spdk_bdev_write_blocks(desc, io_ch, buf + 4, 0, 1, io_done, NULL); 3297 CU_ASSERT(rc == 0); 3298 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 1); 3299 CU_ASSERT(g_bdev_io->u.bdev.iovs == &g_bdev_io->internal.bounce_iov); 3300 CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt, 3301 alignment)); 3302 stub_complete_io(1); 3303 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0); 3304 3305 rc = spdk_bdev_read_blocks(desc, io_ch, buf + 4, 0, 1, io_done, NULL); 3306 CU_ASSERT(rc == 0); 3307 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 1); 3308 CU_ASSERT(g_bdev_io->u.bdev.iovs == &g_bdev_io->internal.bounce_iov); 3309 CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt, 3310 alignment)); 3311 stub_complete_io(1); 3312 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0); 3313 3314 /* Pass unaligned single buffer with 4096 alignment required */ 3315 alignment = 4096; 3316 bdev->required_alignment = spdk_u32log2(alignment); 3317 3318 rc = spdk_bdev_write_blocks(desc, io_ch, buf + 8, 0, 1, io_done, NULL); 3319 CU_ASSERT(rc == 0); 3320 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 1); 3321 CU_ASSERT(g_bdev_io->u.bdev.iovs == &g_bdev_io->internal.bounce_iov); 3322 CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt, 3323 alignment)); 3324 stub_complete_io(1); 3325 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0); 3326 3327 rc = spdk_bdev_read_blocks(desc, io_ch, buf + 8, 0, 1, io_done, NULL); 3328 CU_ASSERT(rc == 0); 3329 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 1); 3330 CU_ASSERT(g_bdev_io->u.bdev.iovs == &g_bdev_io->internal.bounce_iov); 3331 CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt, 3332 alignment)); 3333 stub_complete_io(1); 3334 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0); 3335 3336 /* Pass aligned iovs with no alignment required */ 3337 alignment = 1; 3338 bdev->required_alignment = spdk_u32log2(alignment); 3339 3340 iovcnt = 1; 3341 iovs[0].iov_base = buf; 3342 iovs[0].iov_len = 512; 3343 3344 rc = spdk_bdev_writev(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL); 3345 CU_ASSERT(rc == 0); 3346 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0); 3347 stub_complete_io(1); 3348 CU_ASSERT(g_bdev_io->u.bdev.iovs[0].iov_base == iovs[0].iov_base); 3349 3350 rc = spdk_bdev_readv(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL); 3351 CU_ASSERT(rc == 0); 3352 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0); 3353 stub_complete_io(1); 3354 CU_ASSERT(g_bdev_io->u.bdev.iovs[0].iov_base == iovs[0].iov_base); 3355 3356 /* Pass unaligned iovs with no alignment required */ 3357 alignment = 1; 3358 bdev->required_alignment = spdk_u32log2(alignment); 3359 3360 iovcnt = 2; 3361 iovs[0].iov_base = buf + 16; 3362 iovs[0].iov_len = 256; 3363 iovs[1].iov_base = buf + 16 + 256 + 32; 3364 iovs[1].iov_len = 256; 3365 3366 rc = spdk_bdev_writev(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL); 3367 CU_ASSERT(rc == 0); 3368 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0); 3369 stub_complete_io(1); 3370 CU_ASSERT(g_bdev_io->u.bdev.iovs[0].iov_base == iovs[0].iov_base); 3371 3372 rc = spdk_bdev_readv(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL); 3373 CU_ASSERT(rc == 0); 3374 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0); 3375 stub_complete_io(1); 3376 CU_ASSERT(g_bdev_io->u.bdev.iovs[0].iov_base == iovs[0].iov_base); 3377 3378 /* Pass unaligned iov with 2048 alignment required */ 3379 alignment = 2048; 3380 bdev->required_alignment = spdk_u32log2(alignment); 3381 3382 iovcnt = 2; 3383 iovs[0].iov_base = buf + 16; 3384 iovs[0].iov_len = 256; 3385 iovs[1].iov_base = buf + 16 + 256 + 32; 3386 iovs[1].iov_len = 256; 3387 3388 rc = spdk_bdev_writev(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL); 3389 CU_ASSERT(rc == 0); 3390 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == iovcnt); 3391 CU_ASSERT(g_bdev_io->u.bdev.iovs == &g_bdev_io->internal.bounce_iov); 3392 CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt, 3393 alignment)); 3394 stub_complete_io(1); 3395 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0); 3396 3397 rc = spdk_bdev_readv(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL); 3398 CU_ASSERT(rc == 0); 3399 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == iovcnt); 3400 CU_ASSERT(g_bdev_io->u.bdev.iovs == &g_bdev_io->internal.bounce_iov); 3401 CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt, 3402 alignment)); 3403 stub_complete_io(1); 3404 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0); 3405 3406 /* Pass iov without allocated buffer without alignment required */ 3407 alignment = 1; 3408 bdev->required_alignment = spdk_u32log2(alignment); 3409 3410 iovcnt = 1; 3411 iovs[0].iov_base = NULL; 3412 iovs[0].iov_len = 0; 3413 3414 rc = spdk_bdev_readv(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL); 3415 CU_ASSERT(rc == 0); 3416 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0); 3417 CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt, 3418 alignment)); 3419 stub_complete_io(1); 3420 3421 /* Pass iov without allocated buffer with 1024 alignment required */ 3422 alignment = 1024; 3423 bdev->required_alignment = spdk_u32log2(alignment); 3424 3425 iovcnt = 1; 3426 iovs[0].iov_base = NULL; 3427 iovs[0].iov_len = 0; 3428 3429 rc = spdk_bdev_readv(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL); 3430 CU_ASSERT(rc == 0); 3431 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0); 3432 CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt, 3433 alignment)); 3434 stub_complete_io(1); 3435 3436 spdk_put_io_channel(io_ch); 3437 spdk_bdev_close(desc); 3438 free_bdev(bdev); 3439 fn_table.submit_request = stub_submit_request; 3440 ut_fini_bdev(); 3441 3442 free(buf); 3443 } 3444 3445 static void 3446 bdev_io_alignment_with_boundary(void) 3447 { 3448 struct spdk_bdev *bdev; 3449 struct spdk_bdev_desc *desc = NULL; 3450 struct spdk_io_channel *io_ch; 3451 struct spdk_bdev_opts bdev_opts = {}; 3452 int rc; 3453 void *buf = NULL; 3454 struct iovec iovs[2]; 3455 int iovcnt; 3456 uint64_t alignment; 3457 3458 spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts)); 3459 bdev_opts.bdev_io_pool_size = 20; 3460 bdev_opts.bdev_io_cache_size = 2; 3461 bdev_opts.opts_size = sizeof(bdev_opts); 3462 ut_init_bdev(&bdev_opts); 3463 3464 fn_table.submit_request = stub_submit_request_get_buf; 3465 bdev = allocate_bdev("bdev0"); 3466 3467 rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc); 3468 CU_ASSERT(rc == 0); 3469 CU_ASSERT(desc != NULL); 3470 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 3471 io_ch = spdk_bdev_get_io_channel(desc); 3472 CU_ASSERT(io_ch != NULL); 3473 3474 /* Create aligned buffer */ 3475 rc = posix_memalign(&buf, 4096, 131072); 3476 SPDK_CU_ASSERT_FATAL(rc == 0); 3477 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 3478 3479 /* 512 * 3 with 2 IO boundary, allocate small data buffer from bdev layer */ 3480 alignment = 512; 3481 bdev->required_alignment = spdk_u32log2(alignment); 3482 bdev->optimal_io_boundary = 2; 3483 bdev->split_on_optimal_io_boundary = true; 3484 3485 iovcnt = 1; 3486 iovs[0].iov_base = NULL; 3487 iovs[0].iov_len = 512 * 3; 3488 3489 rc = spdk_bdev_readv_blocks(desc, io_ch, iovs, iovcnt, 1, 3, io_done, NULL); 3490 CU_ASSERT(rc == 0); 3491 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 3492 stub_complete_io(2); 3493 3494 /* 8KiB with 16 IO boundary, allocate large data buffer from bdev layer */ 3495 alignment = 512; 3496 bdev->required_alignment = spdk_u32log2(alignment); 3497 bdev->optimal_io_boundary = 16; 3498 bdev->split_on_optimal_io_boundary = true; 3499 3500 iovcnt = 1; 3501 iovs[0].iov_base = NULL; 3502 iovs[0].iov_len = 512 * 16; 3503 3504 rc = spdk_bdev_readv_blocks(desc, io_ch, iovs, iovcnt, 1, 16, io_done, NULL); 3505 CU_ASSERT(rc == 0); 3506 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 3507 stub_complete_io(2); 3508 3509 /* 512 * 160 with 128 IO boundary, 63.5KiB + 16.5KiB for the two children requests */ 3510 alignment = 512; 3511 bdev->required_alignment = spdk_u32log2(alignment); 3512 bdev->optimal_io_boundary = 128; 3513 bdev->split_on_optimal_io_boundary = true; 3514 3515 iovcnt = 1; 3516 iovs[0].iov_base = buf + 16; 3517 iovs[0].iov_len = 512 * 160; 3518 rc = spdk_bdev_readv_blocks(desc, io_ch, iovs, iovcnt, 1, 160, io_done, NULL); 3519 CU_ASSERT(rc == 0); 3520 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 3521 stub_complete_io(2); 3522 3523 /* 512 * 3 with 2 IO boundary */ 3524 alignment = 512; 3525 bdev->required_alignment = spdk_u32log2(alignment); 3526 bdev->optimal_io_boundary = 2; 3527 bdev->split_on_optimal_io_boundary = true; 3528 3529 iovcnt = 2; 3530 iovs[0].iov_base = buf + 16; 3531 iovs[0].iov_len = 512; 3532 iovs[1].iov_base = buf + 16 + 512 + 32; 3533 iovs[1].iov_len = 1024; 3534 3535 rc = spdk_bdev_writev_blocks(desc, io_ch, iovs, iovcnt, 1, 3, io_done, NULL); 3536 CU_ASSERT(rc == 0); 3537 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 3538 stub_complete_io(2); 3539 3540 rc = spdk_bdev_readv_blocks(desc, io_ch, iovs, iovcnt, 1, 3, io_done, NULL); 3541 CU_ASSERT(rc == 0); 3542 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 3543 stub_complete_io(2); 3544 3545 /* 512 * 64 with 32 IO boundary */ 3546 bdev->optimal_io_boundary = 32; 3547 iovcnt = 2; 3548 iovs[0].iov_base = buf + 16; 3549 iovs[0].iov_len = 16384; 3550 iovs[1].iov_base = buf + 16 + 16384 + 32; 3551 iovs[1].iov_len = 16384; 3552 3553 rc = spdk_bdev_writev_blocks(desc, io_ch, iovs, iovcnt, 1, 64, io_done, NULL); 3554 CU_ASSERT(rc == 0); 3555 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 3); 3556 stub_complete_io(3); 3557 3558 rc = spdk_bdev_readv_blocks(desc, io_ch, iovs, iovcnt, 1, 64, io_done, NULL); 3559 CU_ASSERT(rc == 0); 3560 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 3); 3561 stub_complete_io(3); 3562 3563 /* 512 * 160 with 32 IO boundary */ 3564 iovcnt = 1; 3565 iovs[0].iov_base = buf + 16; 3566 iovs[0].iov_len = 16384 + 65536; 3567 3568 rc = spdk_bdev_writev_blocks(desc, io_ch, iovs, iovcnt, 1, 160, io_done, NULL); 3569 CU_ASSERT(rc == 0); 3570 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 6); 3571 stub_complete_io(6); 3572 3573 spdk_put_io_channel(io_ch); 3574 spdk_bdev_close(desc); 3575 free_bdev(bdev); 3576 fn_table.submit_request = stub_submit_request; 3577 ut_fini_bdev(); 3578 3579 free(buf); 3580 } 3581 3582 static void 3583 histogram_status_cb(void *cb_arg, int status) 3584 { 3585 g_status = status; 3586 } 3587 3588 static void 3589 histogram_data_cb(void *cb_arg, int status, struct spdk_histogram_data *histogram) 3590 { 3591 g_status = status; 3592 g_histogram = histogram; 3593 } 3594 3595 static void 3596 histogram_io_count(void *ctx, uint64_t start, uint64_t end, uint64_t count, 3597 uint64_t total, uint64_t so_far) 3598 { 3599 g_count += count; 3600 } 3601 3602 static void 3603 histogram_channel_data_cb(void *cb_arg, int status, struct spdk_histogram_data *histogram) 3604 { 3605 spdk_histogram_data_fn cb_fn = cb_arg; 3606 3607 g_status = status; 3608 3609 if (status == 0) { 3610 spdk_histogram_data_iterate(histogram, cb_fn, NULL); 3611 } 3612 } 3613 3614 static void 3615 bdev_histograms(void) 3616 { 3617 struct spdk_bdev *bdev; 3618 struct spdk_bdev_desc *desc = NULL; 3619 struct spdk_io_channel *ch; 3620 struct spdk_histogram_data *histogram; 3621 uint8_t buf[4096]; 3622 int rc; 3623 3624 ut_init_bdev(NULL); 3625 3626 bdev = allocate_bdev("bdev"); 3627 3628 rc = spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc); 3629 CU_ASSERT(rc == 0); 3630 CU_ASSERT(desc != NULL); 3631 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 3632 3633 ch = spdk_bdev_get_io_channel(desc); 3634 CU_ASSERT(ch != NULL); 3635 3636 /* Enable histogram */ 3637 g_status = -1; 3638 spdk_bdev_histogram_enable(bdev, histogram_status_cb, NULL, true); 3639 poll_threads(); 3640 CU_ASSERT(g_status == 0); 3641 CU_ASSERT(bdev->internal.histogram_enabled == true); 3642 3643 /* Allocate histogram */ 3644 histogram = spdk_histogram_data_alloc(); 3645 SPDK_CU_ASSERT_FATAL(histogram != NULL); 3646 3647 /* Check if histogram is zeroed */ 3648 spdk_bdev_histogram_get(bdev, histogram, histogram_data_cb, NULL); 3649 poll_threads(); 3650 CU_ASSERT(g_status == 0); 3651 SPDK_CU_ASSERT_FATAL(g_histogram != NULL); 3652 3653 g_count = 0; 3654 spdk_histogram_data_iterate(g_histogram, histogram_io_count, NULL); 3655 3656 CU_ASSERT(g_count == 0); 3657 3658 rc = spdk_bdev_write_blocks(desc, ch, buf, 0, 1, io_done, NULL); 3659 CU_ASSERT(rc == 0); 3660 3661 spdk_delay_us(10); 3662 stub_complete_io(1); 3663 poll_threads(); 3664 3665 rc = spdk_bdev_read_blocks(desc, ch, buf, 0, 1, io_done, NULL); 3666 CU_ASSERT(rc == 0); 3667 3668 spdk_delay_us(10); 3669 stub_complete_io(1); 3670 poll_threads(); 3671 3672 /* Check if histogram gathered data from all I/O channels */ 3673 g_histogram = NULL; 3674 spdk_bdev_histogram_get(bdev, histogram, histogram_data_cb, NULL); 3675 poll_threads(); 3676 CU_ASSERT(g_status == 0); 3677 CU_ASSERT(bdev->internal.histogram_enabled == true); 3678 SPDK_CU_ASSERT_FATAL(g_histogram != NULL); 3679 3680 g_count = 0; 3681 spdk_histogram_data_iterate(g_histogram, histogram_io_count, NULL); 3682 CU_ASSERT(g_count == 2); 3683 3684 g_count = 0; 3685 spdk_bdev_channel_get_histogram(ch, histogram_channel_data_cb, histogram_io_count); 3686 CU_ASSERT(g_status == 0); 3687 CU_ASSERT(g_count == 2); 3688 3689 /* Disable histogram */ 3690 spdk_bdev_histogram_enable(bdev, histogram_status_cb, NULL, false); 3691 poll_threads(); 3692 CU_ASSERT(g_status == 0); 3693 CU_ASSERT(bdev->internal.histogram_enabled == false); 3694 3695 /* Try to run histogram commands on disabled bdev */ 3696 spdk_bdev_histogram_get(bdev, histogram, histogram_data_cb, NULL); 3697 poll_threads(); 3698 CU_ASSERT(g_status == -EFAULT); 3699 3700 spdk_bdev_channel_get_histogram(ch, histogram_channel_data_cb, NULL); 3701 CU_ASSERT(g_status == -EFAULT); 3702 3703 spdk_histogram_data_free(histogram); 3704 spdk_put_io_channel(ch); 3705 spdk_bdev_close(desc); 3706 free_bdev(bdev); 3707 ut_fini_bdev(); 3708 } 3709 3710 static void 3711 _bdev_compare(bool emulated) 3712 { 3713 struct spdk_bdev *bdev; 3714 struct spdk_bdev_desc *desc = NULL; 3715 struct spdk_io_channel *ioch; 3716 struct ut_expected_io *expected_io; 3717 uint64_t offset, num_blocks; 3718 uint32_t num_completed; 3719 char aa_buf[512]; 3720 char bb_buf[512]; 3721 struct iovec compare_iov; 3722 uint8_t expected_io_type; 3723 int rc; 3724 3725 if (emulated) { 3726 expected_io_type = SPDK_BDEV_IO_TYPE_READ; 3727 } else { 3728 expected_io_type = SPDK_BDEV_IO_TYPE_COMPARE; 3729 } 3730 3731 memset(aa_buf, 0xaa, sizeof(aa_buf)); 3732 memset(bb_buf, 0xbb, sizeof(bb_buf)); 3733 3734 g_io_types_supported[SPDK_BDEV_IO_TYPE_COMPARE] = !emulated; 3735 3736 ut_init_bdev(NULL); 3737 fn_table.submit_request = stub_submit_request_get_buf; 3738 bdev = allocate_bdev("bdev"); 3739 3740 rc = spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc); 3741 CU_ASSERT_EQUAL(rc, 0); 3742 SPDK_CU_ASSERT_FATAL(desc != NULL); 3743 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 3744 ioch = spdk_bdev_get_io_channel(desc); 3745 SPDK_CU_ASSERT_FATAL(ioch != NULL); 3746 3747 fn_table.submit_request = stub_submit_request_get_buf; 3748 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 3749 3750 offset = 50; 3751 num_blocks = 1; 3752 compare_iov.iov_base = aa_buf; 3753 compare_iov.iov_len = sizeof(aa_buf); 3754 3755 /* 1. successful comparev */ 3756 expected_io = ut_alloc_expected_io(expected_io_type, offset, num_blocks, 0); 3757 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 3758 3759 g_io_done = false; 3760 g_compare_read_buf = aa_buf; 3761 g_compare_read_buf_len = sizeof(aa_buf); 3762 rc = spdk_bdev_comparev_blocks(desc, ioch, &compare_iov, 1, offset, num_blocks, io_done, NULL); 3763 CU_ASSERT_EQUAL(rc, 0); 3764 num_completed = stub_complete_io(1); 3765 CU_ASSERT_EQUAL(num_completed, 1); 3766 CU_ASSERT(g_io_done == true); 3767 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS); 3768 3769 /* 2. miscompare comparev */ 3770 expected_io = ut_alloc_expected_io(expected_io_type, offset, num_blocks, 0); 3771 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 3772 3773 g_io_done = false; 3774 g_compare_read_buf = bb_buf; 3775 g_compare_read_buf_len = sizeof(bb_buf); 3776 rc = spdk_bdev_comparev_blocks(desc, ioch, &compare_iov, 1, offset, num_blocks, io_done, NULL); 3777 CU_ASSERT_EQUAL(rc, 0); 3778 num_completed = stub_complete_io(1); 3779 CU_ASSERT_EQUAL(num_completed, 1); 3780 CU_ASSERT(g_io_done == true); 3781 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_MISCOMPARE); 3782 3783 /* 3. successful compare */ 3784 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 3785 expected_io = ut_alloc_expected_io(expected_io_type, offset, num_blocks, 0); 3786 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 3787 3788 g_io_done = false; 3789 g_compare_read_buf = aa_buf; 3790 g_compare_read_buf_len = sizeof(aa_buf); 3791 rc = spdk_bdev_compare_blocks(desc, ioch, aa_buf, offset, num_blocks, io_done, NULL); 3792 CU_ASSERT_EQUAL(rc, 0); 3793 num_completed = stub_complete_io(1); 3794 CU_ASSERT_EQUAL(num_completed, 1); 3795 CU_ASSERT(g_io_done == true); 3796 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS); 3797 3798 /* 4. miscompare compare */ 3799 expected_io = ut_alloc_expected_io(expected_io_type, offset, num_blocks, 0); 3800 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 3801 3802 g_io_done = false; 3803 g_compare_read_buf = bb_buf; 3804 g_compare_read_buf_len = sizeof(bb_buf); 3805 rc = spdk_bdev_compare_blocks(desc, ioch, aa_buf, offset, num_blocks, io_done, NULL); 3806 CU_ASSERT_EQUAL(rc, 0); 3807 num_completed = stub_complete_io(1); 3808 CU_ASSERT_EQUAL(num_completed, 1); 3809 CU_ASSERT(g_io_done == true); 3810 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_MISCOMPARE); 3811 3812 spdk_put_io_channel(ioch); 3813 spdk_bdev_close(desc); 3814 free_bdev(bdev); 3815 fn_table.submit_request = stub_submit_request; 3816 ut_fini_bdev(); 3817 3818 g_io_types_supported[SPDK_BDEV_IO_TYPE_COMPARE] = true; 3819 3820 g_compare_read_buf = NULL; 3821 } 3822 3823 static void 3824 _bdev_compare_with_md(bool emulated) 3825 { 3826 struct spdk_bdev *bdev; 3827 struct spdk_bdev_desc *desc = NULL; 3828 struct spdk_io_channel *ioch; 3829 struct ut_expected_io *expected_io; 3830 uint64_t offset, num_blocks; 3831 uint32_t num_completed; 3832 char buf[1024 + 16 /* 2 * blocklen + 2 * mdlen */]; 3833 char buf_interleaved_miscompare[1024 + 16 /* 2 * blocklen + 2 * mdlen */]; 3834 char buf_miscompare[1024 /* 2 * blocklen */]; 3835 char md_buf[16]; 3836 char md_buf_miscompare[16]; 3837 struct iovec compare_iov; 3838 uint8_t expected_io_type; 3839 int rc; 3840 3841 if (emulated) { 3842 expected_io_type = SPDK_BDEV_IO_TYPE_READ; 3843 } else { 3844 expected_io_type = SPDK_BDEV_IO_TYPE_COMPARE; 3845 } 3846 3847 memset(buf, 0xaa, sizeof(buf)); 3848 memset(buf_interleaved_miscompare, 0xaa, sizeof(buf_interleaved_miscompare)); 3849 /* make last md different */ 3850 memset(buf_interleaved_miscompare + 1024 + 8, 0xbb, 8); 3851 memset(buf_miscompare, 0xbb, sizeof(buf_miscompare)); 3852 memset(md_buf, 0xaa, 16); 3853 memset(md_buf_miscompare, 0xbb, 16); 3854 3855 g_io_types_supported[SPDK_BDEV_IO_TYPE_COMPARE] = !emulated; 3856 3857 ut_init_bdev(NULL); 3858 fn_table.submit_request = stub_submit_request_get_buf; 3859 bdev = allocate_bdev("bdev"); 3860 3861 rc = spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc); 3862 CU_ASSERT_EQUAL(rc, 0); 3863 SPDK_CU_ASSERT_FATAL(desc != NULL); 3864 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 3865 ioch = spdk_bdev_get_io_channel(desc); 3866 SPDK_CU_ASSERT_FATAL(ioch != NULL); 3867 3868 fn_table.submit_request = stub_submit_request_get_buf; 3869 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 3870 3871 offset = 50; 3872 num_blocks = 2; 3873 3874 /* interleaved md & data */ 3875 bdev->md_interleave = true; 3876 bdev->md_len = 8; 3877 bdev->blocklen = 512 + 8; 3878 compare_iov.iov_base = buf; 3879 compare_iov.iov_len = sizeof(buf); 3880 3881 /* 1. successful compare with md interleaved */ 3882 expected_io = ut_alloc_expected_io(expected_io_type, offset, num_blocks, 0); 3883 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 3884 3885 g_io_done = false; 3886 g_compare_read_buf = buf; 3887 g_compare_read_buf_len = sizeof(buf); 3888 rc = spdk_bdev_comparev_blocks(desc, ioch, &compare_iov, 1, offset, num_blocks, io_done, NULL); 3889 CU_ASSERT_EQUAL(rc, 0); 3890 num_completed = stub_complete_io(1); 3891 CU_ASSERT_EQUAL(num_completed, 1); 3892 CU_ASSERT(g_io_done == true); 3893 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS); 3894 3895 /* 2. miscompare with md interleaved */ 3896 expected_io = ut_alloc_expected_io(expected_io_type, offset, num_blocks, 0); 3897 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 3898 3899 g_io_done = false; 3900 g_compare_read_buf = buf_interleaved_miscompare; 3901 g_compare_read_buf_len = sizeof(buf_interleaved_miscompare); 3902 rc = spdk_bdev_comparev_blocks(desc, ioch, &compare_iov, 1, offset, num_blocks, io_done, NULL); 3903 CU_ASSERT_EQUAL(rc, 0); 3904 num_completed = stub_complete_io(1); 3905 CU_ASSERT_EQUAL(num_completed, 1); 3906 CU_ASSERT(g_io_done == true); 3907 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_MISCOMPARE); 3908 3909 /* Separate data & md buffers */ 3910 bdev->md_interleave = false; 3911 bdev->blocklen = 512; 3912 compare_iov.iov_base = buf; 3913 compare_iov.iov_len = 1024; 3914 3915 /* 3. successful compare with md separated */ 3916 expected_io = ut_alloc_expected_io(expected_io_type, offset, num_blocks, 0); 3917 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 3918 3919 g_io_done = false; 3920 g_compare_read_buf = buf; 3921 g_compare_read_buf_len = 1024; 3922 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 3923 g_compare_md_buf = md_buf; 3924 rc = spdk_bdev_comparev_blocks_with_md(desc, ioch, &compare_iov, 1, md_buf, 3925 offset, num_blocks, io_done, NULL); 3926 CU_ASSERT_EQUAL(rc, 0); 3927 num_completed = stub_complete_io(1); 3928 CU_ASSERT_EQUAL(num_completed, 1); 3929 CU_ASSERT(g_io_done == true); 3930 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS); 3931 3932 /* 4. miscompare with md separated where md buf is different */ 3933 expected_io = ut_alloc_expected_io(expected_io_type, offset, num_blocks, 0); 3934 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 3935 3936 g_io_done = false; 3937 g_compare_read_buf = buf; 3938 g_compare_read_buf_len = 1024; 3939 g_compare_md_buf = md_buf_miscompare; 3940 rc = spdk_bdev_comparev_blocks_with_md(desc, ioch, &compare_iov, 1, md_buf, 3941 offset, num_blocks, io_done, NULL); 3942 CU_ASSERT_EQUAL(rc, 0); 3943 num_completed = stub_complete_io(1); 3944 CU_ASSERT_EQUAL(num_completed, 1); 3945 CU_ASSERT(g_io_done == true); 3946 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_MISCOMPARE); 3947 3948 /* 5. miscompare with md separated where buf is different */ 3949 expected_io = ut_alloc_expected_io(expected_io_type, offset, num_blocks, 0); 3950 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 3951 3952 g_io_done = false; 3953 g_compare_read_buf = buf_miscompare; 3954 g_compare_read_buf_len = sizeof(buf_miscompare); 3955 g_compare_md_buf = md_buf; 3956 rc = spdk_bdev_comparev_blocks_with_md(desc, ioch, &compare_iov, 1, md_buf, 3957 offset, num_blocks, io_done, NULL); 3958 CU_ASSERT_EQUAL(rc, 0); 3959 num_completed = stub_complete_io(1); 3960 CU_ASSERT_EQUAL(num_completed, 1); 3961 CU_ASSERT(g_io_done == true); 3962 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_MISCOMPARE); 3963 3964 bdev->md_len = 0; 3965 g_compare_md_buf = NULL; 3966 3967 spdk_put_io_channel(ioch); 3968 spdk_bdev_close(desc); 3969 free_bdev(bdev); 3970 fn_table.submit_request = stub_submit_request; 3971 ut_fini_bdev(); 3972 3973 g_io_types_supported[SPDK_BDEV_IO_TYPE_COMPARE] = true; 3974 3975 g_compare_read_buf = NULL; 3976 } 3977 3978 static void 3979 bdev_compare(void) 3980 { 3981 _bdev_compare(false); 3982 _bdev_compare_with_md(false); 3983 } 3984 3985 static void 3986 bdev_compare_emulated(void) 3987 { 3988 _bdev_compare(true); 3989 _bdev_compare_with_md(true); 3990 } 3991 3992 static void 3993 bdev_compare_and_write(void) 3994 { 3995 struct spdk_bdev *bdev; 3996 struct spdk_bdev_desc *desc = NULL; 3997 struct spdk_io_channel *ioch; 3998 struct ut_expected_io *expected_io; 3999 uint64_t offset, num_blocks; 4000 uint32_t num_completed; 4001 char aa_buf[512]; 4002 char bb_buf[512]; 4003 char cc_buf[512]; 4004 char write_buf[512]; 4005 struct iovec compare_iov; 4006 struct iovec write_iov; 4007 int rc; 4008 4009 memset(aa_buf, 0xaa, sizeof(aa_buf)); 4010 memset(bb_buf, 0xbb, sizeof(bb_buf)); 4011 memset(cc_buf, 0xcc, sizeof(cc_buf)); 4012 4013 g_io_types_supported[SPDK_BDEV_IO_TYPE_COMPARE] = false; 4014 4015 ut_init_bdev(NULL); 4016 fn_table.submit_request = stub_submit_request_get_buf; 4017 bdev = allocate_bdev("bdev"); 4018 4019 rc = spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc); 4020 CU_ASSERT_EQUAL(rc, 0); 4021 SPDK_CU_ASSERT_FATAL(desc != NULL); 4022 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 4023 ioch = spdk_bdev_get_io_channel(desc); 4024 SPDK_CU_ASSERT_FATAL(ioch != NULL); 4025 4026 fn_table.submit_request = stub_submit_request_get_buf; 4027 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 4028 4029 offset = 50; 4030 num_blocks = 1; 4031 compare_iov.iov_base = aa_buf; 4032 compare_iov.iov_len = sizeof(aa_buf); 4033 write_iov.iov_base = bb_buf; 4034 write_iov.iov_len = sizeof(bb_buf); 4035 4036 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, offset, num_blocks, 0); 4037 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 4038 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, offset, num_blocks, 0); 4039 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 4040 4041 g_io_done = false; 4042 g_compare_read_buf = aa_buf; 4043 g_compare_read_buf_len = sizeof(aa_buf); 4044 memset(write_buf, 0, sizeof(write_buf)); 4045 g_compare_write_buf = write_buf; 4046 g_compare_write_buf_len = sizeof(write_buf); 4047 rc = spdk_bdev_comparev_and_writev_blocks(desc, ioch, &compare_iov, 1, &write_iov, 1, 4048 offset, num_blocks, io_done, NULL); 4049 /* Trigger range locking */ 4050 poll_threads(); 4051 CU_ASSERT_EQUAL(rc, 0); 4052 num_completed = stub_complete_io(1); 4053 CU_ASSERT_EQUAL(num_completed, 1); 4054 CU_ASSERT(g_io_done == false); 4055 num_completed = stub_complete_io(1); 4056 /* Trigger range unlocking */ 4057 poll_threads(); 4058 CU_ASSERT_EQUAL(num_completed, 1); 4059 CU_ASSERT(g_io_done == true); 4060 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS); 4061 CU_ASSERT(memcmp(write_buf, bb_buf, sizeof(write_buf)) == 0); 4062 4063 /* Test miscompare */ 4064 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, offset, num_blocks, 0); 4065 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 4066 4067 g_io_done = false; 4068 g_compare_read_buf = cc_buf; 4069 g_compare_read_buf_len = sizeof(cc_buf); 4070 memset(write_buf, 0, sizeof(write_buf)); 4071 g_compare_write_buf = write_buf; 4072 g_compare_write_buf_len = sizeof(write_buf); 4073 rc = spdk_bdev_comparev_and_writev_blocks(desc, ioch, &compare_iov, 1, &write_iov, 1, 4074 offset, num_blocks, io_done, NULL); 4075 /* Trigger range locking */ 4076 poll_threads(); 4077 CU_ASSERT_EQUAL(rc, 0); 4078 num_completed = stub_complete_io(1); 4079 /* Trigger range unlocking earlier because we expect error here */ 4080 poll_threads(); 4081 CU_ASSERT_EQUAL(num_completed, 1); 4082 CU_ASSERT(g_io_done == true); 4083 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_MISCOMPARE); 4084 num_completed = stub_complete_io(1); 4085 CU_ASSERT_EQUAL(num_completed, 0); 4086 4087 spdk_put_io_channel(ioch); 4088 spdk_bdev_close(desc); 4089 free_bdev(bdev); 4090 fn_table.submit_request = stub_submit_request; 4091 ut_fini_bdev(); 4092 4093 g_io_types_supported[SPDK_BDEV_IO_TYPE_COMPARE] = true; 4094 4095 g_compare_read_buf = NULL; 4096 g_compare_write_buf = NULL; 4097 } 4098 4099 static void 4100 bdev_write_zeroes(void) 4101 { 4102 struct spdk_bdev *bdev; 4103 struct spdk_bdev_desc *desc = NULL; 4104 struct spdk_io_channel *ioch; 4105 struct ut_expected_io *expected_io; 4106 uint64_t offset, num_io_blocks, num_blocks; 4107 uint32_t num_completed, num_requests; 4108 int rc; 4109 4110 ut_init_bdev(NULL); 4111 bdev = allocate_bdev("bdev"); 4112 4113 rc = spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc); 4114 CU_ASSERT_EQUAL(rc, 0); 4115 SPDK_CU_ASSERT_FATAL(desc != NULL); 4116 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 4117 ioch = spdk_bdev_get_io_channel(desc); 4118 SPDK_CU_ASSERT_FATAL(ioch != NULL); 4119 4120 fn_table.submit_request = stub_submit_request; 4121 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 4122 4123 /* First test that if the bdev supports write_zeroes, the request won't be split */ 4124 bdev->md_len = 0; 4125 bdev->blocklen = 4096; 4126 num_blocks = (ZERO_BUFFER_SIZE / bdev->blocklen) * 2; 4127 4128 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, 0, num_blocks, 0); 4129 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 4130 rc = spdk_bdev_write_zeroes_blocks(desc, ioch, 0, num_blocks, io_done, NULL); 4131 CU_ASSERT_EQUAL(rc, 0); 4132 num_completed = stub_complete_io(1); 4133 CU_ASSERT_EQUAL(num_completed, 1); 4134 4135 /* Check that if write zeroes is not supported it'll be replaced by regular writes */ 4136 ut_enable_io_type(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, false); 4137 bdev->max_write_zeroes = bdev_get_max_write(bdev, ZERO_BUFFER_SIZE); 4138 num_io_blocks = ZERO_BUFFER_SIZE / bdev->blocklen; 4139 num_requests = 2; 4140 num_blocks = (ZERO_BUFFER_SIZE / bdev->blocklen) * num_requests; 4141 4142 for (offset = 0; offset < num_requests; ++offset) { 4143 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 4144 offset * num_io_blocks, num_io_blocks, 0); 4145 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 4146 } 4147 4148 rc = spdk_bdev_write_zeroes_blocks(desc, ioch, 0, num_blocks, io_done, NULL); 4149 CU_ASSERT_EQUAL(rc, 0); 4150 num_completed = stub_complete_io(num_requests); 4151 CU_ASSERT_EQUAL(num_completed, num_requests); 4152 4153 /* Check that the splitting is correct if bdev has interleaved metadata */ 4154 bdev->md_interleave = true; 4155 bdev->md_len = 64; 4156 bdev->blocklen = 4096 + 64; 4157 bdev->max_write_zeroes = bdev_get_max_write(bdev, ZERO_BUFFER_SIZE); 4158 num_blocks = (ZERO_BUFFER_SIZE / bdev->blocklen) * 2; 4159 4160 num_requests = offset = 0; 4161 while (offset < num_blocks) { 4162 num_io_blocks = spdk_min(ZERO_BUFFER_SIZE / bdev->blocklen, num_blocks - offset); 4163 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 4164 offset, num_io_blocks, 0); 4165 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 4166 offset += num_io_blocks; 4167 num_requests++; 4168 } 4169 4170 rc = spdk_bdev_write_zeroes_blocks(desc, ioch, 0, num_blocks, io_done, NULL); 4171 CU_ASSERT_EQUAL(rc, 0); 4172 num_completed = stub_complete_io(num_requests); 4173 CU_ASSERT_EQUAL(num_completed, num_requests); 4174 num_completed = stub_complete_io(num_requests); 4175 assert(num_completed == 0); 4176 4177 /* Check the the same for separate metadata buffer */ 4178 bdev->md_interleave = false; 4179 bdev->md_len = 64; 4180 bdev->blocklen = 4096; 4181 bdev->max_write_zeroes = bdev_get_max_write(bdev, ZERO_BUFFER_SIZE); 4182 4183 num_requests = offset = 0; 4184 while (offset < num_blocks) { 4185 num_io_blocks = spdk_min(ZERO_BUFFER_SIZE / (bdev->blocklen + bdev->md_len), num_blocks); 4186 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 4187 offset, num_io_blocks, 0); 4188 expected_io->md_buf = (char *)g_bdev_mgr.zero_buffer + num_io_blocks * bdev->blocklen; 4189 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 4190 offset += num_io_blocks; 4191 num_requests++; 4192 } 4193 4194 rc = spdk_bdev_write_zeroes_blocks(desc, ioch, 0, num_blocks, io_done, NULL); 4195 CU_ASSERT_EQUAL(rc, 0); 4196 num_completed = stub_complete_io(num_requests); 4197 CU_ASSERT_EQUAL(num_completed, num_requests); 4198 4199 ut_enable_io_type(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, true); 4200 spdk_put_io_channel(ioch); 4201 spdk_bdev_close(desc); 4202 free_bdev(bdev); 4203 ut_fini_bdev(); 4204 } 4205 4206 static void 4207 bdev_zcopy_write(void) 4208 { 4209 struct spdk_bdev *bdev; 4210 struct spdk_bdev_desc *desc = NULL; 4211 struct spdk_io_channel *ioch; 4212 struct ut_expected_io *expected_io; 4213 uint64_t offset, num_blocks; 4214 uint32_t num_completed; 4215 char aa_buf[512]; 4216 struct iovec iov; 4217 int rc; 4218 const bool populate = false; 4219 const bool commit = true; 4220 4221 memset(aa_buf, 0xaa, sizeof(aa_buf)); 4222 4223 ut_init_bdev(NULL); 4224 bdev = allocate_bdev("bdev"); 4225 4226 rc = spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc); 4227 CU_ASSERT_EQUAL(rc, 0); 4228 SPDK_CU_ASSERT_FATAL(desc != NULL); 4229 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 4230 ioch = spdk_bdev_get_io_channel(desc); 4231 SPDK_CU_ASSERT_FATAL(ioch != NULL); 4232 4233 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 4234 4235 offset = 50; 4236 num_blocks = 1; 4237 iov.iov_base = NULL; 4238 iov.iov_len = 0; 4239 4240 g_zcopy_read_buf = (void *) 0x1122334455667788UL; 4241 g_zcopy_read_buf_len = (uint32_t) -1; 4242 /* Do a zcopy start for a write (populate=false) */ 4243 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_ZCOPY, offset, num_blocks, 0); 4244 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 4245 g_io_done = false; 4246 g_zcopy_write_buf = aa_buf; 4247 g_zcopy_write_buf_len = sizeof(aa_buf); 4248 g_zcopy_bdev_io = NULL; 4249 rc = spdk_bdev_zcopy_start(desc, ioch, &iov, 1, offset, num_blocks, populate, io_done, NULL); 4250 CU_ASSERT_EQUAL(rc, 0); 4251 num_completed = stub_complete_io(1); 4252 CU_ASSERT_EQUAL(num_completed, 1); 4253 CU_ASSERT(g_io_done == true); 4254 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS); 4255 /* Check that the iov has been set up */ 4256 CU_ASSERT(iov.iov_base == g_zcopy_write_buf); 4257 CU_ASSERT(iov.iov_len == g_zcopy_write_buf_len); 4258 /* Check that the bdev_io has been saved */ 4259 CU_ASSERT(g_zcopy_bdev_io != NULL); 4260 /* Now do the zcopy end for a write (commit=true) */ 4261 g_io_done = false; 4262 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_ZCOPY, offset, num_blocks, 0); 4263 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 4264 rc = spdk_bdev_zcopy_end(g_zcopy_bdev_io, commit, io_done, NULL); 4265 CU_ASSERT_EQUAL(rc, 0); 4266 num_completed = stub_complete_io(1); 4267 CU_ASSERT_EQUAL(num_completed, 1); 4268 CU_ASSERT(g_io_done == true); 4269 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS); 4270 /* Check the g_zcopy are reset by io_done */ 4271 CU_ASSERT(g_zcopy_write_buf == NULL); 4272 CU_ASSERT(g_zcopy_write_buf_len == 0); 4273 /* Check that io_done has freed the g_zcopy_bdev_io */ 4274 CU_ASSERT(g_zcopy_bdev_io == NULL); 4275 4276 /* Check the zcopy read buffer has not been touched which 4277 * ensures that the correct buffers were used. 4278 */ 4279 CU_ASSERT(g_zcopy_read_buf == (void *) 0x1122334455667788UL); 4280 CU_ASSERT(g_zcopy_read_buf_len == (uint32_t) -1); 4281 4282 spdk_put_io_channel(ioch); 4283 spdk_bdev_close(desc); 4284 free_bdev(bdev); 4285 ut_fini_bdev(); 4286 } 4287 4288 static void 4289 bdev_zcopy_read(void) 4290 { 4291 struct spdk_bdev *bdev; 4292 struct spdk_bdev_desc *desc = NULL; 4293 struct spdk_io_channel *ioch; 4294 struct ut_expected_io *expected_io; 4295 uint64_t offset, num_blocks; 4296 uint32_t num_completed; 4297 char aa_buf[512]; 4298 struct iovec iov; 4299 int rc; 4300 const bool populate = true; 4301 const bool commit = false; 4302 4303 memset(aa_buf, 0xaa, sizeof(aa_buf)); 4304 4305 ut_init_bdev(NULL); 4306 bdev = allocate_bdev("bdev"); 4307 4308 rc = spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc); 4309 CU_ASSERT_EQUAL(rc, 0); 4310 SPDK_CU_ASSERT_FATAL(desc != NULL); 4311 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 4312 ioch = spdk_bdev_get_io_channel(desc); 4313 SPDK_CU_ASSERT_FATAL(ioch != NULL); 4314 4315 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 4316 4317 offset = 50; 4318 num_blocks = 1; 4319 iov.iov_base = NULL; 4320 iov.iov_len = 0; 4321 4322 g_zcopy_write_buf = (void *) 0x1122334455667788UL; 4323 g_zcopy_write_buf_len = (uint32_t) -1; 4324 4325 /* Do a zcopy start for a read (populate=true) */ 4326 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_ZCOPY, offset, num_blocks, 0); 4327 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 4328 g_io_done = false; 4329 g_zcopy_read_buf = aa_buf; 4330 g_zcopy_read_buf_len = sizeof(aa_buf); 4331 g_zcopy_bdev_io = NULL; 4332 rc = spdk_bdev_zcopy_start(desc, ioch, &iov, 1, offset, num_blocks, populate, io_done, NULL); 4333 CU_ASSERT_EQUAL(rc, 0); 4334 num_completed = stub_complete_io(1); 4335 CU_ASSERT_EQUAL(num_completed, 1); 4336 CU_ASSERT(g_io_done == true); 4337 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS); 4338 /* Check that the iov has been set up */ 4339 CU_ASSERT(iov.iov_base == g_zcopy_read_buf); 4340 CU_ASSERT(iov.iov_len == g_zcopy_read_buf_len); 4341 /* Check that the bdev_io has been saved */ 4342 CU_ASSERT(g_zcopy_bdev_io != NULL); 4343 4344 /* Now do the zcopy end for a read (commit=false) */ 4345 g_io_done = false; 4346 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_ZCOPY, offset, num_blocks, 0); 4347 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 4348 rc = spdk_bdev_zcopy_end(g_zcopy_bdev_io, commit, io_done, NULL); 4349 CU_ASSERT_EQUAL(rc, 0); 4350 num_completed = stub_complete_io(1); 4351 CU_ASSERT_EQUAL(num_completed, 1); 4352 CU_ASSERT(g_io_done == true); 4353 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS); 4354 /* Check the g_zcopy are reset by io_done */ 4355 CU_ASSERT(g_zcopy_read_buf == NULL); 4356 CU_ASSERT(g_zcopy_read_buf_len == 0); 4357 /* Check that io_done has freed the g_zcopy_bdev_io */ 4358 CU_ASSERT(g_zcopy_bdev_io == NULL); 4359 4360 /* Check the zcopy write buffer has not been touched which 4361 * ensures that the correct buffers were used. 4362 */ 4363 CU_ASSERT(g_zcopy_write_buf == (void *) 0x1122334455667788UL); 4364 CU_ASSERT(g_zcopy_write_buf_len == (uint32_t) -1); 4365 4366 spdk_put_io_channel(ioch); 4367 spdk_bdev_close(desc); 4368 free_bdev(bdev); 4369 ut_fini_bdev(); 4370 } 4371 4372 static void 4373 bdev_open_while_hotremove(void) 4374 { 4375 struct spdk_bdev *bdev; 4376 struct spdk_bdev_desc *desc[2] = {}; 4377 int rc; 4378 4379 bdev = allocate_bdev("bdev"); 4380 4381 rc = spdk_bdev_open_ext("bdev", false, bdev_ut_event_cb, NULL, &desc[0]); 4382 CU_ASSERT(rc == 0); 4383 SPDK_CU_ASSERT_FATAL(desc[0] != NULL); 4384 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc[0])); 4385 4386 spdk_bdev_unregister(bdev, NULL, NULL); 4387 /* Bdev unregister is handled asynchronously. Poll thread to complete. */ 4388 poll_threads(); 4389 4390 rc = spdk_bdev_open_ext("bdev", false, bdev_ut_event_cb, NULL, &desc[1]); 4391 CU_ASSERT(rc == -ENODEV); 4392 SPDK_CU_ASSERT_FATAL(desc[1] == NULL); 4393 4394 spdk_bdev_close(desc[0]); 4395 free_bdev(bdev); 4396 } 4397 4398 static void 4399 bdev_close_while_hotremove(void) 4400 { 4401 struct spdk_bdev *bdev; 4402 struct spdk_bdev_desc *desc = NULL; 4403 int rc = 0; 4404 4405 bdev = allocate_bdev("bdev"); 4406 4407 rc = spdk_bdev_open_ext("bdev", true, bdev_open_cb1, &desc, &desc); 4408 CU_ASSERT_EQUAL(rc, 0); 4409 SPDK_CU_ASSERT_FATAL(desc != NULL); 4410 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 4411 4412 /* Simulate hot-unplug by unregistering bdev */ 4413 g_event_type1 = 0xFF; 4414 g_unregister_arg = NULL; 4415 g_unregister_rc = -1; 4416 spdk_bdev_unregister(bdev, bdev_unregister_cb, (void *)0x12345678); 4417 /* Close device while remove event is in flight */ 4418 spdk_bdev_close(desc); 4419 4420 /* Ensure that unregister callback is delayed */ 4421 CU_ASSERT_EQUAL(g_unregister_arg, NULL); 4422 CU_ASSERT_EQUAL(g_unregister_rc, -1); 4423 4424 poll_threads(); 4425 4426 /* Event callback shall not be issued because device was closed */ 4427 CU_ASSERT_EQUAL(g_event_type1, 0xFF); 4428 /* Unregister callback is issued */ 4429 CU_ASSERT_EQUAL(g_unregister_arg, (void *)0x12345678); 4430 CU_ASSERT_EQUAL(g_unregister_rc, 0); 4431 4432 free_bdev(bdev); 4433 } 4434 4435 static void 4436 bdev_open_ext_test(void) 4437 { 4438 struct spdk_bdev *bdev; 4439 struct spdk_bdev_desc *desc1 = NULL; 4440 struct spdk_bdev_desc *desc2 = NULL; 4441 int rc = 0; 4442 4443 bdev = allocate_bdev("bdev"); 4444 4445 rc = spdk_bdev_open_ext("bdev", true, NULL, NULL, &desc1); 4446 CU_ASSERT_EQUAL(rc, -EINVAL); 4447 4448 rc = spdk_bdev_open_ext("bdev", true, bdev_open_cb1, &desc1, &desc1); 4449 CU_ASSERT_EQUAL(rc, 0); 4450 4451 rc = spdk_bdev_open_ext("bdev", true, bdev_open_cb2, &desc2, &desc2); 4452 CU_ASSERT_EQUAL(rc, 0); 4453 4454 g_event_type1 = 0xFF; 4455 g_event_type2 = 0xFF; 4456 4457 /* Simulate hot-unplug by unregistering bdev */ 4458 spdk_bdev_unregister(bdev, NULL, NULL); 4459 poll_threads(); 4460 4461 /* Check if correct events have been triggered in event callback fn */ 4462 CU_ASSERT_EQUAL(g_event_type1, SPDK_BDEV_EVENT_REMOVE); 4463 CU_ASSERT_EQUAL(g_event_type2, SPDK_BDEV_EVENT_REMOVE); 4464 4465 free_bdev(bdev); 4466 poll_threads(); 4467 } 4468 4469 static void 4470 bdev_open_ext_unregister(void) 4471 { 4472 struct spdk_bdev *bdev; 4473 struct spdk_bdev_desc *desc1 = NULL; 4474 struct spdk_bdev_desc *desc2 = NULL; 4475 struct spdk_bdev_desc *desc3 = NULL; 4476 struct spdk_bdev_desc *desc4 = NULL; 4477 int rc = 0; 4478 4479 bdev = allocate_bdev("bdev"); 4480 4481 rc = spdk_bdev_open_ext("bdev", true, NULL, NULL, &desc1); 4482 CU_ASSERT_EQUAL(rc, -EINVAL); 4483 4484 rc = spdk_bdev_open_ext("bdev", true, bdev_open_cb1, &desc1, &desc1); 4485 CU_ASSERT_EQUAL(rc, 0); 4486 4487 rc = spdk_bdev_open_ext("bdev", true, bdev_open_cb2, &desc2, &desc2); 4488 CU_ASSERT_EQUAL(rc, 0); 4489 4490 rc = spdk_bdev_open_ext("bdev", true, bdev_open_cb3, &desc3, &desc3); 4491 CU_ASSERT_EQUAL(rc, 0); 4492 4493 rc = spdk_bdev_open_ext("bdev", true, bdev_open_cb4, &desc4, &desc4); 4494 CU_ASSERT_EQUAL(rc, 0); 4495 4496 g_event_type1 = 0xFF; 4497 g_event_type2 = 0xFF; 4498 g_event_type3 = 0xFF; 4499 g_event_type4 = 0xFF; 4500 4501 g_unregister_arg = NULL; 4502 g_unregister_rc = -1; 4503 4504 /* Simulate hot-unplug by unregistering bdev */ 4505 spdk_bdev_unregister(bdev, bdev_unregister_cb, (void *)0x12345678); 4506 4507 /* 4508 * Unregister is handled asynchronously and event callback 4509 * (i.e., above bdev_open_cbN) will be called. 4510 * For bdev_open_cb3 and bdev_open_cb4, it is intended to not 4511 * close the desc3 and desc4 so that the bdev is not closed. 4512 */ 4513 poll_threads(); 4514 4515 /* Check if correct events have been triggered in event callback fn */ 4516 CU_ASSERT_EQUAL(g_event_type1, SPDK_BDEV_EVENT_REMOVE); 4517 CU_ASSERT_EQUAL(g_event_type2, SPDK_BDEV_EVENT_REMOVE); 4518 CU_ASSERT_EQUAL(g_event_type3, SPDK_BDEV_EVENT_REMOVE); 4519 CU_ASSERT_EQUAL(g_event_type4, SPDK_BDEV_EVENT_REMOVE); 4520 4521 /* Check that unregister callback is delayed */ 4522 CU_ASSERT(g_unregister_arg == NULL); 4523 CU_ASSERT(g_unregister_rc == -1); 4524 4525 /* 4526 * Explicitly close desc3. As desc4 is still opened there, the 4527 * unergister callback is still delayed to execute. 4528 */ 4529 spdk_bdev_close(desc3); 4530 CU_ASSERT(g_unregister_arg == NULL); 4531 CU_ASSERT(g_unregister_rc == -1); 4532 4533 /* 4534 * Explicitly close desc4 to trigger the ongoing bdev unregister 4535 * operation after last desc is closed. 4536 */ 4537 spdk_bdev_close(desc4); 4538 4539 /* Poll the thread for the async unregister operation */ 4540 poll_threads(); 4541 4542 /* Check that unregister callback is executed */ 4543 CU_ASSERT(g_unregister_arg == (void *)0x12345678); 4544 CU_ASSERT(g_unregister_rc == 0); 4545 4546 free_bdev(bdev); 4547 poll_threads(); 4548 } 4549 4550 struct timeout_io_cb_arg { 4551 struct iovec iov; 4552 uint8_t type; 4553 }; 4554 4555 static int 4556 bdev_channel_count_submitted_io(struct spdk_bdev_channel *ch) 4557 { 4558 struct spdk_bdev_io *bdev_io; 4559 int n = 0; 4560 4561 if (!ch) { 4562 return -1; 4563 } 4564 4565 TAILQ_FOREACH(bdev_io, &ch->io_submitted, internal.ch_link) { 4566 n++; 4567 } 4568 4569 return n; 4570 } 4571 4572 static void 4573 bdev_channel_io_timeout_cb(void *cb_arg, struct spdk_bdev_io *bdev_io) 4574 { 4575 struct timeout_io_cb_arg *ctx = cb_arg; 4576 4577 ctx->type = bdev_io->type; 4578 ctx->iov.iov_base = bdev_io->iov.iov_base; 4579 ctx->iov.iov_len = bdev_io->iov.iov_len; 4580 } 4581 4582 static void 4583 bdev_set_io_timeout(void) 4584 { 4585 struct spdk_bdev *bdev; 4586 struct spdk_bdev_desc *desc = NULL; 4587 struct spdk_io_channel *io_ch = NULL; 4588 struct spdk_bdev_channel *bdev_ch = NULL; 4589 struct timeout_io_cb_arg cb_arg; 4590 4591 ut_init_bdev(NULL); 4592 bdev = allocate_bdev("bdev"); 4593 4594 CU_ASSERT(spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc) == 0); 4595 SPDK_CU_ASSERT_FATAL(desc != NULL); 4596 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 4597 4598 io_ch = spdk_bdev_get_io_channel(desc); 4599 CU_ASSERT(io_ch != NULL); 4600 4601 bdev_ch = spdk_io_channel_get_ctx(io_ch); 4602 CU_ASSERT(TAILQ_EMPTY(&bdev_ch->io_submitted)); 4603 4604 /* This is the part1. 4605 * We will check the bdev_ch->io_submitted list 4606 * TO make sure that it can link IOs and only the user submitted IOs 4607 */ 4608 CU_ASSERT(spdk_bdev_read(desc, io_ch, (void *)0x1000, 0, 4096, io_done, NULL) == 0); 4609 CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 1); 4610 CU_ASSERT(spdk_bdev_write(desc, io_ch, (void *)0x2000, 0, 4096, io_done, NULL) == 0); 4611 CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 2); 4612 stub_complete_io(1); 4613 CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 1); 4614 stub_complete_io(1); 4615 CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 0); 4616 4617 /* Split IO */ 4618 bdev->optimal_io_boundary = 16; 4619 bdev->split_on_optimal_io_boundary = true; 4620 4621 /* Now test that a single-vector command is split correctly. 4622 * Offset 14, length 8, payload 0xF000 4623 * Child - Offset 14, length 2, payload 0xF000 4624 * Child - Offset 16, length 6, payload 0xF000 + 2 * 512 4625 * 4626 * Set up the expected values before calling spdk_bdev_read_blocks 4627 */ 4628 CU_ASSERT(spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 8, io_done, NULL) == 0); 4629 /* We count all submitted IOs including IO that are generated by splitting. */ 4630 CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 3); 4631 stub_complete_io(1); 4632 CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 2); 4633 stub_complete_io(1); 4634 CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 0); 4635 4636 /* Also include the reset IO */ 4637 CU_ASSERT(spdk_bdev_reset(desc, io_ch, io_done, NULL) == 0); 4638 CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 1); 4639 poll_threads(); 4640 stub_complete_io(1); 4641 poll_threads(); 4642 CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 0); 4643 4644 /* This is part2 4645 * Test the desc timeout poller register 4646 */ 4647 4648 /* Successfully set the timeout */ 4649 CU_ASSERT(spdk_bdev_set_timeout(desc, 30, bdev_channel_io_timeout_cb, &cb_arg) == 0); 4650 CU_ASSERT(desc->io_timeout_poller != NULL); 4651 CU_ASSERT(desc->timeout_in_sec == 30); 4652 CU_ASSERT(desc->cb_fn == bdev_channel_io_timeout_cb); 4653 CU_ASSERT(desc->cb_arg == &cb_arg); 4654 4655 /* Change the timeout limit */ 4656 CU_ASSERT(spdk_bdev_set_timeout(desc, 20, bdev_channel_io_timeout_cb, &cb_arg) == 0); 4657 CU_ASSERT(desc->io_timeout_poller != NULL); 4658 CU_ASSERT(desc->timeout_in_sec == 20); 4659 CU_ASSERT(desc->cb_fn == bdev_channel_io_timeout_cb); 4660 CU_ASSERT(desc->cb_arg == &cb_arg); 4661 4662 /* Disable the timeout */ 4663 CU_ASSERT(spdk_bdev_set_timeout(desc, 0, NULL, NULL) == 0); 4664 CU_ASSERT(desc->io_timeout_poller == NULL); 4665 4666 /* This the part3 4667 * We will test to catch timeout IO and check whether the IO is 4668 * the submitted one. 4669 */ 4670 memset(&cb_arg, 0, sizeof(cb_arg)); 4671 CU_ASSERT(spdk_bdev_set_timeout(desc, 30, bdev_channel_io_timeout_cb, &cb_arg) == 0); 4672 CU_ASSERT(spdk_bdev_write_blocks(desc, io_ch, (void *)0x1000, 0, 1, io_done, NULL) == 0); 4673 4674 /* Don't reach the limit */ 4675 spdk_delay_us(15 * spdk_get_ticks_hz()); 4676 poll_threads(); 4677 CU_ASSERT(cb_arg.type == 0); 4678 CU_ASSERT(cb_arg.iov.iov_base == (void *)0x0); 4679 CU_ASSERT(cb_arg.iov.iov_len == 0); 4680 4681 /* 15 + 15 = 30 reach the limit */ 4682 spdk_delay_us(15 * spdk_get_ticks_hz()); 4683 poll_threads(); 4684 CU_ASSERT(cb_arg.type == SPDK_BDEV_IO_TYPE_WRITE); 4685 CU_ASSERT(cb_arg.iov.iov_base == (void *)0x1000); 4686 CU_ASSERT(cb_arg.iov.iov_len == 1 * bdev->blocklen); 4687 stub_complete_io(1); 4688 4689 /* Use the same split IO above and check the IO */ 4690 memset(&cb_arg, 0, sizeof(cb_arg)); 4691 CU_ASSERT(spdk_bdev_write_blocks(desc, io_ch, (void *)0xF000, 14, 8, io_done, NULL) == 0); 4692 4693 /* The first child complete in time */ 4694 spdk_delay_us(15 * spdk_get_ticks_hz()); 4695 poll_threads(); 4696 stub_complete_io(1); 4697 CU_ASSERT(cb_arg.type == 0); 4698 CU_ASSERT(cb_arg.iov.iov_base == (void *)0x0); 4699 CU_ASSERT(cb_arg.iov.iov_len == 0); 4700 4701 /* The second child reach the limit */ 4702 spdk_delay_us(15 * spdk_get_ticks_hz()); 4703 poll_threads(); 4704 CU_ASSERT(cb_arg.type == SPDK_BDEV_IO_TYPE_WRITE); 4705 CU_ASSERT(cb_arg.iov.iov_base == (void *)0xF000); 4706 CU_ASSERT(cb_arg.iov.iov_len == 8 * bdev->blocklen); 4707 stub_complete_io(1); 4708 4709 /* Also include the reset IO */ 4710 memset(&cb_arg, 0, sizeof(cb_arg)); 4711 CU_ASSERT(spdk_bdev_reset(desc, io_ch, io_done, NULL) == 0); 4712 spdk_delay_us(30 * spdk_get_ticks_hz()); 4713 poll_threads(); 4714 CU_ASSERT(cb_arg.type == SPDK_BDEV_IO_TYPE_RESET); 4715 stub_complete_io(1); 4716 poll_threads(); 4717 4718 spdk_put_io_channel(io_ch); 4719 spdk_bdev_close(desc); 4720 free_bdev(bdev); 4721 ut_fini_bdev(); 4722 } 4723 4724 static void 4725 bdev_set_qd_sampling(void) 4726 { 4727 struct spdk_bdev *bdev; 4728 struct spdk_bdev_desc *desc = NULL; 4729 struct spdk_io_channel *io_ch = NULL; 4730 struct spdk_bdev_channel *bdev_ch = NULL; 4731 struct timeout_io_cb_arg cb_arg; 4732 4733 ut_init_bdev(NULL); 4734 bdev = allocate_bdev("bdev"); 4735 4736 CU_ASSERT(spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc) == 0); 4737 SPDK_CU_ASSERT_FATAL(desc != NULL); 4738 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 4739 4740 io_ch = spdk_bdev_get_io_channel(desc); 4741 CU_ASSERT(io_ch != NULL); 4742 4743 bdev_ch = spdk_io_channel_get_ctx(io_ch); 4744 CU_ASSERT(TAILQ_EMPTY(&bdev_ch->io_submitted)); 4745 4746 /* This is the part1. 4747 * We will check the bdev_ch->io_submitted list 4748 * TO make sure that it can link IOs and only the user submitted IOs 4749 */ 4750 CU_ASSERT(spdk_bdev_read(desc, io_ch, (void *)0x1000, 0, 4096, io_done, NULL) == 0); 4751 CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 1); 4752 CU_ASSERT(spdk_bdev_write(desc, io_ch, (void *)0x2000, 0, 4096, io_done, NULL) == 0); 4753 CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 2); 4754 stub_complete_io(1); 4755 CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 1); 4756 stub_complete_io(1); 4757 CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 0); 4758 4759 /* This is the part2. 4760 * Test the bdev's qd poller register 4761 */ 4762 /* 1st Successfully set the qd sampling period */ 4763 spdk_bdev_set_qd_sampling_period(bdev, 10); 4764 CU_ASSERT(bdev->internal.new_period == 10); 4765 CU_ASSERT(bdev->internal.period == 10); 4766 CU_ASSERT(bdev->internal.qd_desc != NULL); 4767 poll_threads(); 4768 CU_ASSERT(bdev->internal.qd_poller != NULL); 4769 4770 /* 2nd Change the qd sampling period */ 4771 spdk_bdev_set_qd_sampling_period(bdev, 20); 4772 CU_ASSERT(bdev->internal.new_period == 20); 4773 CU_ASSERT(bdev->internal.period == 10); 4774 CU_ASSERT(bdev->internal.qd_desc != NULL); 4775 poll_threads(); 4776 CU_ASSERT(bdev->internal.qd_poller != NULL); 4777 CU_ASSERT(bdev->internal.period == bdev->internal.new_period); 4778 4779 /* 3rd Change the qd sampling period and verify qd_poll_in_progress */ 4780 spdk_delay_us(20); 4781 poll_thread_times(0, 1); 4782 CU_ASSERT(bdev->internal.qd_poll_in_progress == true); 4783 spdk_bdev_set_qd_sampling_period(bdev, 30); 4784 CU_ASSERT(bdev->internal.new_period == 30); 4785 CU_ASSERT(bdev->internal.period == 20); 4786 poll_threads(); 4787 CU_ASSERT(bdev->internal.qd_poll_in_progress == false); 4788 CU_ASSERT(bdev->internal.period == bdev->internal.new_period); 4789 4790 /* 4th Disable the qd sampling period */ 4791 spdk_bdev_set_qd_sampling_period(bdev, 0); 4792 CU_ASSERT(bdev->internal.new_period == 0); 4793 CU_ASSERT(bdev->internal.period == 30); 4794 poll_threads(); 4795 CU_ASSERT(bdev->internal.qd_poller == NULL); 4796 CU_ASSERT(bdev->internal.period == bdev->internal.new_period); 4797 CU_ASSERT(bdev->internal.qd_desc == NULL); 4798 4799 /* This is the part3. 4800 * We will test the submitted IO and reset works 4801 * properly with the qd sampling. 4802 */ 4803 memset(&cb_arg, 0, sizeof(cb_arg)); 4804 spdk_bdev_set_qd_sampling_period(bdev, 1); 4805 poll_threads(); 4806 4807 CU_ASSERT(spdk_bdev_write(desc, io_ch, (void *)0x2000, 0, 4096, io_done, NULL) == 0); 4808 CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 1); 4809 4810 /* Also include the reset IO */ 4811 memset(&cb_arg, 0, sizeof(cb_arg)); 4812 CU_ASSERT(spdk_bdev_reset(desc, io_ch, io_done, NULL) == 0); 4813 poll_threads(); 4814 4815 /* Close the desc */ 4816 spdk_put_io_channel(io_ch); 4817 spdk_bdev_close(desc); 4818 4819 /* Complete the submitted IO and reset */ 4820 stub_complete_io(2); 4821 poll_threads(); 4822 4823 free_bdev(bdev); 4824 ut_fini_bdev(); 4825 } 4826 4827 static void 4828 lba_range_overlap(void) 4829 { 4830 struct lba_range r1, r2; 4831 4832 r1.offset = 100; 4833 r1.length = 50; 4834 4835 r2.offset = 0; 4836 r2.length = 1; 4837 CU_ASSERT(!bdev_lba_range_overlapped(&r1, &r2)); 4838 4839 r2.offset = 0; 4840 r2.length = 100; 4841 CU_ASSERT(!bdev_lba_range_overlapped(&r1, &r2)); 4842 4843 r2.offset = 0; 4844 r2.length = 110; 4845 CU_ASSERT(bdev_lba_range_overlapped(&r1, &r2)); 4846 4847 r2.offset = 100; 4848 r2.length = 10; 4849 CU_ASSERT(bdev_lba_range_overlapped(&r1, &r2)); 4850 4851 r2.offset = 110; 4852 r2.length = 20; 4853 CU_ASSERT(bdev_lba_range_overlapped(&r1, &r2)); 4854 4855 r2.offset = 140; 4856 r2.length = 150; 4857 CU_ASSERT(bdev_lba_range_overlapped(&r1, &r2)); 4858 4859 r2.offset = 130; 4860 r2.length = 200; 4861 CU_ASSERT(bdev_lba_range_overlapped(&r1, &r2)); 4862 4863 r2.offset = 150; 4864 r2.length = 100; 4865 CU_ASSERT(!bdev_lba_range_overlapped(&r1, &r2)); 4866 4867 r2.offset = 110; 4868 r2.length = 0; 4869 CU_ASSERT(!bdev_lba_range_overlapped(&r1, &r2)); 4870 } 4871 4872 static bool g_lock_lba_range_done; 4873 static bool g_unlock_lba_range_done; 4874 4875 static void 4876 lock_lba_range_done(struct lba_range *range, void *ctx, int status) 4877 { 4878 g_lock_lba_range_done = true; 4879 } 4880 4881 static void 4882 unlock_lba_range_done(struct lba_range *range, void *ctx, int status) 4883 { 4884 g_unlock_lba_range_done = true; 4885 } 4886 4887 static void 4888 lock_lba_range_check_ranges(void) 4889 { 4890 struct spdk_bdev *bdev; 4891 struct spdk_bdev_desc *desc = NULL; 4892 struct spdk_io_channel *io_ch; 4893 struct spdk_bdev_channel *channel; 4894 struct lba_range *range; 4895 int ctx1; 4896 int rc; 4897 4898 ut_init_bdev(NULL); 4899 bdev = allocate_bdev("bdev0"); 4900 4901 rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc); 4902 CU_ASSERT(rc == 0); 4903 CU_ASSERT(desc != NULL); 4904 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 4905 io_ch = spdk_bdev_get_io_channel(desc); 4906 CU_ASSERT(io_ch != NULL); 4907 channel = spdk_io_channel_get_ctx(io_ch); 4908 4909 g_lock_lba_range_done = false; 4910 rc = bdev_lock_lba_range(desc, io_ch, 20, 10, lock_lba_range_done, &ctx1); 4911 CU_ASSERT(rc == 0); 4912 poll_threads(); 4913 4914 CU_ASSERT(g_lock_lba_range_done == true); 4915 range = TAILQ_FIRST(&channel->locked_ranges); 4916 SPDK_CU_ASSERT_FATAL(range != NULL); 4917 CU_ASSERT(range->offset == 20); 4918 CU_ASSERT(range->length == 10); 4919 CU_ASSERT(range->owner_ch == channel); 4920 4921 /* Unlocks must exactly match a lock. */ 4922 g_unlock_lba_range_done = false; 4923 rc = bdev_unlock_lba_range(desc, io_ch, 20, 1, unlock_lba_range_done, &ctx1); 4924 CU_ASSERT(rc == -EINVAL); 4925 CU_ASSERT(g_unlock_lba_range_done == false); 4926 4927 rc = bdev_unlock_lba_range(desc, io_ch, 20, 10, unlock_lba_range_done, &ctx1); 4928 CU_ASSERT(rc == 0); 4929 spdk_delay_us(100); 4930 poll_threads(); 4931 4932 CU_ASSERT(g_unlock_lba_range_done == true); 4933 CU_ASSERT(TAILQ_EMPTY(&channel->locked_ranges)); 4934 4935 spdk_put_io_channel(io_ch); 4936 spdk_bdev_close(desc); 4937 free_bdev(bdev); 4938 ut_fini_bdev(); 4939 } 4940 4941 static void 4942 lock_lba_range_with_io_outstanding(void) 4943 { 4944 struct spdk_bdev *bdev; 4945 struct spdk_bdev_desc *desc = NULL; 4946 struct spdk_io_channel *io_ch; 4947 struct spdk_bdev_channel *channel; 4948 struct lba_range *range; 4949 char buf[4096]; 4950 int ctx1; 4951 int rc; 4952 4953 ut_init_bdev(NULL); 4954 bdev = allocate_bdev("bdev0"); 4955 4956 rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc); 4957 CU_ASSERT(rc == 0); 4958 CU_ASSERT(desc != NULL); 4959 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 4960 io_ch = spdk_bdev_get_io_channel(desc); 4961 CU_ASSERT(io_ch != NULL); 4962 channel = spdk_io_channel_get_ctx(io_ch); 4963 4964 g_io_done = false; 4965 rc = spdk_bdev_read_blocks(desc, io_ch, buf, 20, 1, io_done, &ctx1); 4966 CU_ASSERT(rc == 0); 4967 4968 g_lock_lba_range_done = false; 4969 rc = bdev_lock_lba_range(desc, io_ch, 20, 10, lock_lba_range_done, &ctx1); 4970 CU_ASSERT(rc == 0); 4971 poll_threads(); 4972 4973 /* The lock should immediately become valid, since there are no outstanding 4974 * write I/O. 4975 */ 4976 CU_ASSERT(g_io_done == false); 4977 CU_ASSERT(g_lock_lba_range_done == true); 4978 range = TAILQ_FIRST(&channel->locked_ranges); 4979 SPDK_CU_ASSERT_FATAL(range != NULL); 4980 CU_ASSERT(range->offset == 20); 4981 CU_ASSERT(range->length == 10); 4982 CU_ASSERT(range->owner_ch == channel); 4983 CU_ASSERT(range->locked_ctx == &ctx1); 4984 4985 rc = bdev_unlock_lba_range(desc, io_ch, 20, 10, lock_lba_range_done, &ctx1); 4986 CU_ASSERT(rc == 0); 4987 stub_complete_io(1); 4988 spdk_delay_us(100); 4989 poll_threads(); 4990 4991 CU_ASSERT(TAILQ_EMPTY(&channel->locked_ranges)); 4992 4993 /* Now try again, but with a write I/O. */ 4994 g_io_done = false; 4995 rc = spdk_bdev_write_blocks(desc, io_ch, buf, 20, 1, io_done, &ctx1); 4996 CU_ASSERT(rc == 0); 4997 4998 g_lock_lba_range_done = false; 4999 rc = bdev_lock_lba_range(desc, io_ch, 20, 10, lock_lba_range_done, &ctx1); 5000 CU_ASSERT(rc == 0); 5001 poll_threads(); 5002 5003 /* The lock should not be fully valid yet, since a write I/O is outstanding. 5004 * But note that the range should be on the channel's locked_list, to make sure no 5005 * new write I/O are started. 5006 */ 5007 CU_ASSERT(g_io_done == false); 5008 CU_ASSERT(g_lock_lba_range_done == false); 5009 range = TAILQ_FIRST(&channel->locked_ranges); 5010 SPDK_CU_ASSERT_FATAL(range != NULL); 5011 CU_ASSERT(range->offset == 20); 5012 CU_ASSERT(range->length == 10); 5013 5014 /* Complete the write I/O. This should make the lock valid (checked by confirming 5015 * our callback was invoked). 5016 */ 5017 stub_complete_io(1); 5018 spdk_delay_us(100); 5019 poll_threads(); 5020 CU_ASSERT(g_io_done == true); 5021 CU_ASSERT(g_lock_lba_range_done == true); 5022 5023 rc = bdev_unlock_lba_range(desc, io_ch, 20, 10, unlock_lba_range_done, &ctx1); 5024 CU_ASSERT(rc == 0); 5025 poll_threads(); 5026 5027 CU_ASSERT(TAILQ_EMPTY(&channel->locked_ranges)); 5028 5029 spdk_put_io_channel(io_ch); 5030 spdk_bdev_close(desc); 5031 free_bdev(bdev); 5032 ut_fini_bdev(); 5033 } 5034 5035 static void 5036 lock_lba_range_overlapped(void) 5037 { 5038 struct spdk_bdev *bdev; 5039 struct spdk_bdev_desc *desc = NULL; 5040 struct spdk_io_channel *io_ch; 5041 struct spdk_bdev_channel *channel; 5042 struct lba_range *range; 5043 int ctx1; 5044 int rc; 5045 5046 ut_init_bdev(NULL); 5047 bdev = allocate_bdev("bdev0"); 5048 5049 rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc); 5050 CU_ASSERT(rc == 0); 5051 CU_ASSERT(desc != NULL); 5052 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 5053 io_ch = spdk_bdev_get_io_channel(desc); 5054 CU_ASSERT(io_ch != NULL); 5055 channel = spdk_io_channel_get_ctx(io_ch); 5056 5057 /* Lock range 20-29. */ 5058 g_lock_lba_range_done = false; 5059 rc = bdev_lock_lba_range(desc, io_ch, 20, 10, lock_lba_range_done, &ctx1); 5060 CU_ASSERT(rc == 0); 5061 poll_threads(); 5062 5063 CU_ASSERT(g_lock_lba_range_done == true); 5064 range = TAILQ_FIRST(&channel->locked_ranges); 5065 SPDK_CU_ASSERT_FATAL(range != NULL); 5066 CU_ASSERT(range->offset == 20); 5067 CU_ASSERT(range->length == 10); 5068 5069 /* Try to lock range 25-39. It should not lock immediately, since it overlaps with 5070 * 20-29. 5071 */ 5072 g_lock_lba_range_done = false; 5073 rc = bdev_lock_lba_range(desc, io_ch, 25, 15, lock_lba_range_done, &ctx1); 5074 CU_ASSERT(rc == 0); 5075 poll_threads(); 5076 5077 CU_ASSERT(g_lock_lba_range_done == false); 5078 range = TAILQ_FIRST(&bdev->internal.pending_locked_ranges); 5079 SPDK_CU_ASSERT_FATAL(range != NULL); 5080 CU_ASSERT(range->offset == 25); 5081 CU_ASSERT(range->length == 15); 5082 5083 /* Unlock 20-29. This should result in range 25-39 now getting locked since it 5084 * no longer overlaps with an active lock. 5085 */ 5086 g_unlock_lba_range_done = false; 5087 rc = bdev_unlock_lba_range(desc, io_ch, 20, 10, unlock_lba_range_done, &ctx1); 5088 CU_ASSERT(rc == 0); 5089 poll_threads(); 5090 5091 CU_ASSERT(g_unlock_lba_range_done == true); 5092 CU_ASSERT(TAILQ_EMPTY(&bdev->internal.pending_locked_ranges)); 5093 range = TAILQ_FIRST(&channel->locked_ranges); 5094 SPDK_CU_ASSERT_FATAL(range != NULL); 5095 CU_ASSERT(range->offset == 25); 5096 CU_ASSERT(range->length == 15); 5097 5098 /* Lock 40-59. This should immediately lock since it does not overlap with the 5099 * currently active 25-39 lock. 5100 */ 5101 g_lock_lba_range_done = false; 5102 rc = bdev_lock_lba_range(desc, io_ch, 40, 20, lock_lba_range_done, &ctx1); 5103 CU_ASSERT(rc == 0); 5104 poll_threads(); 5105 5106 CU_ASSERT(g_lock_lba_range_done == true); 5107 range = TAILQ_FIRST(&bdev->internal.locked_ranges); 5108 SPDK_CU_ASSERT_FATAL(range != NULL); 5109 range = TAILQ_NEXT(range, tailq); 5110 SPDK_CU_ASSERT_FATAL(range != NULL); 5111 CU_ASSERT(range->offset == 40); 5112 CU_ASSERT(range->length == 20); 5113 5114 /* Try to lock 35-44. Note that this overlaps with both 25-39 and 40-59. */ 5115 g_lock_lba_range_done = false; 5116 rc = bdev_lock_lba_range(desc, io_ch, 35, 10, lock_lba_range_done, &ctx1); 5117 CU_ASSERT(rc == 0); 5118 poll_threads(); 5119 5120 CU_ASSERT(g_lock_lba_range_done == false); 5121 range = TAILQ_FIRST(&bdev->internal.pending_locked_ranges); 5122 SPDK_CU_ASSERT_FATAL(range != NULL); 5123 CU_ASSERT(range->offset == 35); 5124 CU_ASSERT(range->length == 10); 5125 5126 /* Unlock 25-39. Make sure that 35-44 is still in the pending list, since 5127 * the 40-59 lock is still active. 5128 */ 5129 g_unlock_lba_range_done = false; 5130 rc = bdev_unlock_lba_range(desc, io_ch, 25, 15, unlock_lba_range_done, &ctx1); 5131 CU_ASSERT(rc == 0); 5132 poll_threads(); 5133 5134 CU_ASSERT(g_unlock_lba_range_done == true); 5135 CU_ASSERT(g_lock_lba_range_done == false); 5136 range = TAILQ_FIRST(&bdev->internal.pending_locked_ranges); 5137 SPDK_CU_ASSERT_FATAL(range != NULL); 5138 CU_ASSERT(range->offset == 35); 5139 CU_ASSERT(range->length == 10); 5140 5141 /* Unlock 40-59. This should result in 35-44 now getting locked, since there are 5142 * no longer any active overlapping locks. 5143 */ 5144 g_unlock_lba_range_done = false; 5145 rc = bdev_unlock_lba_range(desc, io_ch, 40, 20, unlock_lba_range_done, &ctx1); 5146 CU_ASSERT(rc == 0); 5147 poll_threads(); 5148 5149 CU_ASSERT(g_unlock_lba_range_done == true); 5150 CU_ASSERT(g_lock_lba_range_done == true); 5151 CU_ASSERT(TAILQ_EMPTY(&bdev->internal.pending_locked_ranges)); 5152 range = TAILQ_FIRST(&bdev->internal.locked_ranges); 5153 SPDK_CU_ASSERT_FATAL(range != NULL); 5154 CU_ASSERT(range->offset == 35); 5155 CU_ASSERT(range->length == 10); 5156 5157 /* Finally, unlock 35-44. */ 5158 g_unlock_lba_range_done = false; 5159 rc = bdev_unlock_lba_range(desc, io_ch, 35, 10, unlock_lba_range_done, &ctx1); 5160 CU_ASSERT(rc == 0); 5161 poll_threads(); 5162 5163 CU_ASSERT(g_unlock_lba_range_done == true); 5164 CU_ASSERT(TAILQ_EMPTY(&bdev->internal.locked_ranges)); 5165 5166 spdk_put_io_channel(io_ch); 5167 spdk_bdev_close(desc); 5168 free_bdev(bdev); 5169 ut_fini_bdev(); 5170 } 5171 5172 static void 5173 bdev_quiesce_done(void *ctx, int status) 5174 { 5175 g_lock_lba_range_done = true; 5176 } 5177 5178 static void 5179 bdev_unquiesce_done(void *ctx, int status) 5180 { 5181 g_unlock_lba_range_done = true; 5182 } 5183 5184 static void 5185 bdev_quiesce_done_unquiesce(void *ctx, int status) 5186 { 5187 struct spdk_bdev *bdev = ctx; 5188 int rc; 5189 5190 g_lock_lba_range_done = true; 5191 5192 rc = spdk_bdev_unquiesce(bdev, &bdev_ut_if, bdev_unquiesce_done, NULL); 5193 CU_ASSERT(rc == 0); 5194 } 5195 5196 static void 5197 bdev_quiesce(void) 5198 { 5199 struct spdk_bdev *bdev; 5200 struct spdk_bdev_desc *desc = NULL; 5201 struct spdk_io_channel *io_ch; 5202 struct spdk_bdev_channel *channel; 5203 struct lba_range *range; 5204 struct spdk_bdev_io *bdev_io; 5205 int ctx1; 5206 int rc; 5207 5208 ut_init_bdev(NULL); 5209 bdev = allocate_bdev("bdev0"); 5210 5211 rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc); 5212 CU_ASSERT(rc == 0); 5213 CU_ASSERT(desc != NULL); 5214 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 5215 io_ch = spdk_bdev_get_io_channel(desc); 5216 CU_ASSERT(io_ch != NULL); 5217 channel = spdk_io_channel_get_ctx(io_ch); 5218 5219 g_lock_lba_range_done = false; 5220 rc = spdk_bdev_quiesce(bdev, &bdev_ut_if, bdev_quiesce_done, &ctx1); 5221 CU_ASSERT(rc == 0); 5222 poll_threads(); 5223 5224 CU_ASSERT(g_lock_lba_range_done == true); 5225 range = TAILQ_FIRST(&channel->locked_ranges); 5226 SPDK_CU_ASSERT_FATAL(range != NULL); 5227 CU_ASSERT(range->offset == 0); 5228 CU_ASSERT(range->length == bdev->blockcnt); 5229 CU_ASSERT(range->owner_ch == NULL); 5230 range = TAILQ_FIRST(&bdev_ut_if.internal.quiesced_ranges); 5231 SPDK_CU_ASSERT_FATAL(range != NULL); 5232 CU_ASSERT(range->offset == 0); 5233 CU_ASSERT(range->length == bdev->blockcnt); 5234 CU_ASSERT(range->owner_ch == NULL); 5235 5236 g_unlock_lba_range_done = false; 5237 rc = spdk_bdev_unquiesce(bdev, &bdev_ut_if, bdev_unquiesce_done, &ctx1); 5238 CU_ASSERT(rc == 0); 5239 spdk_delay_us(100); 5240 poll_threads(); 5241 5242 CU_ASSERT(g_unlock_lba_range_done == true); 5243 CU_ASSERT(TAILQ_EMPTY(&channel->locked_ranges)); 5244 CU_ASSERT(TAILQ_EMPTY(&bdev_ut_if.internal.quiesced_ranges)); 5245 5246 g_lock_lba_range_done = false; 5247 rc = spdk_bdev_quiesce_range(bdev, &bdev_ut_if, 20, 10, bdev_quiesce_done, &ctx1); 5248 CU_ASSERT(rc == 0); 5249 poll_threads(); 5250 5251 CU_ASSERT(g_lock_lba_range_done == true); 5252 range = TAILQ_FIRST(&channel->locked_ranges); 5253 SPDK_CU_ASSERT_FATAL(range != NULL); 5254 CU_ASSERT(range->offset == 20); 5255 CU_ASSERT(range->length == 10); 5256 CU_ASSERT(range->owner_ch == NULL); 5257 range = TAILQ_FIRST(&bdev_ut_if.internal.quiesced_ranges); 5258 SPDK_CU_ASSERT_FATAL(range != NULL); 5259 CU_ASSERT(range->offset == 20); 5260 CU_ASSERT(range->length == 10); 5261 CU_ASSERT(range->owner_ch == NULL); 5262 5263 /* Unlocks must exactly match a lock. */ 5264 g_unlock_lba_range_done = false; 5265 rc = spdk_bdev_unquiesce_range(bdev, &bdev_ut_if, 20, 1, bdev_unquiesce_done, &ctx1); 5266 CU_ASSERT(rc == -EINVAL); 5267 CU_ASSERT(g_unlock_lba_range_done == false); 5268 5269 rc = spdk_bdev_unquiesce_range(bdev, &bdev_ut_if, 20, 10, bdev_unquiesce_done, &ctx1); 5270 CU_ASSERT(rc == 0); 5271 spdk_delay_us(100); 5272 poll_threads(); 5273 5274 CU_ASSERT(g_unlock_lba_range_done == true); 5275 CU_ASSERT(TAILQ_EMPTY(&channel->locked_ranges)); 5276 CU_ASSERT(TAILQ_EMPTY(&bdev_ut_if.internal.quiesced_ranges)); 5277 5278 /* Test unquiesce from quiesce cb */ 5279 g_lock_lba_range_done = false; 5280 g_unlock_lba_range_done = false; 5281 rc = spdk_bdev_quiesce(bdev, &bdev_ut_if, bdev_quiesce_done_unquiesce, bdev); 5282 CU_ASSERT(rc == 0); 5283 poll_threads(); 5284 5285 CU_ASSERT(g_lock_lba_range_done == true); 5286 CU_ASSERT(g_unlock_lba_range_done == true); 5287 5288 /* Test quiesce with read I/O */ 5289 g_lock_lba_range_done = false; 5290 g_unlock_lba_range_done = false; 5291 g_io_done = false; 5292 rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 20, 1, io_done, &ctx1); 5293 CU_ASSERT(rc == 0); 5294 5295 rc = spdk_bdev_quiesce(bdev, &bdev_ut_if, bdev_quiesce_done, &ctx1); 5296 CU_ASSERT(rc == 0); 5297 poll_threads(); 5298 5299 CU_ASSERT(g_io_done == false); 5300 CU_ASSERT(g_lock_lba_range_done == false); 5301 range = TAILQ_FIRST(&channel->locked_ranges); 5302 SPDK_CU_ASSERT_FATAL(range != NULL); 5303 5304 stub_complete_io(1); 5305 spdk_delay_us(100); 5306 poll_threads(); 5307 CU_ASSERT(g_io_done == true); 5308 CU_ASSERT(g_lock_lba_range_done == true); 5309 CU_ASSERT(TAILQ_EMPTY(&channel->io_locked)); 5310 5311 g_io_done = false; 5312 rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 20, 1, io_done, &ctx1); 5313 CU_ASSERT(rc == 0); 5314 5315 bdev_io = TAILQ_FIRST(&channel->io_locked); 5316 SPDK_CU_ASSERT_FATAL(bdev_io != NULL); 5317 CU_ASSERT(bdev_io->u.bdev.offset_blocks == 20); 5318 CU_ASSERT(bdev_io->u.bdev.num_blocks == 1); 5319 5320 rc = spdk_bdev_unquiesce(bdev, &bdev_ut_if, bdev_unquiesce_done, &ctx1); 5321 CU_ASSERT(rc == 0); 5322 spdk_delay_us(100); 5323 poll_threads(); 5324 5325 CU_ASSERT(g_unlock_lba_range_done == true); 5326 CU_ASSERT(TAILQ_EMPTY(&channel->locked_ranges)); 5327 CU_ASSERT(TAILQ_EMPTY(&bdev_ut_if.internal.quiesced_ranges)); 5328 5329 CU_ASSERT(TAILQ_EMPTY(&channel->io_locked)); 5330 spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_SUCCESS); 5331 poll_threads(); 5332 CU_ASSERT(g_io_done == true); 5333 5334 spdk_put_io_channel(io_ch); 5335 spdk_bdev_close(desc); 5336 free_bdev(bdev); 5337 ut_fini_bdev(); 5338 } 5339 5340 static void 5341 abort_done(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg) 5342 { 5343 g_abort_done = true; 5344 g_abort_status = bdev_io->internal.status; 5345 spdk_bdev_free_io(bdev_io); 5346 } 5347 5348 static void 5349 bdev_io_abort(void) 5350 { 5351 struct spdk_bdev *bdev; 5352 struct spdk_bdev_desc *desc = NULL; 5353 struct spdk_io_channel *io_ch; 5354 struct spdk_bdev_channel *channel; 5355 struct spdk_bdev_mgmt_channel *mgmt_ch; 5356 struct spdk_bdev_opts bdev_opts = {}; 5357 struct iovec iov[SPDK_BDEV_IO_NUM_CHILD_IOV * 2]; 5358 uint64_t io_ctx1 = 0, io_ctx2 = 0, i; 5359 int rc; 5360 5361 spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts)); 5362 bdev_opts.bdev_io_pool_size = 7; 5363 bdev_opts.bdev_io_cache_size = 2; 5364 ut_init_bdev(&bdev_opts); 5365 5366 bdev = allocate_bdev("bdev0"); 5367 5368 rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc); 5369 CU_ASSERT(rc == 0); 5370 CU_ASSERT(desc != NULL); 5371 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 5372 io_ch = spdk_bdev_get_io_channel(desc); 5373 CU_ASSERT(io_ch != NULL); 5374 channel = spdk_io_channel_get_ctx(io_ch); 5375 mgmt_ch = channel->shared_resource->mgmt_ch; 5376 5377 g_abort_done = false; 5378 5379 ut_enable_io_type(SPDK_BDEV_IO_TYPE_ABORT, false); 5380 5381 rc = spdk_bdev_abort(desc, io_ch, &io_ctx1, abort_done, NULL); 5382 CU_ASSERT(rc == -ENOTSUP); 5383 5384 ut_enable_io_type(SPDK_BDEV_IO_TYPE_ABORT, true); 5385 5386 rc = spdk_bdev_abort(desc, io_ch, &io_ctx2, abort_done, NULL); 5387 CU_ASSERT(rc == 0); 5388 CU_ASSERT(g_abort_done == true); 5389 CU_ASSERT(g_abort_status == SPDK_BDEV_IO_STATUS_FAILED); 5390 5391 /* Test the case that the target I/O was successfully aborted. */ 5392 g_io_done = false; 5393 5394 rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, &io_ctx1); 5395 CU_ASSERT(rc == 0); 5396 CU_ASSERT(g_io_done == false); 5397 5398 g_abort_done = false; 5399 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 5400 5401 rc = spdk_bdev_abort(desc, io_ch, &io_ctx1, abort_done, NULL); 5402 CU_ASSERT(rc == 0); 5403 CU_ASSERT(g_io_done == true); 5404 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED); 5405 stub_complete_io(1); 5406 CU_ASSERT(g_abort_done == true); 5407 CU_ASSERT(g_abort_status == SPDK_BDEV_IO_STATUS_SUCCESS); 5408 5409 /* Test the case that the target I/O was not aborted because it completed 5410 * in the middle of execution of the abort. 5411 */ 5412 g_io_done = false; 5413 5414 rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, &io_ctx1); 5415 CU_ASSERT(rc == 0); 5416 CU_ASSERT(g_io_done == false); 5417 5418 g_abort_done = false; 5419 g_io_exp_status = SPDK_BDEV_IO_STATUS_FAILED; 5420 5421 rc = spdk_bdev_abort(desc, io_ch, &io_ctx1, abort_done, NULL); 5422 CU_ASSERT(rc == 0); 5423 CU_ASSERT(g_io_done == false); 5424 5425 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 5426 stub_complete_io(1); 5427 CU_ASSERT(g_io_done == true); 5428 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS); 5429 5430 g_io_exp_status = SPDK_BDEV_IO_STATUS_FAILED; 5431 stub_complete_io(1); 5432 CU_ASSERT(g_abort_done == true); 5433 CU_ASSERT(g_abort_status == SPDK_BDEV_IO_STATUS_SUCCESS); 5434 5435 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 5436 5437 bdev->optimal_io_boundary = 16; 5438 bdev->split_on_optimal_io_boundary = true; 5439 5440 /* Test that a single-vector command which is split is aborted correctly. 5441 * Offset 14, length 8, payload 0xF000 5442 * Child - Offset 14, length 2, payload 0xF000 5443 * Child - Offset 16, length 6, payload 0xF000 + 2 * 512 5444 */ 5445 g_io_done = false; 5446 5447 rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 8, io_done, &io_ctx1); 5448 CU_ASSERT(rc == 0); 5449 CU_ASSERT(g_io_done == false); 5450 5451 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 5452 5453 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 5454 5455 rc = spdk_bdev_abort(desc, io_ch, &io_ctx1, abort_done, NULL); 5456 CU_ASSERT(rc == 0); 5457 CU_ASSERT(g_io_done == true); 5458 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED); 5459 stub_complete_io(2); 5460 CU_ASSERT(g_abort_done == true); 5461 CU_ASSERT(g_abort_status == SPDK_BDEV_IO_STATUS_SUCCESS); 5462 5463 /* Test that a multi-vector command that needs to be split by strip and then 5464 * needs to be split is aborted correctly. Abort is requested before the second 5465 * child I/O was submitted. The parent I/O should complete with failure without 5466 * submitting the second child I/O. 5467 */ 5468 for (i = 0; i < SPDK_BDEV_IO_NUM_CHILD_IOV * 2; i++) { 5469 iov[i].iov_base = (void *)((i + 1) * 0x10000); 5470 iov[i].iov_len = 512; 5471 } 5472 5473 bdev->optimal_io_boundary = SPDK_BDEV_IO_NUM_CHILD_IOV; 5474 g_io_done = false; 5475 rc = spdk_bdev_readv_blocks(desc, io_ch, iov, SPDK_BDEV_IO_NUM_CHILD_IOV * 2, 0, 5476 SPDK_BDEV_IO_NUM_CHILD_IOV * 2, io_done, &io_ctx1); 5477 CU_ASSERT(rc == 0); 5478 CU_ASSERT(g_io_done == false); 5479 5480 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 5481 5482 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 5483 5484 rc = spdk_bdev_abort(desc, io_ch, &io_ctx1, abort_done, NULL); 5485 CU_ASSERT(rc == 0); 5486 CU_ASSERT(g_io_done == true); 5487 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED); 5488 stub_complete_io(1); 5489 CU_ASSERT(g_abort_done == true); 5490 CU_ASSERT(g_abort_status == SPDK_BDEV_IO_STATUS_SUCCESS); 5491 5492 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 5493 5494 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 5495 5496 bdev->optimal_io_boundary = 16; 5497 g_io_done = false; 5498 5499 /* Test that a ingle-vector command which is split is aborted correctly. 5500 * Differently from the above, the child abort request will be submitted 5501 * sequentially due to the capacity of spdk_bdev_io. 5502 */ 5503 rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 50, io_done, &io_ctx1); 5504 CU_ASSERT(rc == 0); 5505 CU_ASSERT(g_io_done == false); 5506 5507 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 4); 5508 5509 g_abort_done = false; 5510 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 5511 5512 rc = spdk_bdev_abort(desc, io_ch, &io_ctx1, abort_done, NULL); 5513 CU_ASSERT(rc == 0); 5514 CU_ASSERT(!TAILQ_EMPTY(&mgmt_ch->io_wait_queue)); 5515 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 4); 5516 5517 stub_complete_io(1); 5518 CU_ASSERT(g_io_done == true); 5519 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED); 5520 stub_complete_io(3); 5521 CU_ASSERT(g_abort_done == true); 5522 CU_ASSERT(g_abort_status == SPDK_BDEV_IO_STATUS_SUCCESS); 5523 5524 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 5525 5526 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 5527 5528 spdk_put_io_channel(io_ch); 5529 spdk_bdev_close(desc); 5530 free_bdev(bdev); 5531 ut_fini_bdev(); 5532 } 5533 5534 static void 5535 bdev_unmap(void) 5536 { 5537 struct spdk_bdev *bdev; 5538 struct spdk_bdev_desc *desc = NULL; 5539 struct spdk_io_channel *ioch; 5540 struct spdk_bdev_channel *bdev_ch; 5541 struct ut_expected_io *expected_io; 5542 struct spdk_bdev_opts bdev_opts = {}; 5543 uint32_t i, num_outstanding; 5544 uint64_t offset, num_blocks, max_unmap_blocks, num_children; 5545 int rc; 5546 5547 spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts)); 5548 bdev_opts.bdev_io_pool_size = 512; 5549 bdev_opts.bdev_io_cache_size = 64; 5550 ut_init_bdev(&bdev_opts); 5551 5552 bdev = allocate_bdev("bdev"); 5553 5554 rc = spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc); 5555 CU_ASSERT_EQUAL(rc, 0); 5556 SPDK_CU_ASSERT_FATAL(desc != NULL); 5557 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 5558 ioch = spdk_bdev_get_io_channel(desc); 5559 SPDK_CU_ASSERT_FATAL(ioch != NULL); 5560 bdev_ch = spdk_io_channel_get_ctx(ioch); 5561 CU_ASSERT(TAILQ_EMPTY(&bdev_ch->io_submitted)); 5562 5563 fn_table.submit_request = stub_submit_request; 5564 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 5565 5566 /* Case 1: First test the request won't be split */ 5567 num_blocks = 32; 5568 5569 g_io_done = false; 5570 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_UNMAP, 0, num_blocks, 0); 5571 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 5572 rc = spdk_bdev_unmap_blocks(desc, ioch, 0, num_blocks, io_done, NULL); 5573 CU_ASSERT_EQUAL(rc, 0); 5574 CU_ASSERT(g_io_done == false); 5575 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 5576 stub_complete_io(1); 5577 CU_ASSERT(g_io_done == true); 5578 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 5579 5580 /* Case 2: Test the split with 2 children requests */ 5581 bdev->max_unmap = 8; 5582 bdev->max_unmap_segments = 2; 5583 max_unmap_blocks = bdev->max_unmap * bdev->max_unmap_segments; 5584 num_blocks = max_unmap_blocks * 2; 5585 offset = 0; 5586 5587 g_io_done = false; 5588 for (i = 0; i < 2; i++) { 5589 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_UNMAP, offset, max_unmap_blocks, 0); 5590 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 5591 offset += max_unmap_blocks; 5592 } 5593 5594 rc = spdk_bdev_unmap_blocks(desc, ioch, 0, num_blocks, io_done, NULL); 5595 CU_ASSERT_EQUAL(rc, 0); 5596 CU_ASSERT(g_io_done == false); 5597 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 5598 stub_complete_io(2); 5599 CU_ASSERT(g_io_done == true); 5600 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 5601 5602 /* Case 3: Test the split with 15 children requests, will finish 8 requests first */ 5603 num_children = 15; 5604 num_blocks = max_unmap_blocks * num_children; 5605 g_io_done = false; 5606 offset = 0; 5607 for (i = 0; i < num_children; i++) { 5608 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_UNMAP, offset, max_unmap_blocks, 0); 5609 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 5610 offset += max_unmap_blocks; 5611 } 5612 5613 rc = spdk_bdev_unmap_blocks(desc, ioch, 0, num_blocks, io_done, NULL); 5614 CU_ASSERT_EQUAL(rc, 0); 5615 CU_ASSERT(g_io_done == false); 5616 5617 while (num_children > 0) { 5618 num_outstanding = spdk_min(num_children, SPDK_BDEV_MAX_CHILDREN_UNMAP_WRITE_ZEROES_REQS); 5619 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == num_outstanding); 5620 stub_complete_io(num_outstanding); 5621 num_children -= num_outstanding; 5622 } 5623 CU_ASSERT(g_io_done == true); 5624 5625 spdk_put_io_channel(ioch); 5626 spdk_bdev_close(desc); 5627 free_bdev(bdev); 5628 ut_fini_bdev(); 5629 } 5630 5631 static void 5632 bdev_write_zeroes_split_test(void) 5633 { 5634 struct spdk_bdev *bdev; 5635 struct spdk_bdev_desc *desc = NULL; 5636 struct spdk_io_channel *ioch; 5637 struct spdk_bdev_channel *bdev_ch; 5638 struct ut_expected_io *expected_io; 5639 struct spdk_bdev_opts bdev_opts = {}; 5640 uint32_t i, num_outstanding; 5641 uint64_t offset, num_blocks, max_write_zeroes_blocks, num_children; 5642 int rc; 5643 5644 spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts)); 5645 bdev_opts.bdev_io_pool_size = 512; 5646 bdev_opts.bdev_io_cache_size = 64; 5647 ut_init_bdev(&bdev_opts); 5648 5649 bdev = allocate_bdev("bdev"); 5650 5651 rc = spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc); 5652 CU_ASSERT_EQUAL(rc, 0); 5653 SPDK_CU_ASSERT_FATAL(desc != NULL); 5654 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 5655 ioch = spdk_bdev_get_io_channel(desc); 5656 SPDK_CU_ASSERT_FATAL(ioch != NULL); 5657 bdev_ch = spdk_io_channel_get_ctx(ioch); 5658 CU_ASSERT(TAILQ_EMPTY(&bdev_ch->io_submitted)); 5659 5660 fn_table.submit_request = stub_submit_request; 5661 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 5662 5663 /* Case 1: First test the request won't be split */ 5664 num_blocks = 32; 5665 5666 g_io_done = false; 5667 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, 0, num_blocks, 0); 5668 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 5669 rc = spdk_bdev_write_zeroes_blocks(desc, ioch, 0, num_blocks, io_done, NULL); 5670 CU_ASSERT_EQUAL(rc, 0); 5671 CU_ASSERT(g_io_done == false); 5672 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 5673 stub_complete_io(1); 5674 CU_ASSERT(g_io_done == true); 5675 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 5676 5677 /* Case 2: Test the split with 2 children requests */ 5678 max_write_zeroes_blocks = 8; 5679 bdev->max_write_zeroes = max_write_zeroes_blocks; 5680 num_blocks = max_write_zeroes_blocks * 2; 5681 offset = 0; 5682 5683 g_io_done = false; 5684 for (i = 0; i < 2; i++) { 5685 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, offset, max_write_zeroes_blocks, 5686 0); 5687 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 5688 offset += max_write_zeroes_blocks; 5689 } 5690 5691 rc = spdk_bdev_write_zeroes_blocks(desc, ioch, 0, num_blocks, io_done, NULL); 5692 CU_ASSERT_EQUAL(rc, 0); 5693 CU_ASSERT(g_io_done == false); 5694 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 5695 stub_complete_io(2); 5696 CU_ASSERT(g_io_done == true); 5697 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 5698 5699 /* Case 3: Test the split with 15 children requests, will finish 8 requests first */ 5700 num_children = 15; 5701 num_blocks = max_write_zeroes_blocks * num_children; 5702 g_io_done = false; 5703 offset = 0; 5704 for (i = 0; i < num_children; i++) { 5705 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, offset, max_write_zeroes_blocks, 5706 0); 5707 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 5708 offset += max_write_zeroes_blocks; 5709 } 5710 5711 rc = spdk_bdev_write_zeroes_blocks(desc, ioch, 0, num_blocks, io_done, NULL); 5712 CU_ASSERT_EQUAL(rc, 0); 5713 CU_ASSERT(g_io_done == false); 5714 5715 while (num_children > 0) { 5716 num_outstanding = spdk_min(num_children, SPDK_BDEV_MAX_CHILDREN_UNMAP_WRITE_ZEROES_REQS); 5717 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == num_outstanding); 5718 stub_complete_io(num_outstanding); 5719 num_children -= num_outstanding; 5720 } 5721 CU_ASSERT(g_io_done == true); 5722 5723 spdk_put_io_channel(ioch); 5724 spdk_bdev_close(desc); 5725 free_bdev(bdev); 5726 ut_fini_bdev(); 5727 } 5728 5729 static void 5730 bdev_set_options_test(void) 5731 { 5732 struct spdk_bdev_opts bdev_opts = {}; 5733 int rc; 5734 5735 /* Case1: Do not set opts_size */ 5736 rc = spdk_bdev_set_opts(&bdev_opts); 5737 CU_ASSERT(rc == -1); 5738 } 5739 5740 static struct spdk_memory_domain *g_bdev_memory_domain = (struct spdk_memory_domain *) 0xf00df00d; 5741 5742 static int 5743 test_bdev_get_supported_dma_device_types_op(void *ctx, struct spdk_memory_domain **domains, 5744 int array_size) 5745 { 5746 if (array_size > 0 && domains) { 5747 domains[0] = g_bdev_memory_domain; 5748 } 5749 5750 return 1; 5751 } 5752 5753 static void 5754 bdev_get_memory_domains(void) 5755 { 5756 struct spdk_bdev_fn_table fn_table = { 5757 .get_memory_domains = test_bdev_get_supported_dma_device_types_op 5758 }; 5759 struct spdk_bdev bdev = { .fn_table = &fn_table }; 5760 struct spdk_memory_domain *domains[2] = {}; 5761 int rc; 5762 5763 /* bdev is NULL */ 5764 rc = spdk_bdev_get_memory_domains(NULL, domains, 2); 5765 CU_ASSERT(rc == -EINVAL); 5766 5767 /* domains is NULL */ 5768 rc = spdk_bdev_get_memory_domains(&bdev, NULL, 2); 5769 CU_ASSERT(rc == 1); 5770 5771 /* array size is 0 */ 5772 rc = spdk_bdev_get_memory_domains(&bdev, domains, 0); 5773 CU_ASSERT(rc == 1); 5774 5775 /* get_supported_dma_device_types op is set */ 5776 rc = spdk_bdev_get_memory_domains(&bdev, domains, 2); 5777 CU_ASSERT(rc == 1); 5778 CU_ASSERT(domains[0] == g_bdev_memory_domain); 5779 5780 /* get_supported_dma_device_types op is not set */ 5781 fn_table.get_memory_domains = NULL; 5782 rc = spdk_bdev_get_memory_domains(&bdev, domains, 2); 5783 CU_ASSERT(rc == 0); 5784 } 5785 5786 static void 5787 _bdev_io_ext(struct spdk_bdev_ext_io_opts *ext_io_opts) 5788 { 5789 struct spdk_bdev *bdev; 5790 struct spdk_bdev_desc *desc = NULL; 5791 struct spdk_io_channel *io_ch; 5792 char io_buf[512]; 5793 struct iovec iov = { .iov_base = io_buf, .iov_len = 512 }; 5794 struct ut_expected_io *expected_io; 5795 int rc; 5796 5797 ut_init_bdev(NULL); 5798 5799 bdev = allocate_bdev("bdev0"); 5800 bdev->md_interleave = false; 5801 bdev->md_len = 8; 5802 5803 rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc); 5804 CU_ASSERT(rc == 0); 5805 SPDK_CU_ASSERT_FATAL(desc != NULL); 5806 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 5807 io_ch = spdk_bdev_get_io_channel(desc); 5808 CU_ASSERT(io_ch != NULL); 5809 5810 /* read */ 5811 g_io_done = false; 5812 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 32, 14, 1); 5813 if (ext_io_opts) { 5814 expected_io->md_buf = ext_io_opts->metadata; 5815 } 5816 ut_expected_io_set_iov(expected_io, 0, iov.iov_base, iov.iov_len); 5817 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 5818 5819 rc = spdk_bdev_readv_blocks_ext(desc, io_ch, &iov, 1, 32, 14, io_done, NULL, ext_io_opts); 5820 5821 CU_ASSERT(rc == 0); 5822 CU_ASSERT(g_io_done == false); 5823 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 5824 stub_complete_io(1); 5825 CU_ASSERT(g_io_done == true); 5826 5827 /* write */ 5828 g_io_done = false; 5829 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 32, 14, 1); 5830 if (ext_io_opts) { 5831 expected_io->md_buf = ext_io_opts->metadata; 5832 } 5833 ut_expected_io_set_iov(expected_io, 0, iov.iov_base, iov.iov_len); 5834 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 5835 5836 rc = spdk_bdev_writev_blocks_ext(desc, io_ch, &iov, 1, 32, 14, io_done, NULL, ext_io_opts); 5837 5838 CU_ASSERT(rc == 0); 5839 CU_ASSERT(g_io_done == false); 5840 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 5841 stub_complete_io(1); 5842 CU_ASSERT(g_io_done == true); 5843 5844 spdk_put_io_channel(io_ch); 5845 spdk_bdev_close(desc); 5846 free_bdev(bdev); 5847 ut_fini_bdev(); 5848 5849 } 5850 5851 static void 5852 bdev_io_ext(void) 5853 { 5854 struct spdk_bdev_ext_io_opts ext_io_opts = { 5855 .metadata = (void *)0xFF000000, 5856 .size = sizeof(ext_io_opts), 5857 .dif_check_flags_exclude_mask = 0 5858 }; 5859 5860 _bdev_io_ext(&ext_io_opts); 5861 } 5862 5863 static void 5864 bdev_io_ext_no_opts(void) 5865 { 5866 _bdev_io_ext(NULL); 5867 } 5868 5869 static void 5870 bdev_io_ext_invalid_opts(void) 5871 { 5872 struct spdk_bdev *bdev; 5873 struct spdk_bdev_desc *desc = NULL; 5874 struct spdk_io_channel *io_ch; 5875 char io_buf[512]; 5876 struct iovec iov = { .iov_base = io_buf, .iov_len = 512 }; 5877 struct spdk_bdev_ext_io_opts ext_io_opts = { 5878 .metadata = (void *)0xFF000000, 5879 .size = sizeof(ext_io_opts), 5880 .dif_check_flags_exclude_mask = 0 5881 }; 5882 int rc; 5883 5884 ut_init_bdev(NULL); 5885 5886 bdev = allocate_bdev("bdev0"); 5887 bdev->md_interleave = false; 5888 bdev->md_len = 8; 5889 5890 rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc); 5891 CU_ASSERT(rc == 0); 5892 SPDK_CU_ASSERT_FATAL(desc != NULL); 5893 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 5894 io_ch = spdk_bdev_get_io_channel(desc); 5895 CU_ASSERT(io_ch != NULL); 5896 5897 /* Test invalid ext_opts size */ 5898 ext_io_opts.size = 0; 5899 rc = spdk_bdev_readv_blocks_ext(desc, io_ch, &iov, 1, 32, 14, io_done, NULL, &ext_io_opts); 5900 CU_ASSERT(rc == -EINVAL); 5901 rc = spdk_bdev_writev_blocks_ext(desc, io_ch, &iov, 1, 32, 14, io_done, NULL, &ext_io_opts); 5902 CU_ASSERT(rc == -EINVAL); 5903 5904 ext_io_opts.size = sizeof(ext_io_opts) * 2; 5905 rc = spdk_bdev_readv_blocks_ext(desc, io_ch, &iov, 1, 32, 14, io_done, NULL, &ext_io_opts); 5906 CU_ASSERT(rc == -EINVAL); 5907 rc = spdk_bdev_writev_blocks_ext(desc, io_ch, &iov, 1, 32, 14, io_done, NULL, &ext_io_opts); 5908 CU_ASSERT(rc == -EINVAL); 5909 5910 ext_io_opts.size = offsetof(struct spdk_bdev_ext_io_opts, metadata) + 5911 sizeof(ext_io_opts.metadata) - 1; 5912 rc = spdk_bdev_readv_blocks_ext(desc, io_ch, &iov, 1, 32, 14, io_done, NULL, &ext_io_opts); 5913 CU_ASSERT(rc == -EINVAL); 5914 rc = spdk_bdev_writev_blocks_ext(desc, io_ch, &iov, 1, 32, 14, io_done, NULL, &ext_io_opts); 5915 CU_ASSERT(rc == -EINVAL); 5916 5917 spdk_put_io_channel(io_ch); 5918 spdk_bdev_close(desc); 5919 free_bdev(bdev); 5920 ut_fini_bdev(); 5921 } 5922 5923 static void 5924 bdev_io_ext_split(void) 5925 { 5926 struct spdk_bdev *bdev; 5927 struct spdk_bdev_desc *desc = NULL; 5928 struct spdk_io_channel *io_ch; 5929 char io_buf[512]; 5930 struct iovec iov = { .iov_base = io_buf, .iov_len = 512 }; 5931 struct ut_expected_io *expected_io; 5932 struct spdk_bdev_ext_io_opts ext_io_opts = { 5933 .metadata = (void *)0xFF000000, 5934 .size = sizeof(ext_io_opts), 5935 .dif_check_flags_exclude_mask = 0 5936 }; 5937 int rc; 5938 5939 ut_init_bdev(NULL); 5940 5941 bdev = allocate_bdev("bdev0"); 5942 bdev->md_interleave = false; 5943 bdev->md_len = 8; 5944 5945 rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc); 5946 CU_ASSERT(rc == 0); 5947 SPDK_CU_ASSERT_FATAL(desc != NULL); 5948 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 5949 io_ch = spdk_bdev_get_io_channel(desc); 5950 CU_ASSERT(io_ch != NULL); 5951 5952 /* Check that IO request with ext_opts and metadata is split correctly 5953 * Offset 14, length 8, payload 0xF000 5954 * Child - Offset 14, length 2, payload 0xF000 5955 * Child - Offset 16, length 6, payload 0xF000 + 2 * 512 5956 */ 5957 bdev->optimal_io_boundary = 16; 5958 bdev->split_on_optimal_io_boundary = true; 5959 bdev->md_interleave = false; 5960 bdev->md_len = 8; 5961 5962 iov.iov_base = (void *)0xF000; 5963 iov.iov_len = 4096; 5964 memset(&ext_io_opts, 0, sizeof(ext_io_opts)); 5965 ext_io_opts.metadata = (void *)0xFF000000; 5966 ext_io_opts.size = sizeof(ext_io_opts); 5967 g_io_done = false; 5968 5969 /* read */ 5970 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 14, 2, 1); 5971 expected_io->md_buf = ext_io_opts.metadata; 5972 ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 2 * 512); 5973 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 5974 5975 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 16, 6, 1); 5976 expected_io->md_buf = ext_io_opts.metadata + 2 * 8; 5977 ut_expected_io_set_iov(expected_io, 0, (void *)(0xF000 + 2 * 512), 6 * 512); 5978 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 5979 5980 rc = spdk_bdev_readv_blocks_ext(desc, io_ch, &iov, 1, 14, 8, io_done, NULL, &ext_io_opts); 5981 CU_ASSERT(rc == 0); 5982 CU_ASSERT(g_io_done == false); 5983 5984 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 5985 stub_complete_io(2); 5986 CU_ASSERT(g_io_done == true); 5987 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 5988 5989 /* write */ 5990 g_io_done = false; 5991 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 14, 2, 1); 5992 expected_io->md_buf = ext_io_opts.metadata; 5993 ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 2 * 512); 5994 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 5995 5996 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 16, 6, 1); 5997 expected_io->md_buf = ext_io_opts.metadata + 2 * 8; 5998 ut_expected_io_set_iov(expected_io, 0, (void *)(0xF000 + 2 * 512), 6 * 512); 5999 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 6000 6001 rc = spdk_bdev_writev_blocks_ext(desc, io_ch, &iov, 1, 14, 8, io_done, NULL, &ext_io_opts); 6002 CU_ASSERT(rc == 0); 6003 CU_ASSERT(g_io_done == false); 6004 6005 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 6006 stub_complete_io(2); 6007 CU_ASSERT(g_io_done == true); 6008 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 6009 6010 spdk_put_io_channel(io_ch); 6011 spdk_bdev_close(desc); 6012 free_bdev(bdev); 6013 ut_fini_bdev(); 6014 } 6015 6016 static void 6017 bdev_io_ext_bounce_buffer(void) 6018 { 6019 struct spdk_bdev *bdev; 6020 struct spdk_bdev_desc *desc = NULL; 6021 struct spdk_io_channel *io_ch; 6022 char io_buf[512]; 6023 struct iovec iov = { .iov_base = io_buf, .iov_len = 512 }; 6024 struct ut_expected_io *expected_io, *aux_io; 6025 struct spdk_bdev_ext_io_opts ext_io_opts = { 6026 .metadata = (void *)0xFF000000, 6027 .size = sizeof(ext_io_opts), 6028 .dif_check_flags_exclude_mask = 0 6029 }; 6030 int rc; 6031 6032 ut_init_bdev(NULL); 6033 6034 bdev = allocate_bdev("bdev0"); 6035 bdev->md_interleave = false; 6036 bdev->md_len = 8; 6037 6038 rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc); 6039 CU_ASSERT(rc == 0); 6040 SPDK_CU_ASSERT_FATAL(desc != NULL); 6041 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 6042 io_ch = spdk_bdev_get_io_channel(desc); 6043 CU_ASSERT(io_ch != NULL); 6044 6045 /* Verify data pull/push 6046 * bdev doesn't support memory domains, so buffers from bdev memory pool will be used */ 6047 ext_io_opts.memory_domain = (struct spdk_memory_domain *)0xdeadbeef; 6048 6049 /* read */ 6050 g_io_done = false; 6051 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 32, 14, 1); 6052 ut_expected_io_set_iov(expected_io, 0, iov.iov_base, iov.iov_len); 6053 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 6054 6055 rc = spdk_bdev_readv_blocks_ext(desc, io_ch, &iov, 1, 32, 14, io_done, NULL, &ext_io_opts); 6056 6057 CU_ASSERT(rc == 0); 6058 CU_ASSERT(g_io_done == false); 6059 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 6060 stub_complete_io(1); 6061 CU_ASSERT(g_memory_domain_push_data_called == true); 6062 CU_ASSERT(g_io_done == true); 6063 6064 /* write */ 6065 g_io_done = false; 6066 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 32, 14, 1); 6067 ut_expected_io_set_iov(expected_io, 0, iov.iov_base, iov.iov_len); 6068 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 6069 6070 rc = spdk_bdev_writev_blocks_ext(desc, io_ch, &iov, 1, 32, 14, io_done, NULL, &ext_io_opts); 6071 6072 CU_ASSERT(rc == 0); 6073 CU_ASSERT(g_memory_domain_pull_data_called == true); 6074 CU_ASSERT(g_io_done == false); 6075 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 6076 stub_complete_io(1); 6077 CU_ASSERT(g_io_done == true); 6078 6079 /* Verify the request is queued after receiving ENOMEM from pull */ 6080 g_io_done = false; 6081 aux_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 32, 14, 1); 6082 ut_expected_io_set_iov(aux_io, 0, iov.iov_base, iov.iov_len); 6083 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, aux_io, link); 6084 rc = spdk_bdev_writev_blocks(desc, io_ch, &iov, 1, 32, 14, io_done, NULL); 6085 CU_ASSERT(rc == 0); 6086 CU_ASSERT(g_io_done == false); 6087 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 6088 6089 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 32, 14, 1); 6090 ut_expected_io_set_iov(expected_io, 0, iov.iov_base, iov.iov_len); 6091 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 6092 6093 MOCK_SET(spdk_memory_domain_pull_data, -ENOMEM); 6094 rc = spdk_bdev_writev_blocks_ext(desc, io_ch, &iov, 1, 32, 14, io_done, NULL, &ext_io_opts); 6095 CU_ASSERT(rc == 0); 6096 CU_ASSERT(g_io_done == false); 6097 /* The second IO has been queued */ 6098 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 6099 6100 MOCK_CLEAR(spdk_memory_domain_pull_data); 6101 g_memory_domain_pull_data_called = false; 6102 stub_complete_io(1); 6103 CU_ASSERT(g_io_done == true); 6104 CU_ASSERT(g_memory_domain_pull_data_called == true); 6105 /* The second IO should be submitted now */ 6106 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 6107 g_io_done = false; 6108 stub_complete_io(1); 6109 CU_ASSERT(g_io_done == true); 6110 6111 /* Verify the request is queued after receiving ENOMEM from push */ 6112 g_io_done = false; 6113 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 32, 14, 1); 6114 ut_expected_io_set_iov(expected_io, 0, iov.iov_base, iov.iov_len); 6115 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 6116 6117 MOCK_SET(spdk_memory_domain_push_data, -ENOMEM); 6118 rc = spdk_bdev_readv_blocks_ext(desc, io_ch, &iov, 1, 32, 14, io_done, NULL, &ext_io_opts); 6119 CU_ASSERT(rc == 0); 6120 CU_ASSERT(g_io_done == false); 6121 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 6122 6123 aux_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 32, 14, 1); 6124 ut_expected_io_set_iov(aux_io, 0, iov.iov_base, iov.iov_len); 6125 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, aux_io, link); 6126 rc = spdk_bdev_writev_blocks(desc, io_ch, &iov, 1, 32, 14, io_done, NULL); 6127 CU_ASSERT(rc == 0); 6128 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 6129 6130 stub_complete_io(1); 6131 /* The IO isn't done yet, it's still waiting on push */ 6132 CU_ASSERT(g_io_done == false); 6133 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 6134 MOCK_CLEAR(spdk_memory_domain_push_data); 6135 g_memory_domain_push_data_called = false; 6136 /* Completing the second IO should also trigger push on the first one */ 6137 stub_complete_io(1); 6138 CU_ASSERT(g_io_done == true); 6139 CU_ASSERT(g_memory_domain_push_data_called == true); 6140 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 6141 6142 spdk_put_io_channel(io_ch); 6143 spdk_bdev_close(desc); 6144 free_bdev(bdev); 6145 ut_fini_bdev(); 6146 } 6147 6148 static void 6149 bdev_register_uuid_alias(void) 6150 { 6151 struct spdk_bdev *bdev, *second; 6152 char uuid[SPDK_UUID_STRING_LEN]; 6153 int rc; 6154 6155 ut_init_bdev(NULL); 6156 bdev = allocate_bdev("bdev0"); 6157 6158 /* Make sure an UUID was generated */ 6159 CU_ASSERT_FALSE(spdk_uuid_is_null(&bdev->uuid)); 6160 6161 /* Check that an UUID alias was registered */ 6162 spdk_uuid_fmt_lower(uuid, sizeof(uuid), &bdev->uuid); 6163 CU_ASSERT_EQUAL(spdk_bdev_get_by_name(uuid), bdev); 6164 6165 /* Unregister the bdev */ 6166 spdk_bdev_unregister(bdev, NULL, NULL); 6167 poll_threads(); 6168 CU_ASSERT_PTR_NULL(spdk_bdev_get_by_name(uuid)); 6169 6170 /* Check the same, but this time register the bdev with non-zero UUID */ 6171 rc = spdk_bdev_register(bdev); 6172 CU_ASSERT_EQUAL(rc, 0); 6173 CU_ASSERT_EQUAL(spdk_bdev_get_by_name(uuid), bdev); 6174 6175 /* Unregister the bdev */ 6176 spdk_bdev_unregister(bdev, NULL, NULL); 6177 poll_threads(); 6178 CU_ASSERT_PTR_NULL(spdk_bdev_get_by_name(uuid)); 6179 6180 /* Regiser the bdev using UUID as the name */ 6181 bdev->name = uuid; 6182 rc = spdk_bdev_register(bdev); 6183 CU_ASSERT_EQUAL(rc, 0); 6184 CU_ASSERT_EQUAL(spdk_bdev_get_by_name(uuid), bdev); 6185 6186 /* Unregister the bdev */ 6187 spdk_bdev_unregister(bdev, NULL, NULL); 6188 poll_threads(); 6189 CU_ASSERT_PTR_NULL(spdk_bdev_get_by_name(uuid)); 6190 6191 /* Check that it's not possible to register two bdevs with the same UUIDs */ 6192 bdev->name = "bdev0"; 6193 second = allocate_bdev("bdev1"); 6194 spdk_uuid_copy(&bdev->uuid, &second->uuid); 6195 rc = spdk_bdev_register(bdev); 6196 CU_ASSERT_EQUAL(rc, -EEXIST); 6197 6198 /* Regenerate the UUID and re-check */ 6199 spdk_uuid_generate(&bdev->uuid); 6200 rc = spdk_bdev_register(bdev); 6201 CU_ASSERT_EQUAL(rc, 0); 6202 6203 /* And check that both bdevs can be retrieved through their UUIDs */ 6204 spdk_uuid_fmt_lower(uuid, sizeof(uuid), &bdev->uuid); 6205 CU_ASSERT_EQUAL(spdk_bdev_get_by_name(uuid), bdev); 6206 spdk_uuid_fmt_lower(uuid, sizeof(uuid), &second->uuid); 6207 CU_ASSERT_EQUAL(spdk_bdev_get_by_name(uuid), second); 6208 6209 free_bdev(second); 6210 free_bdev(bdev); 6211 ut_fini_bdev(); 6212 } 6213 6214 static void 6215 bdev_unregister_by_name(void) 6216 { 6217 struct spdk_bdev *bdev; 6218 int rc; 6219 6220 bdev = allocate_bdev("bdev"); 6221 6222 g_event_type1 = 0xFF; 6223 g_unregister_arg = NULL; 6224 g_unregister_rc = -1; 6225 6226 rc = spdk_bdev_unregister_by_name("bdev1", &bdev_ut_if, bdev_unregister_cb, (void *)0x12345678); 6227 CU_ASSERT(rc == -ENODEV); 6228 6229 rc = spdk_bdev_unregister_by_name("bdev", &vbdev_ut_if, bdev_unregister_cb, (void *)0x12345678); 6230 CU_ASSERT(rc == -ENODEV); 6231 6232 rc = spdk_bdev_unregister_by_name("bdev", &bdev_ut_if, bdev_unregister_cb, (void *)0x12345678); 6233 CU_ASSERT(rc == 0); 6234 6235 /* Check that unregister callback is delayed */ 6236 CU_ASSERT(g_unregister_arg == NULL); 6237 CU_ASSERT(g_unregister_rc == -1); 6238 6239 poll_threads(); 6240 6241 /* Event callback shall not be issued because device was closed */ 6242 CU_ASSERT(g_event_type1 == 0xFF); 6243 /* Unregister callback is issued */ 6244 CU_ASSERT(g_unregister_arg == (void *)0x12345678); 6245 CU_ASSERT(g_unregister_rc == 0); 6246 6247 free_bdev(bdev); 6248 } 6249 6250 static int 6251 count_bdevs(void *ctx, struct spdk_bdev *bdev) 6252 { 6253 int *count = ctx; 6254 6255 (*count)++; 6256 6257 return 0; 6258 } 6259 6260 static void 6261 for_each_bdev_test(void) 6262 { 6263 struct spdk_bdev *bdev[8]; 6264 int rc, count; 6265 6266 bdev[0] = allocate_bdev("bdev0"); 6267 bdev[0]->internal.status = SPDK_BDEV_STATUS_REMOVING; 6268 6269 bdev[1] = allocate_bdev("bdev1"); 6270 rc = spdk_bdev_module_claim_bdev(bdev[1], NULL, &bdev_ut_if); 6271 CU_ASSERT(rc == 0); 6272 6273 bdev[2] = allocate_bdev("bdev2"); 6274 6275 bdev[3] = allocate_bdev("bdev3"); 6276 rc = spdk_bdev_module_claim_bdev(bdev[3], NULL, &bdev_ut_if); 6277 CU_ASSERT(rc == 0); 6278 6279 bdev[4] = allocate_bdev("bdev4"); 6280 6281 bdev[5] = allocate_bdev("bdev5"); 6282 rc = spdk_bdev_module_claim_bdev(bdev[5], NULL, &bdev_ut_if); 6283 CU_ASSERT(rc == 0); 6284 6285 bdev[6] = allocate_bdev("bdev6"); 6286 6287 bdev[7] = allocate_bdev("bdev7"); 6288 6289 count = 0; 6290 rc = spdk_for_each_bdev(&count, count_bdevs); 6291 CU_ASSERT(rc == 0); 6292 CU_ASSERT(count == 7); 6293 6294 count = 0; 6295 rc = spdk_for_each_bdev_leaf(&count, count_bdevs); 6296 CU_ASSERT(rc == 0); 6297 CU_ASSERT(count == 4); 6298 6299 bdev[0]->internal.status = SPDK_BDEV_STATUS_READY; 6300 free_bdev(bdev[0]); 6301 free_bdev(bdev[1]); 6302 free_bdev(bdev[2]); 6303 free_bdev(bdev[3]); 6304 free_bdev(bdev[4]); 6305 free_bdev(bdev[5]); 6306 free_bdev(bdev[6]); 6307 free_bdev(bdev[7]); 6308 } 6309 6310 static void 6311 bdev_seek_test(void) 6312 { 6313 struct spdk_bdev *bdev; 6314 struct spdk_bdev_desc *desc = NULL; 6315 struct spdk_io_channel *io_ch; 6316 int rc; 6317 6318 ut_init_bdev(NULL); 6319 poll_threads(); 6320 6321 bdev = allocate_bdev("bdev0"); 6322 6323 rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc); 6324 CU_ASSERT(rc == 0); 6325 poll_threads(); 6326 SPDK_CU_ASSERT_FATAL(desc != NULL); 6327 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 6328 io_ch = spdk_bdev_get_io_channel(desc); 6329 CU_ASSERT(io_ch != NULL); 6330 6331 /* Seek data not supported */ 6332 ut_enable_io_type(SPDK_BDEV_IO_TYPE_SEEK_DATA, false); 6333 rc = spdk_bdev_seek_data(desc, io_ch, 0, bdev_seek_cb, NULL); 6334 CU_ASSERT(rc == 0); 6335 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 6336 poll_threads(); 6337 CU_ASSERT(g_seek_offset == 0); 6338 6339 /* Seek hole not supported */ 6340 ut_enable_io_type(SPDK_BDEV_IO_TYPE_SEEK_HOLE, false); 6341 rc = spdk_bdev_seek_hole(desc, io_ch, 0, bdev_seek_cb, NULL); 6342 CU_ASSERT(rc == 0); 6343 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 6344 poll_threads(); 6345 CU_ASSERT(g_seek_offset == UINT64_MAX); 6346 6347 /* Seek data supported */ 6348 g_seek_data_offset = 12345; 6349 ut_enable_io_type(SPDK_BDEV_IO_TYPE_SEEK_DATA, true); 6350 rc = spdk_bdev_seek_data(desc, io_ch, 0, bdev_seek_cb, NULL); 6351 CU_ASSERT(rc == 0); 6352 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 6353 stub_complete_io(1); 6354 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 6355 CU_ASSERT(g_seek_offset == 12345); 6356 6357 /* Seek hole supported */ 6358 g_seek_hole_offset = 67890; 6359 ut_enable_io_type(SPDK_BDEV_IO_TYPE_SEEK_HOLE, true); 6360 rc = spdk_bdev_seek_hole(desc, io_ch, 0, bdev_seek_cb, NULL); 6361 CU_ASSERT(rc == 0); 6362 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 6363 stub_complete_io(1); 6364 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 6365 CU_ASSERT(g_seek_offset == 67890); 6366 6367 spdk_put_io_channel(io_ch); 6368 spdk_bdev_close(desc); 6369 free_bdev(bdev); 6370 ut_fini_bdev(); 6371 } 6372 6373 static void 6374 bdev_copy(void) 6375 { 6376 struct spdk_bdev *bdev; 6377 struct spdk_bdev_desc *desc = NULL; 6378 struct spdk_io_channel *ioch; 6379 struct ut_expected_io *expected_io; 6380 uint64_t src_offset, num_blocks; 6381 uint32_t num_completed; 6382 int rc; 6383 6384 ut_init_bdev(NULL); 6385 bdev = allocate_bdev("bdev"); 6386 6387 rc = spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc); 6388 CU_ASSERT_EQUAL(rc, 0); 6389 SPDK_CU_ASSERT_FATAL(desc != NULL); 6390 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 6391 ioch = spdk_bdev_get_io_channel(desc); 6392 SPDK_CU_ASSERT_FATAL(ioch != NULL); 6393 6394 fn_table.submit_request = stub_submit_request; 6395 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 6396 6397 /* First test that if the bdev supports copy, the request won't be split */ 6398 bdev->md_len = 0; 6399 bdev->blocklen = 512; 6400 num_blocks = 128; 6401 src_offset = bdev->blockcnt - num_blocks; 6402 6403 expected_io = ut_alloc_expected_copy_io(SPDK_BDEV_IO_TYPE_COPY, 0, src_offset, num_blocks); 6404 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 6405 6406 rc = spdk_bdev_copy_blocks(desc, ioch, 0, src_offset, num_blocks, io_done, NULL); 6407 CU_ASSERT_EQUAL(rc, 0); 6408 num_completed = stub_complete_io(1); 6409 CU_ASSERT_EQUAL(num_completed, 1); 6410 6411 /* Check that if copy is not supported it'll still work */ 6412 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, src_offset, num_blocks, 0); 6413 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 6414 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 0, num_blocks, 0); 6415 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 6416 6417 ut_enable_io_type(SPDK_BDEV_IO_TYPE_COPY, false); 6418 6419 rc = spdk_bdev_copy_blocks(desc, ioch, 0, src_offset, num_blocks, io_done, NULL); 6420 CU_ASSERT_EQUAL(rc, 0); 6421 num_completed = stub_complete_io(1); 6422 CU_ASSERT_EQUAL(num_completed, 1); 6423 num_completed = stub_complete_io(1); 6424 CU_ASSERT_EQUAL(num_completed, 1); 6425 6426 ut_enable_io_type(SPDK_BDEV_IO_TYPE_COPY, true); 6427 spdk_put_io_channel(ioch); 6428 spdk_bdev_close(desc); 6429 free_bdev(bdev); 6430 ut_fini_bdev(); 6431 } 6432 6433 static void 6434 bdev_copy_split_test(void) 6435 { 6436 struct spdk_bdev *bdev; 6437 struct spdk_bdev_desc *desc = NULL; 6438 struct spdk_io_channel *ioch; 6439 struct spdk_bdev_channel *bdev_ch; 6440 struct ut_expected_io *expected_io; 6441 struct spdk_bdev_opts bdev_opts = {}; 6442 uint32_t i, num_outstanding; 6443 uint64_t offset, src_offset, num_blocks, max_copy_blocks, num_children; 6444 int rc; 6445 6446 spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts)); 6447 bdev_opts.bdev_io_pool_size = 512; 6448 bdev_opts.bdev_io_cache_size = 64; 6449 rc = spdk_bdev_set_opts(&bdev_opts); 6450 CU_ASSERT(rc == 0); 6451 6452 ut_init_bdev(NULL); 6453 bdev = allocate_bdev("bdev"); 6454 6455 rc = spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc); 6456 CU_ASSERT_EQUAL(rc, 0); 6457 SPDK_CU_ASSERT_FATAL(desc != NULL); 6458 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 6459 ioch = spdk_bdev_get_io_channel(desc); 6460 SPDK_CU_ASSERT_FATAL(ioch != NULL); 6461 bdev_ch = spdk_io_channel_get_ctx(ioch); 6462 CU_ASSERT(TAILQ_EMPTY(&bdev_ch->io_submitted)); 6463 6464 fn_table.submit_request = stub_submit_request; 6465 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 6466 6467 /* Case 1: First test the request won't be split */ 6468 num_blocks = 32; 6469 src_offset = bdev->blockcnt - num_blocks; 6470 6471 g_io_done = false; 6472 expected_io = ut_alloc_expected_copy_io(SPDK_BDEV_IO_TYPE_COPY, 0, src_offset, num_blocks); 6473 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 6474 rc = spdk_bdev_copy_blocks(desc, ioch, 0, src_offset, num_blocks, io_done, NULL); 6475 CU_ASSERT_EQUAL(rc, 0); 6476 CU_ASSERT(g_io_done == false); 6477 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 6478 stub_complete_io(1); 6479 CU_ASSERT(g_io_done == true); 6480 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 6481 6482 /* Case 2: Test the split with 2 children requests */ 6483 max_copy_blocks = 8; 6484 bdev->max_copy = max_copy_blocks; 6485 num_children = 2; 6486 num_blocks = max_copy_blocks * num_children; 6487 offset = 0; 6488 src_offset = bdev->blockcnt - num_blocks; 6489 6490 g_io_done = false; 6491 for (i = 0; i < num_children; i++) { 6492 expected_io = ut_alloc_expected_copy_io(SPDK_BDEV_IO_TYPE_COPY, offset, 6493 src_offset + offset, max_copy_blocks); 6494 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 6495 offset += max_copy_blocks; 6496 } 6497 6498 rc = spdk_bdev_copy_blocks(desc, ioch, 0, src_offset, num_blocks, io_done, NULL); 6499 CU_ASSERT_EQUAL(rc, 0); 6500 CU_ASSERT(g_io_done == false); 6501 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == num_children); 6502 stub_complete_io(num_children); 6503 CU_ASSERT(g_io_done == true); 6504 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 6505 6506 /* Case 3: Test the split with 15 children requests, will finish 8 requests first */ 6507 num_children = 15; 6508 num_blocks = max_copy_blocks * num_children; 6509 offset = 0; 6510 src_offset = bdev->blockcnt - num_blocks; 6511 6512 g_io_done = false; 6513 for (i = 0; i < num_children; i++) { 6514 expected_io = ut_alloc_expected_copy_io(SPDK_BDEV_IO_TYPE_COPY, offset, 6515 src_offset + offset, max_copy_blocks); 6516 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 6517 offset += max_copy_blocks; 6518 } 6519 6520 rc = spdk_bdev_copy_blocks(desc, ioch, 0, src_offset, num_blocks, io_done, NULL); 6521 CU_ASSERT_EQUAL(rc, 0); 6522 CU_ASSERT(g_io_done == false); 6523 6524 while (num_children > 0) { 6525 num_outstanding = spdk_min(num_children, SPDK_BDEV_MAX_CHILDREN_COPY_REQS); 6526 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == num_outstanding); 6527 stub_complete_io(num_outstanding); 6528 num_children -= num_outstanding; 6529 } 6530 CU_ASSERT(g_io_done == true); 6531 6532 /* Case 4: Same test scenario as the case 2 but the configuration is different. 6533 * Copy is not supported. 6534 */ 6535 ut_enable_io_type(SPDK_BDEV_IO_TYPE_COPY, false); 6536 6537 num_children = 2; 6538 max_copy_blocks = spdk_bdev_get_max_copy(bdev); 6539 num_blocks = max_copy_blocks * num_children; 6540 src_offset = bdev->blockcnt - num_blocks; 6541 offset = 0; 6542 6543 g_io_done = false; 6544 for (i = 0; i < num_children; i++) { 6545 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, src_offset, 6546 max_copy_blocks, 0); 6547 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 6548 src_offset += max_copy_blocks; 6549 } 6550 for (i = 0; i < num_children; i++) { 6551 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, offset, 6552 max_copy_blocks, 0); 6553 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 6554 offset += max_copy_blocks; 6555 } 6556 6557 src_offset = bdev->blockcnt - num_blocks; 6558 offset = 0; 6559 6560 rc = spdk_bdev_copy_blocks(desc, ioch, offset, src_offset, num_blocks, io_done, NULL); 6561 CU_ASSERT_EQUAL(rc, 0); 6562 CU_ASSERT(g_io_done == false); 6563 6564 while (num_children > 0) { 6565 num_outstanding = spdk_min(num_children, SPDK_BDEV_MAX_CHILDREN_COPY_REQS); 6566 6567 /* One copy request is split into one read and one write requests. */ 6568 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == num_outstanding); 6569 stub_complete_io(num_outstanding); 6570 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == num_outstanding); 6571 stub_complete_io(num_outstanding); 6572 6573 num_children -= num_outstanding; 6574 } 6575 CU_ASSERT(g_io_done == true); 6576 6577 ut_enable_io_type(SPDK_BDEV_IO_TYPE_COPY, true); 6578 6579 spdk_put_io_channel(ioch); 6580 spdk_bdev_close(desc); 6581 free_bdev(bdev); 6582 ut_fini_bdev(); 6583 } 6584 6585 static void 6586 examine_claim_v1(struct spdk_bdev *bdev) 6587 { 6588 int rc; 6589 6590 rc = spdk_bdev_module_claim_bdev(bdev, NULL, &vbdev_ut_if); 6591 CU_ASSERT(rc == 0); 6592 } 6593 6594 static void 6595 examine_no_lock_held(struct spdk_bdev *bdev) 6596 { 6597 CU_ASSERT(!spdk_spin_held(&g_bdev_mgr.spinlock)); 6598 CU_ASSERT(!spdk_spin_held(&bdev->internal.spinlock)); 6599 } 6600 6601 struct examine_claim_v2_ctx { 6602 struct ut_examine_ctx examine_ctx; 6603 enum spdk_bdev_claim_type claim_type; 6604 struct spdk_bdev_desc *desc; 6605 }; 6606 6607 static void 6608 examine_claim_v2(struct spdk_bdev *bdev) 6609 { 6610 struct examine_claim_v2_ctx *ctx = bdev->ctxt; 6611 int rc; 6612 6613 rc = spdk_bdev_open_ext(bdev->name, false, bdev_ut_event_cb, NULL, &ctx->desc); 6614 CU_ASSERT(rc == 0); 6615 6616 rc = spdk_bdev_module_claim_bdev_desc(ctx->desc, ctx->claim_type, NULL, &vbdev_ut_if); 6617 CU_ASSERT(rc == 0); 6618 } 6619 6620 static void 6621 examine_locks(void) 6622 { 6623 struct spdk_bdev *bdev; 6624 struct ut_examine_ctx ctx = { 0 }; 6625 struct examine_claim_v2_ctx v2_ctx; 6626 6627 /* Without any claims, one code path is taken */ 6628 ctx.examine_config = examine_no_lock_held; 6629 ctx.examine_disk = examine_no_lock_held; 6630 bdev = allocate_bdev_ctx("bdev0", &ctx); 6631 CU_ASSERT(ctx.examine_config_count == 1); 6632 CU_ASSERT(ctx.examine_disk_count == 1); 6633 CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_NONE); 6634 CU_ASSERT(bdev->internal.claim.v1.module == NULL); 6635 free_bdev(bdev); 6636 6637 /* Exercise another path that is taken when examine_config() takes a v1 claim. */ 6638 memset(&ctx, 0, sizeof(ctx)); 6639 ctx.examine_config = examine_claim_v1; 6640 ctx.examine_disk = examine_no_lock_held; 6641 bdev = allocate_bdev_ctx("bdev0", &ctx); 6642 CU_ASSERT(ctx.examine_config_count == 1); 6643 CU_ASSERT(ctx.examine_disk_count == 1); 6644 CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_EXCL_WRITE); 6645 CU_ASSERT(bdev->internal.claim.v1.module == &vbdev_ut_if); 6646 spdk_bdev_module_release_bdev(bdev); 6647 CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_NONE); 6648 CU_ASSERT(bdev->internal.claim.v1.module == NULL); 6649 free_bdev(bdev); 6650 6651 /* Exercise the final path that comes with v2 claims. */ 6652 memset(&v2_ctx, 0, sizeof(v2_ctx)); 6653 v2_ctx.examine_ctx.examine_config = examine_claim_v2; 6654 v2_ctx.examine_ctx.examine_disk = examine_no_lock_held; 6655 v2_ctx.claim_type = SPDK_BDEV_CLAIM_READ_MANY_WRITE_NONE; 6656 bdev = allocate_bdev_ctx("bdev0", &v2_ctx); 6657 CU_ASSERT(v2_ctx.examine_ctx.examine_config_count == 1); 6658 CU_ASSERT(v2_ctx.examine_ctx.examine_disk_count == 1); 6659 CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_READ_MANY_WRITE_NONE); 6660 spdk_bdev_close(v2_ctx.desc); 6661 CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_NONE); 6662 free_bdev(bdev); 6663 } 6664 6665 #define UT_ASSERT_CLAIM_V2_COUNT(bdev, expect) \ 6666 do { \ 6667 uint32_t len = 0; \ 6668 struct spdk_bdev_module_claim *claim; \ 6669 TAILQ_FOREACH(claim, &bdev->internal.claim.v2.claims, link) { \ 6670 len++; \ 6671 } \ 6672 CU_ASSERT(len == expect); \ 6673 } while (0) 6674 6675 static void 6676 claim_v2_rwo(void) 6677 { 6678 struct spdk_bdev *bdev; 6679 struct spdk_bdev_desc *desc; 6680 struct spdk_bdev_desc *desc2; 6681 struct spdk_bdev_claim_opts opts; 6682 int rc; 6683 6684 bdev = allocate_bdev("bdev0"); 6685 6686 /* Claim without options */ 6687 desc = NULL; 6688 rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc); 6689 CU_ASSERT(rc == 0); 6690 SPDK_CU_ASSERT_FATAL(desc != NULL); 6691 rc = spdk_bdev_module_claim_bdev_desc(desc, SPDK_BDEV_CLAIM_READ_MANY_WRITE_ONE, NULL, 6692 &bdev_ut_if); 6693 CU_ASSERT(rc == 0); 6694 CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_READ_MANY_WRITE_ONE); 6695 CU_ASSERT(desc->claim != NULL); 6696 CU_ASSERT(desc->claim->module == &bdev_ut_if); 6697 CU_ASSERT(strcmp(desc->claim->name, "") == 0); 6698 CU_ASSERT(TAILQ_FIRST(&bdev->internal.claim.v2.claims) == desc->claim); 6699 UT_ASSERT_CLAIM_V2_COUNT(bdev, 1); 6700 6701 /* Release the claim by closing the descriptor */ 6702 spdk_bdev_close(desc); 6703 CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_NONE); 6704 CU_ASSERT(TAILQ_EMPTY(&bdev->internal.open_descs)); 6705 UT_ASSERT_CLAIM_V2_COUNT(bdev, 0); 6706 6707 /* Claim with options */ 6708 spdk_bdev_claim_opts_init(&opts, sizeof(opts)); 6709 snprintf(opts.name, sizeof(opts.name), "%s", "claim with options"); 6710 desc = NULL; 6711 rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc); 6712 CU_ASSERT(rc == 0); 6713 SPDK_CU_ASSERT_FATAL(desc != NULL); 6714 rc = spdk_bdev_module_claim_bdev_desc(desc, SPDK_BDEV_CLAIM_READ_MANY_WRITE_ONE, &opts, 6715 &bdev_ut_if); 6716 CU_ASSERT(rc == 0); 6717 CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_READ_MANY_WRITE_ONE); 6718 CU_ASSERT(desc->claim != NULL); 6719 CU_ASSERT(desc->claim->module == &bdev_ut_if); 6720 CU_ASSERT(strcmp(desc->claim->name, "claim with options") == 0); 6721 memset(&opts, 0, sizeof(opts)); 6722 CU_ASSERT(strcmp(desc->claim->name, "claim with options") == 0); 6723 CU_ASSERT(TAILQ_FIRST(&bdev->internal.claim.v2.claims) == desc->claim); 6724 UT_ASSERT_CLAIM_V2_COUNT(bdev, 1); 6725 6726 /* The claim blocks new writers. */ 6727 desc2 = NULL; 6728 rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc2); 6729 CU_ASSERT(rc == -EPERM); 6730 CU_ASSERT(desc2 == NULL); 6731 6732 /* New readers are allowed */ 6733 desc2 = NULL; 6734 rc = spdk_bdev_open_ext("bdev0", false, bdev_ut_event_cb, NULL, &desc2); 6735 CU_ASSERT(rc == 0); 6736 CU_ASSERT(desc2 != NULL); 6737 CU_ASSERT(!desc2->write); 6738 6739 /* No new v2 RWO claims are allowed */ 6740 rc = spdk_bdev_module_claim_bdev_desc(desc2, SPDK_BDEV_CLAIM_READ_MANY_WRITE_ONE, NULL, 6741 &bdev_ut_if); 6742 CU_ASSERT(rc == -EPERM); 6743 6744 /* No new v2 ROM claims are allowed */ 6745 CU_ASSERT(!desc2->write); 6746 rc = spdk_bdev_module_claim_bdev_desc(desc2, SPDK_BDEV_CLAIM_READ_MANY_WRITE_NONE, NULL, 6747 &bdev_ut_if); 6748 CU_ASSERT(rc == -EPERM); 6749 CU_ASSERT(!desc2->write); 6750 6751 /* No new v2 RWM claims are allowed */ 6752 spdk_bdev_claim_opts_init(&opts, sizeof(opts)); 6753 opts.shared_claim_key = (uint64_t)&opts; 6754 rc = spdk_bdev_module_claim_bdev_desc(desc2, SPDK_BDEV_CLAIM_READ_MANY_WRITE_SHARED, &opts, 6755 &bdev_ut_if); 6756 CU_ASSERT(rc == -EPERM); 6757 CU_ASSERT(!desc2->write); 6758 6759 /* No new v1 claims are allowed */ 6760 rc = spdk_bdev_module_claim_bdev(bdev, NULL, &bdev_ut_if); 6761 CU_ASSERT(rc == -EPERM); 6762 6763 /* None of the above changed the existing claim */ 6764 CU_ASSERT(TAILQ_FIRST(&bdev->internal.claim.v2.claims) == desc->claim); 6765 UT_ASSERT_CLAIM_V2_COUNT(bdev, 1); 6766 6767 /* Closing the first descriptor now allows a new claim and it is promoted to rw. */ 6768 spdk_bdev_close(desc); 6769 CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_NONE); 6770 UT_ASSERT_CLAIM_V2_COUNT(bdev, 0); 6771 CU_ASSERT(!desc2->write); 6772 rc = spdk_bdev_module_claim_bdev_desc(desc2, SPDK_BDEV_CLAIM_READ_MANY_WRITE_ONE, NULL, 6773 &bdev_ut_if); 6774 CU_ASSERT(rc == 0); 6775 CU_ASSERT(desc2->claim != NULL); 6776 CU_ASSERT(desc2->write); 6777 CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_READ_MANY_WRITE_ONE); 6778 CU_ASSERT(TAILQ_FIRST(&bdev->internal.claim.v2.claims) == desc2->claim); 6779 UT_ASSERT_CLAIM_V2_COUNT(bdev, 1); 6780 spdk_bdev_close(desc2); 6781 CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_NONE); 6782 UT_ASSERT_CLAIM_V2_COUNT(bdev, 0); 6783 6784 /* Cannot claim with a key */ 6785 desc = NULL; 6786 rc = spdk_bdev_open_ext("bdev0", false, bdev_ut_event_cb, NULL, &desc); 6787 CU_ASSERT(rc == 0); 6788 SPDK_CU_ASSERT_FATAL(desc != NULL); 6789 spdk_bdev_claim_opts_init(&opts, sizeof(opts)); 6790 opts.shared_claim_key = (uint64_t)&opts; 6791 rc = spdk_bdev_module_claim_bdev_desc(desc, SPDK_BDEV_CLAIM_READ_MANY_WRITE_ONE, &opts, 6792 &bdev_ut_if); 6793 CU_ASSERT(rc == -EINVAL); 6794 CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_NONE); 6795 UT_ASSERT_CLAIM_V2_COUNT(bdev, 0); 6796 spdk_bdev_close(desc); 6797 6798 /* Clean up */ 6799 free_bdev(bdev); 6800 } 6801 6802 static void 6803 claim_v2_rom(void) 6804 { 6805 struct spdk_bdev *bdev; 6806 struct spdk_bdev_desc *desc; 6807 struct spdk_bdev_desc *desc2; 6808 struct spdk_bdev_claim_opts opts; 6809 int rc; 6810 6811 bdev = allocate_bdev("bdev0"); 6812 6813 /* Claim without options */ 6814 desc = NULL; 6815 rc = spdk_bdev_open_ext("bdev0", false, bdev_ut_event_cb, NULL, &desc); 6816 CU_ASSERT(rc == 0); 6817 SPDK_CU_ASSERT_FATAL(desc != NULL); 6818 rc = spdk_bdev_module_claim_bdev_desc(desc, SPDK_BDEV_CLAIM_READ_MANY_WRITE_NONE, NULL, 6819 &bdev_ut_if); 6820 CU_ASSERT(rc == 0); 6821 CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_READ_MANY_WRITE_NONE); 6822 CU_ASSERT(desc->claim != NULL); 6823 CU_ASSERT(desc->claim->module == &bdev_ut_if); 6824 CU_ASSERT(strcmp(desc->claim->name, "") == 0); 6825 CU_ASSERT(TAILQ_FIRST(&bdev->internal.claim.v2.claims) == desc->claim); 6826 UT_ASSERT_CLAIM_V2_COUNT(bdev, 1); 6827 6828 /* Release the claim by closing the descriptor */ 6829 spdk_bdev_close(desc); 6830 CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_NONE); 6831 CU_ASSERT(TAILQ_EMPTY(&bdev->internal.open_descs)); 6832 CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_NONE); 6833 UT_ASSERT_CLAIM_V2_COUNT(bdev, 0); 6834 6835 /* Claim with options */ 6836 spdk_bdev_claim_opts_init(&opts, sizeof(opts)); 6837 snprintf(opts.name, sizeof(opts.name), "%s", "claim with options"); 6838 desc = NULL; 6839 rc = spdk_bdev_open_ext("bdev0", false, bdev_ut_event_cb, NULL, &desc); 6840 CU_ASSERT(rc == 0); 6841 SPDK_CU_ASSERT_FATAL(desc != NULL); 6842 rc = spdk_bdev_module_claim_bdev_desc(desc, SPDK_BDEV_CLAIM_READ_MANY_WRITE_NONE, &opts, 6843 &bdev_ut_if); 6844 CU_ASSERT(rc == 0); 6845 CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_READ_MANY_WRITE_NONE); 6846 SPDK_CU_ASSERT_FATAL(desc->claim != NULL); 6847 CU_ASSERT(desc->claim->module == &bdev_ut_if); 6848 CU_ASSERT(strcmp(desc->claim->name, "claim with options") == 0); 6849 memset(&opts, 0, sizeof(opts)); 6850 CU_ASSERT(strcmp(desc->claim->name, "claim with options") == 0); 6851 CU_ASSERT(TAILQ_FIRST(&bdev->internal.claim.v2.claims) == desc->claim); 6852 UT_ASSERT_CLAIM_V2_COUNT(bdev, 1); 6853 6854 /* The claim blocks new writers. */ 6855 desc2 = NULL; 6856 rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc2); 6857 CU_ASSERT(rc == -EPERM); 6858 CU_ASSERT(desc2 == NULL); 6859 6860 /* New readers are allowed */ 6861 desc2 = NULL; 6862 rc = spdk_bdev_open_ext("bdev0", false, bdev_ut_event_cb, NULL, &desc2); 6863 CU_ASSERT(rc == 0); 6864 CU_ASSERT(desc2 != NULL); 6865 CU_ASSERT(!desc2->write); 6866 6867 /* No new v2 RWO claims are allowed */ 6868 rc = spdk_bdev_module_claim_bdev_desc(desc2, SPDK_BDEV_CLAIM_READ_MANY_WRITE_ONE, NULL, 6869 &bdev_ut_if); 6870 CU_ASSERT(rc == -EPERM); 6871 6872 /* No new v2 RWM claims are allowed */ 6873 spdk_bdev_claim_opts_init(&opts, sizeof(opts)); 6874 opts.shared_claim_key = (uint64_t)&opts; 6875 rc = spdk_bdev_module_claim_bdev_desc(desc2, SPDK_BDEV_CLAIM_READ_MANY_WRITE_SHARED, &opts, 6876 &bdev_ut_if); 6877 CU_ASSERT(rc == -EPERM); 6878 CU_ASSERT(!desc2->write); 6879 6880 /* No new v1 claims are allowed */ 6881 rc = spdk_bdev_module_claim_bdev(bdev, NULL, &bdev_ut_if); 6882 CU_ASSERT(rc == -EPERM); 6883 6884 /* None of the above messed up the existing claim */ 6885 CU_ASSERT(TAILQ_FIRST(&bdev->internal.claim.v2.claims) == desc->claim); 6886 UT_ASSERT_CLAIM_V2_COUNT(bdev, 1); 6887 6888 /* New v2 ROM claims are allowed and the descriptor stays read-only. */ 6889 CU_ASSERT(!desc2->write); 6890 rc = spdk_bdev_module_claim_bdev_desc(desc2, SPDK_BDEV_CLAIM_READ_MANY_WRITE_NONE, NULL, 6891 &bdev_ut_if); 6892 CU_ASSERT(rc == 0); 6893 CU_ASSERT(!desc2->write); 6894 CU_ASSERT(TAILQ_FIRST(&bdev->internal.claim.v2.claims) == desc->claim); 6895 CU_ASSERT(TAILQ_NEXT(desc->claim, link) == desc2->claim); 6896 UT_ASSERT_CLAIM_V2_COUNT(bdev, 2); 6897 6898 /* Claim remains when closing the first descriptor */ 6899 spdk_bdev_close(desc); 6900 CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_READ_MANY_WRITE_NONE); 6901 CU_ASSERT(!TAILQ_EMPTY(&bdev->internal.open_descs)); 6902 CU_ASSERT(TAILQ_FIRST(&bdev->internal.claim.v2.claims) == desc2->claim); 6903 UT_ASSERT_CLAIM_V2_COUNT(bdev, 1); 6904 6905 /* Claim removed when closing the other descriptor */ 6906 spdk_bdev_close(desc2); 6907 CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_NONE); 6908 UT_ASSERT_CLAIM_V2_COUNT(bdev, 0); 6909 CU_ASSERT(TAILQ_EMPTY(&bdev->internal.open_descs)); 6910 6911 /* Cannot claim with a key */ 6912 desc = NULL; 6913 rc = spdk_bdev_open_ext("bdev0", false, bdev_ut_event_cb, NULL, &desc); 6914 CU_ASSERT(rc == 0); 6915 SPDK_CU_ASSERT_FATAL(desc != NULL); 6916 spdk_bdev_claim_opts_init(&opts, sizeof(opts)); 6917 opts.shared_claim_key = (uint64_t)&opts; 6918 rc = spdk_bdev_module_claim_bdev_desc(desc, SPDK_BDEV_CLAIM_READ_MANY_WRITE_NONE, &opts, 6919 &bdev_ut_if); 6920 CU_ASSERT(rc == -EINVAL); 6921 CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_NONE); 6922 UT_ASSERT_CLAIM_V2_COUNT(bdev, 0); 6923 spdk_bdev_close(desc); 6924 6925 /* Cannot claim with a read-write descriptor */ 6926 desc = NULL; 6927 rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc); 6928 CU_ASSERT(rc == 0); 6929 SPDK_CU_ASSERT_FATAL(desc != NULL); 6930 rc = spdk_bdev_module_claim_bdev_desc(desc, SPDK_BDEV_CLAIM_READ_MANY_WRITE_NONE, NULL, 6931 &bdev_ut_if); 6932 CU_ASSERT(rc == -EINVAL); 6933 CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_NONE); 6934 UT_ASSERT_CLAIM_V2_COUNT(bdev, 0); 6935 spdk_bdev_close(desc); 6936 CU_ASSERT(TAILQ_EMPTY(&bdev->internal.open_descs)); 6937 6938 /* Clean up */ 6939 free_bdev(bdev); 6940 } 6941 6942 static void 6943 claim_v2_rwm(void) 6944 { 6945 struct spdk_bdev *bdev; 6946 struct spdk_bdev_desc *desc; 6947 struct spdk_bdev_desc *desc2; 6948 struct spdk_bdev_claim_opts opts; 6949 char good_key, bad_key; 6950 int rc; 6951 6952 bdev = allocate_bdev("bdev0"); 6953 6954 /* Claim without options should fail */ 6955 desc = NULL; 6956 rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc); 6957 CU_ASSERT(rc == 0); 6958 SPDK_CU_ASSERT_FATAL(desc != NULL); 6959 rc = spdk_bdev_module_claim_bdev_desc(desc, SPDK_BDEV_CLAIM_READ_MANY_WRITE_SHARED, NULL, 6960 &bdev_ut_if); 6961 CU_ASSERT(rc == -EINVAL); 6962 CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_NONE); 6963 UT_ASSERT_CLAIM_V2_COUNT(bdev, 0); 6964 CU_ASSERT(desc->claim == NULL); 6965 6966 /* Claim with options */ 6967 spdk_bdev_claim_opts_init(&opts, sizeof(opts)); 6968 snprintf(opts.name, sizeof(opts.name), "%s", "claim with options"); 6969 opts.shared_claim_key = (uint64_t)&good_key; 6970 rc = spdk_bdev_module_claim_bdev_desc(desc, SPDK_BDEV_CLAIM_READ_MANY_WRITE_SHARED, &opts, 6971 &bdev_ut_if); 6972 CU_ASSERT(rc == 0); 6973 CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_READ_MANY_WRITE_SHARED); 6974 SPDK_CU_ASSERT_FATAL(desc->claim != NULL); 6975 CU_ASSERT(desc->claim->module == &bdev_ut_if); 6976 CU_ASSERT(strcmp(desc->claim->name, "claim with options") == 0); 6977 memset(&opts, 0, sizeof(opts)); 6978 CU_ASSERT(strcmp(desc->claim->name, "claim with options") == 0); 6979 CU_ASSERT(TAILQ_FIRST(&bdev->internal.claim.v2.claims) == desc->claim); 6980 UT_ASSERT_CLAIM_V2_COUNT(bdev, 1); 6981 6982 /* The claim blocks new writers. */ 6983 desc2 = NULL; 6984 rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc2); 6985 CU_ASSERT(rc == -EPERM); 6986 CU_ASSERT(desc2 == NULL); 6987 6988 /* New readers are allowed */ 6989 desc2 = NULL; 6990 rc = spdk_bdev_open_ext("bdev0", false, bdev_ut_event_cb, NULL, &desc2); 6991 CU_ASSERT(rc == 0); 6992 CU_ASSERT(desc2 != NULL); 6993 CU_ASSERT(!desc2->write); 6994 6995 /* No new v2 RWO claims are allowed */ 6996 rc = spdk_bdev_module_claim_bdev_desc(desc2, SPDK_BDEV_CLAIM_READ_MANY_WRITE_ONE, NULL, 6997 &bdev_ut_if); 6998 CU_ASSERT(rc == -EPERM); 6999 7000 /* No new v2 ROM claims are allowed and the descriptor stays read-only. */ 7001 CU_ASSERT(!desc2->write); 7002 rc = spdk_bdev_module_claim_bdev_desc(desc2, SPDK_BDEV_CLAIM_READ_MANY_WRITE_NONE, NULL, 7003 &bdev_ut_if); 7004 CU_ASSERT(rc == -EPERM); 7005 CU_ASSERT(!desc2->write); 7006 7007 /* No new v1 claims are allowed */ 7008 rc = spdk_bdev_module_claim_bdev(bdev, NULL, &bdev_ut_if); 7009 CU_ASSERT(rc == -EPERM); 7010 7011 /* No new v2 RWM claims are allowed if the key does not match */ 7012 spdk_bdev_claim_opts_init(&opts, sizeof(opts)); 7013 opts.shared_claim_key = (uint64_t)&bad_key; 7014 rc = spdk_bdev_module_claim_bdev_desc(desc2, SPDK_BDEV_CLAIM_READ_MANY_WRITE_SHARED, &opts, 7015 &bdev_ut_if); 7016 CU_ASSERT(rc == -EPERM); 7017 CU_ASSERT(!desc2->write); 7018 7019 /* None of the above messed up the existing claim */ 7020 CU_ASSERT(TAILQ_FIRST(&bdev->internal.claim.v2.claims) == desc->claim); 7021 UT_ASSERT_CLAIM_V2_COUNT(bdev, 1); 7022 7023 /* New v2 RWM claims are allowed and the descriptor is promoted if the key matches. */ 7024 spdk_bdev_claim_opts_init(&opts, sizeof(opts)); 7025 opts.shared_claim_key = (uint64_t)&good_key; 7026 CU_ASSERT(!desc2->write); 7027 rc = spdk_bdev_module_claim_bdev_desc(desc2, SPDK_BDEV_CLAIM_READ_MANY_WRITE_SHARED, &opts, 7028 &bdev_ut_if); 7029 CU_ASSERT(rc == 0); 7030 CU_ASSERT(desc2->write); 7031 CU_ASSERT(TAILQ_NEXT(desc->claim, link) == desc2->claim); 7032 UT_ASSERT_CLAIM_V2_COUNT(bdev, 2); 7033 7034 /* Claim remains when closing the first descriptor */ 7035 spdk_bdev_close(desc); 7036 CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_READ_MANY_WRITE_SHARED); 7037 CU_ASSERT(!TAILQ_EMPTY(&bdev->internal.open_descs)); 7038 CU_ASSERT(TAILQ_FIRST(&bdev->internal.claim.v2.claims) == desc2->claim); 7039 UT_ASSERT_CLAIM_V2_COUNT(bdev, 1); 7040 7041 /* Claim removed when closing the other descriptor */ 7042 spdk_bdev_close(desc2); 7043 CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_NONE); 7044 CU_ASSERT(TAILQ_EMPTY(&bdev->internal.open_descs)); 7045 7046 /* Cannot claim without a key */ 7047 desc = NULL; 7048 rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc); 7049 CU_ASSERT(rc == 0); 7050 SPDK_CU_ASSERT_FATAL(desc != NULL); 7051 spdk_bdev_claim_opts_init(&opts, sizeof(opts)); 7052 rc = spdk_bdev_module_claim_bdev_desc(desc, SPDK_BDEV_CLAIM_READ_MANY_WRITE_SHARED, &opts, 7053 &bdev_ut_if); 7054 CU_ASSERT(rc == -EINVAL); 7055 spdk_bdev_close(desc); 7056 CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_NONE); 7057 CU_ASSERT(TAILQ_EMPTY(&bdev->internal.open_descs)); 7058 7059 /* Clean up */ 7060 free_bdev(bdev); 7061 } 7062 7063 static void 7064 claim_v2_existing_writer(void) 7065 { 7066 struct spdk_bdev *bdev; 7067 struct spdk_bdev_desc *desc; 7068 struct spdk_bdev_desc *desc2; 7069 struct spdk_bdev_claim_opts opts; 7070 enum spdk_bdev_claim_type type; 7071 enum spdk_bdev_claim_type types[] = { 7072 SPDK_BDEV_CLAIM_READ_MANY_WRITE_ONE, 7073 SPDK_BDEV_CLAIM_READ_MANY_WRITE_SHARED, 7074 SPDK_BDEV_CLAIM_READ_MANY_WRITE_NONE 7075 }; 7076 size_t i; 7077 int rc; 7078 7079 bdev = allocate_bdev("bdev0"); 7080 7081 desc = NULL; 7082 rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc); 7083 CU_ASSERT(rc == 0); 7084 SPDK_CU_ASSERT_FATAL(desc != NULL); 7085 desc2 = NULL; 7086 rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc2); 7087 CU_ASSERT(rc == 0); 7088 SPDK_CU_ASSERT_FATAL(desc2 != NULL); 7089 7090 for (i = 0; i < SPDK_COUNTOF(types); i++) { 7091 type = types[i]; 7092 spdk_bdev_claim_opts_init(&opts, sizeof(opts)); 7093 if (type == SPDK_BDEV_CLAIM_READ_MANY_WRITE_SHARED) { 7094 opts.shared_claim_key = (uint64_t)&opts; 7095 } 7096 rc = spdk_bdev_module_claim_bdev_desc(desc, type, &opts, &bdev_ut_if); 7097 if (type == SPDK_BDEV_CLAIM_READ_MANY_WRITE_NONE) { 7098 CU_ASSERT(rc == -EINVAL); 7099 } else { 7100 CU_ASSERT(rc == -EPERM); 7101 } 7102 CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_NONE); 7103 rc = spdk_bdev_module_claim_bdev_desc(desc2, type, &opts, &bdev_ut_if); 7104 if (type == SPDK_BDEV_CLAIM_READ_MANY_WRITE_NONE) { 7105 CU_ASSERT(rc == -EINVAL); 7106 } else { 7107 CU_ASSERT(rc == -EPERM); 7108 } 7109 CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_NONE); 7110 } 7111 7112 spdk_bdev_close(desc); 7113 spdk_bdev_close(desc2); 7114 7115 /* Clean up */ 7116 free_bdev(bdev); 7117 } 7118 7119 static void 7120 claim_v2_existing_v1(void) 7121 { 7122 struct spdk_bdev *bdev; 7123 struct spdk_bdev_desc *desc; 7124 struct spdk_bdev_claim_opts opts; 7125 enum spdk_bdev_claim_type type; 7126 enum spdk_bdev_claim_type types[] = { 7127 SPDK_BDEV_CLAIM_READ_MANY_WRITE_ONE, 7128 SPDK_BDEV_CLAIM_READ_MANY_WRITE_SHARED, 7129 SPDK_BDEV_CLAIM_READ_MANY_WRITE_NONE 7130 }; 7131 size_t i; 7132 int rc; 7133 7134 bdev = allocate_bdev("bdev0"); 7135 7136 rc = spdk_bdev_module_claim_bdev(bdev, NULL, &bdev_ut_if); 7137 CU_ASSERT(rc == 0); 7138 CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_EXCL_WRITE); 7139 7140 desc = NULL; 7141 rc = spdk_bdev_open_ext("bdev0", false, bdev_ut_event_cb, NULL, &desc); 7142 CU_ASSERT(rc == 0); 7143 SPDK_CU_ASSERT_FATAL(desc != NULL); 7144 7145 for (i = 0; i < SPDK_COUNTOF(types); i++) { 7146 type = types[i]; 7147 spdk_bdev_claim_opts_init(&opts, sizeof(opts)); 7148 if (type == SPDK_BDEV_CLAIM_READ_MANY_WRITE_SHARED) { 7149 opts.shared_claim_key = (uint64_t)&opts; 7150 } 7151 rc = spdk_bdev_module_claim_bdev_desc(desc, type, &opts, &bdev_ut_if); 7152 CU_ASSERT(rc == -EPERM); 7153 CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_EXCL_WRITE); 7154 } 7155 7156 spdk_bdev_module_release_bdev(bdev); 7157 spdk_bdev_close(desc); 7158 7159 /* Clean up */ 7160 free_bdev(bdev); 7161 } 7162 7163 static void 7164 claim_v1_existing_v2(void) 7165 { 7166 struct spdk_bdev *bdev; 7167 struct spdk_bdev_desc *desc; 7168 struct spdk_bdev_claim_opts opts; 7169 enum spdk_bdev_claim_type type; 7170 enum spdk_bdev_claim_type types[] = { 7171 SPDK_BDEV_CLAIM_READ_MANY_WRITE_ONE, 7172 SPDK_BDEV_CLAIM_READ_MANY_WRITE_SHARED, 7173 SPDK_BDEV_CLAIM_READ_MANY_WRITE_NONE 7174 }; 7175 size_t i; 7176 int rc; 7177 7178 bdev = allocate_bdev("bdev0"); 7179 7180 for (i = 0; i < SPDK_COUNTOF(types); i++) { 7181 type = types[i]; 7182 7183 desc = NULL; 7184 rc = spdk_bdev_open_ext("bdev0", false, bdev_ut_event_cb, NULL, &desc); 7185 CU_ASSERT(rc == 0); 7186 SPDK_CU_ASSERT_FATAL(desc != NULL); 7187 7188 /* Get a v2 claim */ 7189 spdk_bdev_claim_opts_init(&opts, sizeof(opts)); 7190 if (type == SPDK_BDEV_CLAIM_READ_MANY_WRITE_SHARED) { 7191 opts.shared_claim_key = (uint64_t)&opts; 7192 } 7193 rc = spdk_bdev_module_claim_bdev_desc(desc, type, &opts, &bdev_ut_if); 7194 CU_ASSERT(rc == 0); 7195 7196 /* Fail to get a v1 claim */ 7197 rc = spdk_bdev_module_claim_bdev(bdev, NULL, &bdev_ut_if); 7198 CU_ASSERT(rc == -EPERM); 7199 7200 spdk_bdev_close(desc); 7201 7202 /* Now v1 succeeds */ 7203 rc = spdk_bdev_module_claim_bdev(bdev, NULL, &bdev_ut_if); 7204 CU_ASSERT(rc == 0) 7205 spdk_bdev_module_release_bdev(bdev); 7206 } 7207 7208 /* Clean up */ 7209 free_bdev(bdev); 7210 } 7211 7212 static void ut_examine_claimed_config0(struct spdk_bdev *bdev); 7213 static void ut_examine_claimed_disk0(struct spdk_bdev *bdev); 7214 static void ut_examine_claimed_config1(struct spdk_bdev *bdev); 7215 static void ut_examine_claimed_disk1(struct spdk_bdev *bdev); 7216 7217 #define UT_MAX_EXAMINE_MODS 2 7218 struct spdk_bdev_module examine_claimed_mods[UT_MAX_EXAMINE_MODS] = { 7219 { 7220 .name = "vbdev_ut_examine0", 7221 .module_init = vbdev_ut_module_init, 7222 .module_fini = vbdev_ut_module_fini, 7223 .examine_config = ut_examine_claimed_config0, 7224 .examine_disk = ut_examine_claimed_disk0, 7225 }, 7226 { 7227 .name = "vbdev_ut_examine1", 7228 .module_init = vbdev_ut_module_init, 7229 .module_fini = vbdev_ut_module_fini, 7230 .examine_config = ut_examine_claimed_config1, 7231 .examine_disk = ut_examine_claimed_disk1, 7232 } 7233 }; 7234 7235 SPDK_BDEV_MODULE_REGISTER(bdev_ut_claimed0, &examine_claimed_mods[0]) 7236 SPDK_BDEV_MODULE_REGISTER(bdev_ut_claimed1, &examine_claimed_mods[1]) 7237 7238 struct ut_examine_claimed_ctx { 7239 uint32_t examine_config_count; 7240 uint32_t examine_disk_count; 7241 7242 /* Claim type to take, with these options */ 7243 enum spdk_bdev_claim_type claim_type; 7244 struct spdk_bdev_claim_opts claim_opts; 7245 7246 /* Expected return value from spdk_bdev_module_claim_bdev_desc() */ 7247 int expect_claim_err; 7248 7249 /* Descriptor used for a claim */ 7250 struct spdk_bdev_desc *desc; 7251 } examine_claimed_ctx[UT_MAX_EXAMINE_MODS]; 7252 7253 bool ut_testing_examine_claimed; 7254 7255 static void 7256 reset_examine_claimed_ctx(void) 7257 { 7258 struct ut_examine_claimed_ctx *ctx; 7259 uint32_t i; 7260 7261 for (i = 0; i < SPDK_COUNTOF(examine_claimed_ctx); i++) { 7262 ctx = &examine_claimed_ctx[i]; 7263 if (ctx->desc != NULL) { 7264 spdk_bdev_close(ctx->desc); 7265 } 7266 memset(ctx, 0, sizeof(*ctx)); 7267 spdk_bdev_claim_opts_init(&ctx->claim_opts, sizeof(ctx->claim_opts)); 7268 } 7269 } 7270 7271 static void 7272 examine_claimed_config(struct spdk_bdev *bdev, uint32_t modnum) 7273 { 7274 SPDK_CU_ASSERT_FATAL(modnum < UT_MAX_EXAMINE_MODS); 7275 struct spdk_bdev_module *module = &examine_claimed_mods[modnum]; 7276 struct ut_examine_claimed_ctx *ctx = &examine_claimed_ctx[modnum]; 7277 int rc; 7278 7279 if (!ut_testing_examine_claimed) { 7280 spdk_bdev_module_examine_done(module); 7281 return; 7282 } 7283 7284 ctx->examine_config_count++; 7285 7286 if (ctx->claim_type != SPDK_BDEV_CLAIM_NONE) { 7287 rc = spdk_bdev_open_ext(bdev->name, false, bdev_ut_event_cb, &ctx->claim_opts, 7288 &ctx->desc); 7289 CU_ASSERT(rc == 0); 7290 7291 rc = spdk_bdev_module_claim_bdev_desc(ctx->desc, ctx->claim_type, NULL, module); 7292 CU_ASSERT(rc == ctx->expect_claim_err); 7293 } 7294 spdk_bdev_module_examine_done(module); 7295 } 7296 7297 static void 7298 ut_examine_claimed_config0(struct spdk_bdev *bdev) 7299 { 7300 examine_claimed_config(bdev, 0); 7301 } 7302 7303 static void 7304 ut_examine_claimed_config1(struct spdk_bdev *bdev) 7305 { 7306 examine_claimed_config(bdev, 1); 7307 } 7308 7309 static void 7310 examine_claimed_disk(struct spdk_bdev *bdev, uint32_t modnum) 7311 { 7312 SPDK_CU_ASSERT_FATAL(modnum < UT_MAX_EXAMINE_MODS); 7313 struct spdk_bdev_module *module = &examine_claimed_mods[modnum]; 7314 struct ut_examine_claimed_ctx *ctx = &examine_claimed_ctx[modnum]; 7315 7316 if (!ut_testing_examine_claimed) { 7317 spdk_bdev_module_examine_done(module); 7318 return; 7319 } 7320 7321 ctx->examine_disk_count++; 7322 7323 spdk_bdev_module_examine_done(module); 7324 } 7325 7326 static void 7327 ut_examine_claimed_disk0(struct spdk_bdev *bdev) 7328 { 7329 examine_claimed_disk(bdev, 0); 7330 } 7331 7332 static void 7333 ut_examine_claimed_disk1(struct spdk_bdev *bdev) 7334 { 7335 examine_claimed_disk(bdev, 1); 7336 } 7337 7338 static void 7339 examine_claimed(void) 7340 { 7341 struct spdk_bdev *bdev; 7342 struct spdk_bdev_module *mod = examine_claimed_mods; 7343 struct ut_examine_claimed_ctx *ctx = examine_claimed_ctx; 7344 7345 ut_testing_examine_claimed = true; 7346 reset_examine_claimed_ctx(); 7347 7348 /* 7349 * With one module claiming, both modules' examine_config should be called, but only the 7350 * claiming module's examine_disk should be called. 7351 */ 7352 ctx[0].claim_type = SPDK_BDEV_CLAIM_READ_MANY_WRITE_NONE; 7353 bdev = allocate_bdev("bdev0"); 7354 CU_ASSERT(ctx[0].examine_config_count == 1); 7355 CU_ASSERT(ctx[0].examine_disk_count == 1); 7356 SPDK_CU_ASSERT_FATAL(ctx[0].desc != NULL); 7357 CU_ASSERT(ctx[0].desc->claim->module == &mod[0]); 7358 CU_ASSERT(ctx[1].examine_config_count == 1); 7359 CU_ASSERT(ctx[1].examine_disk_count == 0); 7360 CU_ASSERT(ctx[1].desc == NULL); 7361 reset_examine_claimed_ctx(); 7362 free_bdev(bdev); 7363 7364 /* 7365 * With two modules claiming, both modules' examine_config and examine_disk should be 7366 * called. 7367 */ 7368 ctx[0].claim_type = SPDK_BDEV_CLAIM_READ_MANY_WRITE_NONE; 7369 ctx[1].claim_type = SPDK_BDEV_CLAIM_READ_MANY_WRITE_NONE; 7370 bdev = allocate_bdev("bdev0"); 7371 CU_ASSERT(ctx[0].examine_config_count == 1); 7372 CU_ASSERT(ctx[0].examine_disk_count == 1); 7373 SPDK_CU_ASSERT_FATAL(ctx[0].desc != NULL); 7374 CU_ASSERT(ctx[0].desc->claim->module == &mod[0]); 7375 CU_ASSERT(ctx[1].examine_config_count == 1); 7376 CU_ASSERT(ctx[1].examine_disk_count == 1); 7377 SPDK_CU_ASSERT_FATAL(ctx[1].desc != NULL); 7378 CU_ASSERT(ctx[1].desc->claim->module == &mod[1]); 7379 reset_examine_claimed_ctx(); 7380 free_bdev(bdev); 7381 7382 /* 7383 * If two vbdev modules try to claim with conflicting claim types, the module that was added 7384 * last wins. The winner gets the claim and is the only one that has its examine_disk 7385 * callback invoked. 7386 */ 7387 ctx[0].claim_type = SPDK_BDEV_CLAIM_READ_MANY_WRITE_NONE; 7388 ctx[0].expect_claim_err = -EPERM; 7389 ctx[1].claim_type = SPDK_BDEV_CLAIM_READ_MANY_WRITE_ONE; 7390 bdev = allocate_bdev("bdev0"); 7391 CU_ASSERT(ctx[0].examine_config_count == 1); 7392 CU_ASSERT(ctx[0].examine_disk_count == 0); 7393 CU_ASSERT(ctx[1].examine_config_count == 1); 7394 CU_ASSERT(ctx[1].examine_disk_count == 1); 7395 SPDK_CU_ASSERT_FATAL(ctx[1].desc != NULL); 7396 CU_ASSERT(ctx[1].desc->claim->module == &mod[1]); 7397 CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_READ_MANY_WRITE_ONE); 7398 reset_examine_claimed_ctx(); 7399 free_bdev(bdev); 7400 7401 ut_testing_examine_claimed = false; 7402 } 7403 7404 int 7405 main(int argc, char **argv) 7406 { 7407 CU_pSuite suite = NULL; 7408 unsigned int num_failures; 7409 7410 CU_initialize_registry(); 7411 7412 suite = CU_add_suite("bdev", ut_bdev_setup, ut_bdev_teardown); 7413 7414 CU_ADD_TEST(suite, bytes_to_blocks_test); 7415 CU_ADD_TEST(suite, num_blocks_test); 7416 CU_ADD_TEST(suite, io_valid_test); 7417 CU_ADD_TEST(suite, open_write_test); 7418 CU_ADD_TEST(suite, claim_test); 7419 CU_ADD_TEST(suite, alias_add_del_test); 7420 CU_ADD_TEST(suite, get_device_stat_test); 7421 CU_ADD_TEST(suite, bdev_io_types_test); 7422 CU_ADD_TEST(suite, bdev_io_wait_test); 7423 CU_ADD_TEST(suite, bdev_io_spans_split_test); 7424 CU_ADD_TEST(suite, bdev_io_boundary_split_test); 7425 CU_ADD_TEST(suite, bdev_io_max_size_and_segment_split_test); 7426 CU_ADD_TEST(suite, bdev_io_mix_split_test); 7427 CU_ADD_TEST(suite, bdev_io_split_with_io_wait); 7428 CU_ADD_TEST(suite, bdev_io_write_unit_split_test); 7429 CU_ADD_TEST(suite, bdev_io_alignment_with_boundary); 7430 CU_ADD_TEST(suite, bdev_io_alignment); 7431 CU_ADD_TEST(suite, bdev_histograms); 7432 CU_ADD_TEST(suite, bdev_write_zeroes); 7433 CU_ADD_TEST(suite, bdev_compare_and_write); 7434 CU_ADD_TEST(suite, bdev_compare); 7435 CU_ADD_TEST(suite, bdev_compare_emulated); 7436 CU_ADD_TEST(suite, bdev_zcopy_write); 7437 CU_ADD_TEST(suite, bdev_zcopy_read); 7438 CU_ADD_TEST(suite, bdev_open_while_hotremove); 7439 CU_ADD_TEST(suite, bdev_close_while_hotremove); 7440 CU_ADD_TEST(suite, bdev_open_ext_test); 7441 CU_ADD_TEST(suite, bdev_open_ext_unregister); 7442 CU_ADD_TEST(suite, bdev_set_io_timeout); 7443 CU_ADD_TEST(suite, bdev_set_qd_sampling); 7444 CU_ADD_TEST(suite, lba_range_overlap); 7445 CU_ADD_TEST(suite, lock_lba_range_check_ranges); 7446 CU_ADD_TEST(suite, lock_lba_range_with_io_outstanding); 7447 CU_ADD_TEST(suite, lock_lba_range_overlapped); 7448 CU_ADD_TEST(suite, bdev_quiesce); 7449 CU_ADD_TEST(suite, bdev_io_abort); 7450 CU_ADD_TEST(suite, bdev_unmap); 7451 CU_ADD_TEST(suite, bdev_write_zeroes_split_test); 7452 CU_ADD_TEST(suite, bdev_set_options_test); 7453 CU_ADD_TEST(suite, bdev_get_memory_domains); 7454 CU_ADD_TEST(suite, bdev_io_ext); 7455 CU_ADD_TEST(suite, bdev_io_ext_no_opts); 7456 CU_ADD_TEST(suite, bdev_io_ext_invalid_opts); 7457 CU_ADD_TEST(suite, bdev_io_ext_split); 7458 CU_ADD_TEST(suite, bdev_io_ext_bounce_buffer); 7459 CU_ADD_TEST(suite, bdev_register_uuid_alias); 7460 CU_ADD_TEST(suite, bdev_unregister_by_name); 7461 CU_ADD_TEST(suite, for_each_bdev_test); 7462 CU_ADD_TEST(suite, bdev_seek_test); 7463 CU_ADD_TEST(suite, bdev_copy); 7464 CU_ADD_TEST(suite, bdev_copy_split_test); 7465 CU_ADD_TEST(suite, examine_locks); 7466 CU_ADD_TEST(suite, claim_v2_rwo); 7467 CU_ADD_TEST(suite, claim_v2_rom); 7468 CU_ADD_TEST(suite, claim_v2_rwm); 7469 CU_ADD_TEST(suite, claim_v2_existing_writer); 7470 CU_ADD_TEST(suite, claim_v2_existing_v1); 7471 CU_ADD_TEST(suite, claim_v1_existing_v2); 7472 CU_ADD_TEST(suite, examine_claimed); 7473 7474 allocate_cores(1); 7475 allocate_threads(1); 7476 set_thread(0); 7477 7478 num_failures = spdk_ut_run_tests(argc, argv, NULL); 7479 CU_cleanup_registry(); 7480 7481 free_threads(); 7482 free_cores(); 7483 7484 return num_failures; 7485 } 7486