1 /* SPDX-License-Identifier: BSD-3-Clause 2 * Copyright (C) 2017 Intel Corporation. All rights reserved. 3 * Copyright (c) 2019 Mellanox Technologies LTD. All rights reserved. 4 * Copyright (c) 2021-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 5 */ 6 7 #include "spdk_cunit.h" 8 9 #include "common/lib/ut_multithread.c" 10 #include "unit/lib/json_mock.c" 11 12 #include "spdk/config.h" 13 /* HACK: disable VTune integration so the unit test doesn't need VTune headers and libs to build */ 14 #undef SPDK_CONFIG_VTUNE 15 16 #include "bdev/bdev.c" 17 18 DEFINE_STUB(spdk_notify_send, uint64_t, (const char *type, const char *ctx), 0); 19 DEFINE_STUB(spdk_notify_type_register, struct spdk_notify_type *, (const char *type), NULL); 20 DEFINE_STUB(spdk_memory_domain_get_dma_device_id, const char *, (struct spdk_memory_domain *domain), 21 "test_domain"); 22 DEFINE_STUB(spdk_memory_domain_get_dma_device_type, enum spdk_dma_device_type, 23 (struct spdk_memory_domain *domain), 0); 24 DEFINE_STUB(spdk_accel_sequence_finish, int, 25 (struct spdk_accel_sequence *seq, spdk_accel_completion_cb cb_fn, void *cb_arg), 0); 26 DEFINE_STUB_V(spdk_accel_sequence_abort, (struct spdk_accel_sequence *seq)); 27 DEFINE_STUB_V(spdk_accel_sequence_reverse, (struct spdk_accel_sequence *seq)); 28 DEFINE_STUB(spdk_accel_append_copy, int, 29 (struct spdk_accel_sequence **seq, struct spdk_io_channel *ch, struct iovec *dst_iovs, 30 uint32_t dst_iovcnt, struct spdk_memory_domain *dst_domain, void *dst_domain_ctx, 31 struct iovec *src_iovs, uint32_t src_iovcnt, struct spdk_memory_domain *src_domain, 32 void *src_domain_ctx, int flags, spdk_accel_step_cb cb_fn, void *cb_arg), 0); 33 DEFINE_STUB(spdk_accel_get_memory_domain, struct spdk_memory_domain *, (void), NULL); 34 35 static bool g_memory_domain_pull_data_called; 36 static bool g_memory_domain_push_data_called; 37 static int g_accel_io_device; 38 39 DEFINE_RETURN_MOCK(spdk_memory_domain_pull_data, int); 40 int 41 spdk_memory_domain_pull_data(struct spdk_memory_domain *src_domain, void *src_domain_ctx, 42 struct iovec *src_iov, uint32_t src_iov_cnt, struct iovec *dst_iov, uint32_t dst_iov_cnt, 43 spdk_memory_domain_data_cpl_cb cpl_cb, void *cpl_cb_arg) 44 { 45 g_memory_domain_pull_data_called = true; 46 HANDLE_RETURN_MOCK(spdk_memory_domain_pull_data); 47 cpl_cb(cpl_cb_arg, 0); 48 return 0; 49 } 50 51 DEFINE_RETURN_MOCK(spdk_memory_domain_push_data, int); 52 int 53 spdk_memory_domain_push_data(struct spdk_memory_domain *dst_domain, void *dst_domain_ctx, 54 struct iovec *dst_iov, uint32_t dst_iovcnt, struct iovec *src_iov, uint32_t src_iovcnt, 55 spdk_memory_domain_data_cpl_cb cpl_cb, void *cpl_cb_arg) 56 { 57 g_memory_domain_push_data_called = true; 58 HANDLE_RETURN_MOCK(spdk_memory_domain_push_data); 59 cpl_cb(cpl_cb_arg, 0); 60 return 0; 61 } 62 63 struct spdk_io_channel * 64 spdk_accel_get_io_channel(void) 65 { 66 return spdk_get_io_channel(&g_accel_io_device); 67 } 68 69 int g_status; 70 int g_count; 71 enum spdk_bdev_event_type g_event_type1; 72 enum spdk_bdev_event_type g_event_type2; 73 enum spdk_bdev_event_type g_event_type3; 74 enum spdk_bdev_event_type g_event_type4; 75 struct spdk_histogram_data *g_histogram; 76 void *g_unregister_arg; 77 int g_unregister_rc; 78 79 void 80 spdk_scsi_nvme_translate(const struct spdk_bdev_io *bdev_io, 81 int *sc, int *sk, int *asc, int *ascq) 82 { 83 } 84 85 static int 86 ut_accel_ch_create_cb(void *io_device, void *ctx) 87 { 88 return 0; 89 } 90 91 static void 92 ut_accel_ch_destroy_cb(void *io_device, void *ctx) 93 { 94 } 95 96 static int 97 ut_bdev_setup(void) 98 { 99 spdk_io_device_register(&g_accel_io_device, ut_accel_ch_create_cb, 100 ut_accel_ch_destroy_cb, 0, NULL); 101 return 0; 102 } 103 104 static int 105 ut_bdev_teardown(void) 106 { 107 spdk_io_device_unregister(&g_accel_io_device, NULL); 108 109 return 0; 110 } 111 112 static int 113 stub_destruct(void *ctx) 114 { 115 return 0; 116 } 117 118 struct ut_expected_io { 119 uint8_t type; 120 uint64_t offset; 121 uint64_t src_offset; 122 uint64_t length; 123 int iovcnt; 124 struct iovec iov[SPDK_BDEV_IO_NUM_CHILD_IOV]; 125 void *md_buf; 126 TAILQ_ENTRY(ut_expected_io) link; 127 }; 128 129 struct bdev_ut_channel { 130 TAILQ_HEAD(, spdk_bdev_io) outstanding_io; 131 uint32_t outstanding_io_count; 132 TAILQ_HEAD(, ut_expected_io) expected_io; 133 }; 134 135 static bool g_io_done; 136 static struct spdk_bdev_io *g_bdev_io; 137 static enum spdk_bdev_io_status g_io_status; 138 static enum spdk_bdev_io_status g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 139 static uint32_t g_bdev_ut_io_device; 140 static struct bdev_ut_channel *g_bdev_ut_channel; 141 static void *g_compare_read_buf; 142 static uint32_t g_compare_read_buf_len; 143 static void *g_compare_write_buf; 144 static uint32_t g_compare_write_buf_len; 145 static void *g_compare_md_buf; 146 static bool g_abort_done; 147 static enum spdk_bdev_io_status g_abort_status; 148 static void *g_zcopy_read_buf; 149 static uint32_t g_zcopy_read_buf_len; 150 static void *g_zcopy_write_buf; 151 static uint32_t g_zcopy_write_buf_len; 152 static struct spdk_bdev_io *g_zcopy_bdev_io; 153 static uint64_t g_seek_data_offset; 154 static uint64_t g_seek_hole_offset; 155 static uint64_t g_seek_offset; 156 157 static struct ut_expected_io * 158 ut_alloc_expected_io(uint8_t type, uint64_t offset, uint64_t length, int iovcnt) 159 { 160 struct ut_expected_io *expected_io; 161 162 expected_io = calloc(1, sizeof(*expected_io)); 163 SPDK_CU_ASSERT_FATAL(expected_io != NULL); 164 165 expected_io->type = type; 166 expected_io->offset = offset; 167 expected_io->length = length; 168 expected_io->iovcnt = iovcnt; 169 170 return expected_io; 171 } 172 173 static struct ut_expected_io * 174 ut_alloc_expected_copy_io(uint8_t type, uint64_t offset, uint64_t src_offset, uint64_t length) 175 { 176 struct ut_expected_io *expected_io; 177 178 expected_io = calloc(1, sizeof(*expected_io)); 179 SPDK_CU_ASSERT_FATAL(expected_io != NULL); 180 181 expected_io->type = type; 182 expected_io->offset = offset; 183 expected_io->src_offset = src_offset; 184 expected_io->length = length; 185 186 return expected_io; 187 } 188 189 static void 190 ut_expected_io_set_iov(struct ut_expected_io *expected_io, int pos, void *base, size_t len) 191 { 192 expected_io->iov[pos].iov_base = base; 193 expected_io->iov[pos].iov_len = len; 194 } 195 196 static void 197 stub_submit_request(struct spdk_io_channel *_ch, struct spdk_bdev_io *bdev_io) 198 { 199 struct bdev_ut_channel *ch = spdk_io_channel_get_ctx(_ch); 200 struct ut_expected_io *expected_io; 201 struct iovec *iov, *expected_iov; 202 struct spdk_bdev_io *bio_to_abort; 203 int i; 204 205 g_bdev_io = bdev_io; 206 207 if (g_compare_read_buf && bdev_io->type == SPDK_BDEV_IO_TYPE_READ) { 208 uint32_t len = bdev_io->u.bdev.iovs[0].iov_len; 209 210 CU_ASSERT(bdev_io->u.bdev.iovcnt == 1); 211 CU_ASSERT(g_compare_read_buf_len == len); 212 memcpy(bdev_io->u.bdev.iovs[0].iov_base, g_compare_read_buf, len); 213 if (bdev_io->bdev->md_len && bdev_io->u.bdev.md_buf && g_compare_md_buf) { 214 memcpy(bdev_io->u.bdev.md_buf, g_compare_md_buf, 215 bdev_io->bdev->md_len * bdev_io->u.bdev.num_blocks); 216 } 217 } 218 219 if (g_compare_write_buf && bdev_io->type == SPDK_BDEV_IO_TYPE_WRITE) { 220 uint32_t len = bdev_io->u.bdev.iovs[0].iov_len; 221 222 CU_ASSERT(bdev_io->u.bdev.iovcnt == 1); 223 CU_ASSERT(g_compare_write_buf_len == len); 224 memcpy(g_compare_write_buf, bdev_io->u.bdev.iovs[0].iov_base, len); 225 } 226 227 if (g_compare_read_buf && bdev_io->type == SPDK_BDEV_IO_TYPE_COMPARE) { 228 uint32_t len = bdev_io->u.bdev.iovs[0].iov_len; 229 230 CU_ASSERT(bdev_io->u.bdev.iovcnt == 1); 231 CU_ASSERT(g_compare_read_buf_len == len); 232 if (memcmp(bdev_io->u.bdev.iovs[0].iov_base, g_compare_read_buf, len)) { 233 g_io_exp_status = SPDK_BDEV_IO_STATUS_MISCOMPARE; 234 } 235 if (bdev_io->u.bdev.md_buf && 236 memcmp(bdev_io->u.bdev.md_buf, g_compare_md_buf, 237 bdev_io->bdev->md_len * bdev_io->u.bdev.num_blocks)) { 238 g_io_exp_status = SPDK_BDEV_IO_STATUS_MISCOMPARE; 239 } 240 } 241 242 if (bdev_io->type == SPDK_BDEV_IO_TYPE_ABORT) { 243 if (g_io_exp_status == SPDK_BDEV_IO_STATUS_SUCCESS) { 244 TAILQ_FOREACH(bio_to_abort, &ch->outstanding_io, module_link) { 245 if (bio_to_abort == bdev_io->u.abort.bio_to_abort) { 246 TAILQ_REMOVE(&ch->outstanding_io, bio_to_abort, module_link); 247 ch->outstanding_io_count--; 248 spdk_bdev_io_complete(bio_to_abort, SPDK_BDEV_IO_STATUS_FAILED); 249 break; 250 } 251 } 252 } 253 } 254 255 if (bdev_io->type == SPDK_BDEV_IO_TYPE_ZCOPY) { 256 if (bdev_io->u.bdev.zcopy.start) { 257 g_zcopy_bdev_io = bdev_io; 258 if (bdev_io->u.bdev.zcopy.populate) { 259 /* Start of a read */ 260 CU_ASSERT(g_zcopy_read_buf != NULL); 261 CU_ASSERT(g_zcopy_read_buf_len > 0); 262 bdev_io->u.bdev.iovs[0].iov_base = g_zcopy_read_buf; 263 bdev_io->u.bdev.iovs[0].iov_len = g_zcopy_read_buf_len; 264 bdev_io->u.bdev.iovcnt = 1; 265 } else { 266 /* Start of a write */ 267 CU_ASSERT(g_zcopy_write_buf != NULL); 268 CU_ASSERT(g_zcopy_write_buf_len > 0); 269 bdev_io->u.bdev.iovs[0].iov_base = g_zcopy_write_buf; 270 bdev_io->u.bdev.iovs[0].iov_len = g_zcopy_write_buf_len; 271 bdev_io->u.bdev.iovcnt = 1; 272 } 273 } else { 274 if (bdev_io->u.bdev.zcopy.commit) { 275 /* End of write */ 276 CU_ASSERT(bdev_io->u.bdev.iovs[0].iov_base == g_zcopy_write_buf); 277 CU_ASSERT(bdev_io->u.bdev.iovs[0].iov_len == g_zcopy_write_buf_len); 278 CU_ASSERT(bdev_io->u.bdev.iovcnt == 1); 279 g_zcopy_write_buf = NULL; 280 g_zcopy_write_buf_len = 0; 281 } else { 282 /* End of read */ 283 CU_ASSERT(bdev_io->u.bdev.iovs[0].iov_base == g_zcopy_read_buf); 284 CU_ASSERT(bdev_io->u.bdev.iovs[0].iov_len == g_zcopy_read_buf_len); 285 CU_ASSERT(bdev_io->u.bdev.iovcnt == 1); 286 g_zcopy_read_buf = NULL; 287 g_zcopy_read_buf_len = 0; 288 } 289 } 290 } 291 292 if (bdev_io->type == SPDK_BDEV_IO_TYPE_SEEK_DATA) { 293 bdev_io->u.bdev.seek.offset = g_seek_data_offset; 294 } 295 296 if (bdev_io->type == SPDK_BDEV_IO_TYPE_SEEK_HOLE) { 297 bdev_io->u.bdev.seek.offset = g_seek_hole_offset; 298 } 299 300 TAILQ_INSERT_TAIL(&ch->outstanding_io, bdev_io, module_link); 301 ch->outstanding_io_count++; 302 303 expected_io = TAILQ_FIRST(&ch->expected_io); 304 if (expected_io == NULL) { 305 return; 306 } 307 TAILQ_REMOVE(&ch->expected_io, expected_io, link); 308 309 if (expected_io->type != SPDK_BDEV_IO_TYPE_INVALID) { 310 CU_ASSERT(bdev_io->type == expected_io->type); 311 } 312 313 if (expected_io->md_buf != NULL) { 314 CU_ASSERT(expected_io->md_buf == bdev_io->u.bdev.md_buf); 315 } 316 317 if (expected_io->length == 0) { 318 free(expected_io); 319 return; 320 } 321 322 CU_ASSERT(expected_io->offset == bdev_io->u.bdev.offset_blocks); 323 CU_ASSERT(expected_io->length = bdev_io->u.bdev.num_blocks); 324 if (expected_io->type == SPDK_BDEV_IO_TYPE_COPY) { 325 CU_ASSERT(expected_io->src_offset == bdev_io->u.bdev.copy.src_offset_blocks); 326 } 327 328 if (expected_io->iovcnt == 0) { 329 free(expected_io); 330 /* UNMAP, WRITE_ZEROES, FLUSH and COPY don't have iovs, so we can just return now. */ 331 return; 332 } 333 334 CU_ASSERT(expected_io->iovcnt == bdev_io->u.bdev.iovcnt); 335 for (i = 0; i < expected_io->iovcnt; i++) { 336 expected_iov = &expected_io->iov[i]; 337 if (bdev_io->internal.orig_iovcnt == 0) { 338 iov = &bdev_io->u.bdev.iovs[i]; 339 } else { 340 iov = bdev_io->internal.orig_iovs; 341 } 342 CU_ASSERT(iov->iov_len == expected_iov->iov_len); 343 CU_ASSERT(iov->iov_base == expected_iov->iov_base); 344 } 345 346 free(expected_io); 347 } 348 349 static void 350 stub_submit_request_get_buf_cb(struct spdk_io_channel *_ch, 351 struct spdk_bdev_io *bdev_io, bool success) 352 { 353 CU_ASSERT(success == true); 354 355 stub_submit_request(_ch, bdev_io); 356 } 357 358 static void 359 stub_submit_request_get_buf(struct spdk_io_channel *_ch, struct spdk_bdev_io *bdev_io) 360 { 361 spdk_bdev_io_get_buf(bdev_io, stub_submit_request_get_buf_cb, 362 bdev_io->u.bdev.num_blocks * bdev_io->bdev->blocklen); 363 } 364 365 static uint32_t 366 stub_complete_io(uint32_t num_to_complete) 367 { 368 struct bdev_ut_channel *ch = g_bdev_ut_channel; 369 struct spdk_bdev_io *bdev_io; 370 static enum spdk_bdev_io_status io_status; 371 uint32_t num_completed = 0; 372 373 while (num_completed < num_to_complete) { 374 if (TAILQ_EMPTY(&ch->outstanding_io)) { 375 break; 376 } 377 bdev_io = TAILQ_FIRST(&ch->outstanding_io); 378 TAILQ_REMOVE(&ch->outstanding_io, bdev_io, module_link); 379 ch->outstanding_io_count--; 380 io_status = g_io_exp_status == SPDK_BDEV_IO_STATUS_SUCCESS ? SPDK_BDEV_IO_STATUS_SUCCESS : 381 g_io_exp_status; 382 spdk_bdev_io_complete(bdev_io, io_status); 383 num_completed++; 384 } 385 386 return num_completed; 387 } 388 389 static struct spdk_io_channel * 390 bdev_ut_get_io_channel(void *ctx) 391 { 392 return spdk_get_io_channel(&g_bdev_ut_io_device); 393 } 394 395 static bool g_io_types_supported[SPDK_BDEV_NUM_IO_TYPES] = { 396 [SPDK_BDEV_IO_TYPE_READ] = true, 397 [SPDK_BDEV_IO_TYPE_WRITE] = true, 398 [SPDK_BDEV_IO_TYPE_COMPARE] = true, 399 [SPDK_BDEV_IO_TYPE_UNMAP] = true, 400 [SPDK_BDEV_IO_TYPE_FLUSH] = true, 401 [SPDK_BDEV_IO_TYPE_RESET] = true, 402 [SPDK_BDEV_IO_TYPE_NVME_ADMIN] = true, 403 [SPDK_BDEV_IO_TYPE_NVME_IO] = true, 404 [SPDK_BDEV_IO_TYPE_NVME_IO_MD] = true, 405 [SPDK_BDEV_IO_TYPE_WRITE_ZEROES] = true, 406 [SPDK_BDEV_IO_TYPE_ZCOPY] = true, 407 [SPDK_BDEV_IO_TYPE_ABORT] = true, 408 [SPDK_BDEV_IO_TYPE_SEEK_HOLE] = true, 409 [SPDK_BDEV_IO_TYPE_SEEK_DATA] = true, 410 [SPDK_BDEV_IO_TYPE_COPY] = true, 411 }; 412 413 static void 414 ut_enable_io_type(enum spdk_bdev_io_type io_type, bool enable) 415 { 416 g_io_types_supported[io_type] = enable; 417 } 418 419 static bool 420 stub_io_type_supported(void *_bdev, enum spdk_bdev_io_type io_type) 421 { 422 return g_io_types_supported[io_type]; 423 } 424 425 static struct spdk_bdev_fn_table fn_table = { 426 .destruct = stub_destruct, 427 .submit_request = stub_submit_request, 428 .get_io_channel = bdev_ut_get_io_channel, 429 .io_type_supported = stub_io_type_supported, 430 }; 431 432 static int 433 bdev_ut_create_ch(void *io_device, void *ctx_buf) 434 { 435 struct bdev_ut_channel *ch = ctx_buf; 436 437 CU_ASSERT(g_bdev_ut_channel == NULL); 438 g_bdev_ut_channel = ch; 439 440 TAILQ_INIT(&ch->outstanding_io); 441 ch->outstanding_io_count = 0; 442 TAILQ_INIT(&ch->expected_io); 443 return 0; 444 } 445 446 static void 447 bdev_ut_destroy_ch(void *io_device, void *ctx_buf) 448 { 449 CU_ASSERT(g_bdev_ut_channel != NULL); 450 g_bdev_ut_channel = NULL; 451 } 452 453 struct spdk_bdev_module bdev_ut_if; 454 455 static int 456 bdev_ut_module_init(void) 457 { 458 spdk_io_device_register(&g_bdev_ut_io_device, bdev_ut_create_ch, bdev_ut_destroy_ch, 459 sizeof(struct bdev_ut_channel), NULL); 460 spdk_bdev_module_init_done(&bdev_ut_if); 461 return 0; 462 } 463 464 static void 465 bdev_ut_module_fini(void) 466 { 467 spdk_io_device_unregister(&g_bdev_ut_io_device, NULL); 468 } 469 470 struct spdk_bdev_module bdev_ut_if = { 471 .name = "bdev_ut", 472 .module_init = bdev_ut_module_init, 473 .module_fini = bdev_ut_module_fini, 474 .async_init = true, 475 }; 476 477 static void vbdev_ut_examine_config(struct spdk_bdev *bdev); 478 static void vbdev_ut_examine_disk(struct spdk_bdev *bdev); 479 480 static int 481 vbdev_ut_module_init(void) 482 { 483 return 0; 484 } 485 486 static void 487 vbdev_ut_module_fini(void) 488 { 489 } 490 491 struct spdk_bdev_module vbdev_ut_if = { 492 .name = "vbdev_ut", 493 .module_init = vbdev_ut_module_init, 494 .module_fini = vbdev_ut_module_fini, 495 .examine_config = vbdev_ut_examine_config, 496 .examine_disk = vbdev_ut_examine_disk, 497 }; 498 499 SPDK_BDEV_MODULE_REGISTER(bdev_ut, &bdev_ut_if) 500 SPDK_BDEV_MODULE_REGISTER(vbdev_ut, &vbdev_ut_if) 501 502 struct ut_examine_ctx { 503 void (*examine_config)(struct spdk_bdev *bdev); 504 void (*examine_disk)(struct spdk_bdev *bdev); 505 uint32_t examine_config_count; 506 uint32_t examine_disk_count; 507 }; 508 509 static void 510 vbdev_ut_examine_config(struct spdk_bdev *bdev) 511 { 512 struct ut_examine_ctx *ctx = bdev->ctxt; 513 514 if (ctx != NULL) { 515 ctx->examine_config_count++; 516 if (ctx->examine_config != NULL) { 517 ctx->examine_config(bdev); 518 } 519 } 520 521 spdk_bdev_module_examine_done(&vbdev_ut_if); 522 } 523 524 static void 525 vbdev_ut_examine_disk(struct spdk_bdev *bdev) 526 { 527 struct ut_examine_ctx *ctx = bdev->ctxt; 528 529 if (ctx != NULL) { 530 ctx->examine_disk_count++; 531 if (ctx->examine_disk != NULL) { 532 ctx->examine_disk(bdev); 533 } 534 } 535 536 spdk_bdev_module_examine_done(&vbdev_ut_if); 537 } 538 539 static struct spdk_bdev * 540 allocate_bdev_ctx(char *name, void *ctx) 541 { 542 struct spdk_bdev *bdev; 543 int rc; 544 545 bdev = calloc(1, sizeof(*bdev)); 546 SPDK_CU_ASSERT_FATAL(bdev != NULL); 547 548 bdev->ctxt = ctx; 549 bdev->name = name; 550 bdev->fn_table = &fn_table; 551 bdev->module = &bdev_ut_if; 552 bdev->blockcnt = 1024; 553 bdev->blocklen = 512; 554 555 spdk_uuid_generate(&bdev->uuid); 556 557 rc = spdk_bdev_register(bdev); 558 poll_threads(); 559 CU_ASSERT(rc == 0); 560 561 return bdev; 562 } 563 564 static struct spdk_bdev * 565 allocate_bdev(char *name) 566 { 567 return allocate_bdev_ctx(name, NULL); 568 } 569 570 static struct spdk_bdev * 571 allocate_vbdev(char *name) 572 { 573 struct spdk_bdev *bdev; 574 int rc; 575 576 bdev = calloc(1, sizeof(*bdev)); 577 SPDK_CU_ASSERT_FATAL(bdev != NULL); 578 579 bdev->name = name; 580 bdev->fn_table = &fn_table; 581 bdev->module = &vbdev_ut_if; 582 583 rc = spdk_bdev_register(bdev); 584 poll_threads(); 585 CU_ASSERT(rc == 0); 586 587 return bdev; 588 } 589 590 static void 591 free_bdev(struct spdk_bdev *bdev) 592 { 593 spdk_bdev_unregister(bdev, NULL, NULL); 594 poll_threads(); 595 memset(bdev, 0xFF, sizeof(*bdev)); 596 free(bdev); 597 } 598 599 static void 600 free_vbdev(struct spdk_bdev *bdev) 601 { 602 spdk_bdev_unregister(bdev, NULL, NULL); 603 poll_threads(); 604 memset(bdev, 0xFF, sizeof(*bdev)); 605 free(bdev); 606 } 607 608 static void 609 get_device_stat_cb(struct spdk_bdev *bdev, struct spdk_bdev_io_stat *stat, void *cb_arg, int rc) 610 { 611 const char *bdev_name; 612 613 CU_ASSERT(bdev != NULL); 614 CU_ASSERT(rc == 0); 615 bdev_name = spdk_bdev_get_name(bdev); 616 CU_ASSERT_STRING_EQUAL(bdev_name, "bdev0"); 617 618 free(stat); 619 620 *(bool *)cb_arg = true; 621 } 622 623 static void 624 bdev_unregister_cb(void *cb_arg, int rc) 625 { 626 g_unregister_arg = cb_arg; 627 g_unregister_rc = rc; 628 } 629 630 static void 631 bdev_ut_event_cb(enum spdk_bdev_event_type type, struct spdk_bdev *bdev, void *event_ctx) 632 { 633 } 634 635 static void 636 bdev_open_cb1(enum spdk_bdev_event_type type, struct spdk_bdev *bdev, void *event_ctx) 637 { 638 struct spdk_bdev_desc *desc = *(struct spdk_bdev_desc **)event_ctx; 639 640 g_event_type1 = type; 641 if (SPDK_BDEV_EVENT_REMOVE == type) { 642 spdk_bdev_close(desc); 643 } 644 } 645 646 static void 647 bdev_open_cb2(enum spdk_bdev_event_type type, struct spdk_bdev *bdev, void *event_ctx) 648 { 649 struct spdk_bdev_desc *desc = *(struct spdk_bdev_desc **)event_ctx; 650 651 g_event_type2 = type; 652 if (SPDK_BDEV_EVENT_REMOVE == type) { 653 spdk_bdev_close(desc); 654 } 655 } 656 657 static void 658 bdev_open_cb3(enum spdk_bdev_event_type type, struct spdk_bdev *bdev, void *event_ctx) 659 { 660 g_event_type3 = type; 661 } 662 663 static void 664 bdev_open_cb4(enum spdk_bdev_event_type type, struct spdk_bdev *bdev, void *event_ctx) 665 { 666 g_event_type4 = type; 667 } 668 669 static void 670 bdev_seek_cb(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg) 671 { 672 g_seek_offset = spdk_bdev_io_get_seek_offset(bdev_io); 673 spdk_bdev_free_io(bdev_io); 674 } 675 676 static void 677 get_device_stat_test(void) 678 { 679 struct spdk_bdev *bdev; 680 struct spdk_bdev_io_stat *stat; 681 bool done; 682 683 bdev = allocate_bdev("bdev0"); 684 stat = calloc(1, sizeof(struct spdk_bdev_io_stat)); 685 if (stat == NULL) { 686 free_bdev(bdev); 687 return; 688 } 689 690 done = false; 691 spdk_bdev_get_device_stat(bdev, stat, get_device_stat_cb, &done); 692 while (!done) { poll_threads(); } 693 694 free_bdev(bdev); 695 } 696 697 static void 698 open_write_test(void) 699 { 700 struct spdk_bdev *bdev[9]; 701 struct spdk_bdev_desc *desc[9] = {}; 702 int rc; 703 704 /* 705 * Create a tree of bdevs to test various open w/ write cases. 706 * 707 * bdev0 through bdev3 are physical block devices, such as NVMe 708 * namespaces or Ceph block devices. 709 * 710 * bdev4 is a virtual bdev with multiple base bdevs. This models 711 * caching or RAID use cases. 712 * 713 * bdev5 through bdev7 are all virtual bdevs with the same base 714 * bdev (except bdev7). This models partitioning or logical volume 715 * use cases. 716 * 717 * bdev7 is a virtual bdev with multiple base bdevs. One of base bdevs 718 * (bdev2) is shared with other virtual bdevs: bdev5 and bdev6. This 719 * models caching, RAID, partitioning or logical volumes use cases. 720 * 721 * bdev8 is a virtual bdev with multiple base bdevs, but these 722 * base bdevs are themselves virtual bdevs. 723 * 724 * bdev8 725 * | 726 * +----------+ 727 * | | 728 * bdev4 bdev5 bdev6 bdev7 729 * | | | | 730 * +---+---+ +---+ + +---+---+ 731 * | | \ | / \ 732 * bdev0 bdev1 bdev2 bdev3 733 */ 734 735 bdev[0] = allocate_bdev("bdev0"); 736 rc = spdk_bdev_module_claim_bdev(bdev[0], NULL, &bdev_ut_if); 737 CU_ASSERT(rc == 0); 738 739 bdev[1] = allocate_bdev("bdev1"); 740 rc = spdk_bdev_module_claim_bdev(bdev[1], NULL, &bdev_ut_if); 741 CU_ASSERT(rc == 0); 742 743 bdev[2] = allocate_bdev("bdev2"); 744 rc = spdk_bdev_module_claim_bdev(bdev[2], NULL, &bdev_ut_if); 745 CU_ASSERT(rc == 0); 746 747 bdev[3] = allocate_bdev("bdev3"); 748 rc = spdk_bdev_module_claim_bdev(bdev[3], NULL, &bdev_ut_if); 749 CU_ASSERT(rc == 0); 750 751 bdev[4] = allocate_vbdev("bdev4"); 752 rc = spdk_bdev_module_claim_bdev(bdev[4], NULL, &bdev_ut_if); 753 CU_ASSERT(rc == 0); 754 755 bdev[5] = allocate_vbdev("bdev5"); 756 rc = spdk_bdev_module_claim_bdev(bdev[5], NULL, &bdev_ut_if); 757 CU_ASSERT(rc == 0); 758 759 bdev[6] = allocate_vbdev("bdev6"); 760 761 bdev[7] = allocate_vbdev("bdev7"); 762 763 bdev[8] = allocate_vbdev("bdev8"); 764 765 /* Open bdev0 read-only. This should succeed. */ 766 rc = spdk_bdev_open_ext("bdev0", false, bdev_ut_event_cb, NULL, &desc[0]); 767 CU_ASSERT(rc == 0); 768 SPDK_CU_ASSERT_FATAL(desc[0] != NULL); 769 CU_ASSERT(bdev[0] == spdk_bdev_desc_get_bdev(desc[0])); 770 spdk_bdev_close(desc[0]); 771 772 /* 773 * Open bdev1 read/write. This should fail since bdev1 has been claimed 774 * by a vbdev module. 775 */ 776 rc = spdk_bdev_open_ext("bdev1", true, bdev_ut_event_cb, NULL, &desc[1]); 777 CU_ASSERT(rc == -EPERM); 778 779 /* 780 * Open bdev4 read/write. This should fail since bdev3 has been claimed 781 * by a vbdev module. 782 */ 783 rc = spdk_bdev_open_ext("bdev4", true, bdev_ut_event_cb, NULL, &desc[4]); 784 CU_ASSERT(rc == -EPERM); 785 786 /* Open bdev4 read-only. This should succeed. */ 787 rc = spdk_bdev_open_ext("bdev4", false, bdev_ut_event_cb, NULL, &desc[4]); 788 CU_ASSERT(rc == 0); 789 SPDK_CU_ASSERT_FATAL(desc[4] != NULL); 790 CU_ASSERT(bdev[4] == spdk_bdev_desc_get_bdev(desc[4])); 791 spdk_bdev_close(desc[4]); 792 793 /* 794 * Open bdev8 read/write. This should succeed since it is a leaf 795 * bdev. 796 */ 797 rc = spdk_bdev_open_ext("bdev8", true, bdev_ut_event_cb, NULL, &desc[8]); 798 CU_ASSERT(rc == 0); 799 SPDK_CU_ASSERT_FATAL(desc[8] != NULL); 800 CU_ASSERT(bdev[8] == spdk_bdev_desc_get_bdev(desc[8])); 801 spdk_bdev_close(desc[8]); 802 803 /* 804 * Open bdev5 read/write. This should fail since bdev4 has been claimed 805 * by a vbdev module. 806 */ 807 rc = spdk_bdev_open_ext("bdev5", true, bdev_ut_event_cb, NULL, &desc[5]); 808 CU_ASSERT(rc == -EPERM); 809 810 /* Open bdev4 read-only. This should succeed. */ 811 rc = spdk_bdev_open_ext("bdev5", false, bdev_ut_event_cb, NULL, &desc[5]); 812 CU_ASSERT(rc == 0); 813 SPDK_CU_ASSERT_FATAL(desc[5] != NULL); 814 CU_ASSERT(bdev[5] == spdk_bdev_desc_get_bdev(desc[5])); 815 spdk_bdev_close(desc[5]); 816 817 free_vbdev(bdev[8]); 818 819 free_vbdev(bdev[5]); 820 free_vbdev(bdev[6]); 821 free_vbdev(bdev[7]); 822 823 free_vbdev(bdev[4]); 824 825 free_bdev(bdev[0]); 826 free_bdev(bdev[1]); 827 free_bdev(bdev[2]); 828 free_bdev(bdev[3]); 829 } 830 831 static void 832 claim_test(void) 833 { 834 struct spdk_bdev *bdev; 835 struct spdk_bdev_desc *desc, *open_desc; 836 int rc; 837 uint32_t count; 838 839 /* 840 * A vbdev that uses a read-only bdev may need it to remain read-only. 841 * To do so, it opens the bdev read-only, then claims it without 842 * passing a spdk_bdev_desc. 843 */ 844 bdev = allocate_bdev("bdev0"); 845 rc = spdk_bdev_open_ext("bdev0", false, bdev_ut_event_cb, NULL, &desc); 846 CU_ASSERT(rc == 0); 847 CU_ASSERT(desc->write == false); 848 849 rc = spdk_bdev_module_claim_bdev(bdev, NULL, &bdev_ut_if); 850 CU_ASSERT(rc == 0); 851 CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_EXCL_WRITE); 852 CU_ASSERT(bdev->internal.claim.v1.module == &bdev_ut_if); 853 854 /* There should be only one open descriptor and it should still be ro */ 855 count = 0; 856 TAILQ_FOREACH(open_desc, &bdev->internal.open_descs, link) { 857 CU_ASSERT(open_desc == desc); 858 CU_ASSERT(!open_desc->write); 859 count++; 860 } 861 CU_ASSERT(count == 1); 862 863 /* A read-only bdev is upgraded to read-write if desc is passed. */ 864 spdk_bdev_module_release_bdev(bdev); 865 rc = spdk_bdev_module_claim_bdev(bdev, desc, &bdev_ut_if); 866 CU_ASSERT(rc == 0); 867 CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_EXCL_WRITE); 868 CU_ASSERT(bdev->internal.claim.v1.module == &bdev_ut_if); 869 870 /* There should be only one open descriptor and it should be rw */ 871 count = 0; 872 TAILQ_FOREACH(open_desc, &bdev->internal.open_descs, link) { 873 CU_ASSERT(open_desc == desc); 874 CU_ASSERT(open_desc->write); 875 count++; 876 } 877 CU_ASSERT(count == 1); 878 879 spdk_bdev_close(desc); 880 free_bdev(bdev); 881 } 882 883 static void 884 bytes_to_blocks_test(void) 885 { 886 struct spdk_bdev bdev; 887 uint64_t offset_blocks, num_blocks; 888 889 memset(&bdev, 0, sizeof(bdev)); 890 891 bdev.blocklen = 512; 892 893 /* All parameters valid */ 894 offset_blocks = 0; 895 num_blocks = 0; 896 CU_ASSERT(bdev_bytes_to_blocks(&bdev, 512, &offset_blocks, 1024, &num_blocks) == 0); 897 CU_ASSERT(offset_blocks == 1); 898 CU_ASSERT(num_blocks == 2); 899 900 /* Offset not a block multiple */ 901 CU_ASSERT(bdev_bytes_to_blocks(&bdev, 3, &offset_blocks, 512, &num_blocks) != 0); 902 903 /* Length not a block multiple */ 904 CU_ASSERT(bdev_bytes_to_blocks(&bdev, 512, &offset_blocks, 3, &num_blocks) != 0); 905 906 /* In case blocklen not the power of two */ 907 bdev.blocklen = 100; 908 CU_ASSERT(bdev_bytes_to_blocks(&bdev, 100, &offset_blocks, 200, &num_blocks) == 0); 909 CU_ASSERT(offset_blocks == 1); 910 CU_ASSERT(num_blocks == 2); 911 912 /* Offset not a block multiple */ 913 CU_ASSERT(bdev_bytes_to_blocks(&bdev, 3, &offset_blocks, 100, &num_blocks) != 0); 914 915 /* Length not a block multiple */ 916 CU_ASSERT(bdev_bytes_to_blocks(&bdev, 100, &offset_blocks, 3, &num_blocks) != 0); 917 } 918 919 static void 920 num_blocks_test(void) 921 { 922 struct spdk_bdev bdev; 923 struct spdk_bdev_desc *desc = NULL; 924 int rc; 925 926 memset(&bdev, 0, sizeof(bdev)); 927 bdev.name = "num_blocks"; 928 bdev.fn_table = &fn_table; 929 bdev.module = &bdev_ut_if; 930 spdk_bdev_register(&bdev); 931 poll_threads(); 932 spdk_bdev_notify_blockcnt_change(&bdev, 50); 933 934 /* Growing block number */ 935 CU_ASSERT(spdk_bdev_notify_blockcnt_change(&bdev, 70) == 0); 936 /* Shrinking block number */ 937 CU_ASSERT(spdk_bdev_notify_blockcnt_change(&bdev, 30) == 0); 938 939 rc = spdk_bdev_open_ext("num_blocks", false, bdev_open_cb1, &desc, &desc); 940 CU_ASSERT(rc == 0); 941 SPDK_CU_ASSERT_FATAL(desc != NULL); 942 CU_ASSERT(&bdev == spdk_bdev_desc_get_bdev(desc)); 943 944 /* Growing block number */ 945 CU_ASSERT(spdk_bdev_notify_blockcnt_change(&bdev, 80) == 0); 946 /* Shrinking block number */ 947 CU_ASSERT(spdk_bdev_notify_blockcnt_change(&bdev, 20) != 0); 948 949 g_event_type1 = 0xFF; 950 /* Growing block number */ 951 CU_ASSERT(spdk_bdev_notify_blockcnt_change(&bdev, 90) == 0); 952 953 poll_threads(); 954 CU_ASSERT_EQUAL(g_event_type1, SPDK_BDEV_EVENT_RESIZE); 955 956 g_event_type1 = 0xFF; 957 /* Growing block number and closing */ 958 CU_ASSERT(spdk_bdev_notify_blockcnt_change(&bdev, 100) == 0); 959 960 spdk_bdev_close(desc); 961 spdk_bdev_unregister(&bdev, NULL, NULL); 962 963 poll_threads(); 964 965 /* Callback is not called for closed device */ 966 CU_ASSERT_EQUAL(g_event_type1, 0xFF); 967 } 968 969 static void 970 io_valid_test(void) 971 { 972 struct spdk_bdev bdev; 973 974 memset(&bdev, 0, sizeof(bdev)); 975 976 bdev.blocklen = 512; 977 spdk_spin_init(&bdev.internal.spinlock); 978 979 spdk_bdev_notify_blockcnt_change(&bdev, 100); 980 981 /* All parameters valid */ 982 CU_ASSERT(bdev_io_valid_blocks(&bdev, 1, 2) == true); 983 984 /* Last valid block */ 985 CU_ASSERT(bdev_io_valid_blocks(&bdev, 99, 1) == true); 986 987 /* Offset past end of bdev */ 988 CU_ASSERT(bdev_io_valid_blocks(&bdev, 100, 1) == false); 989 990 /* Offset + length past end of bdev */ 991 CU_ASSERT(bdev_io_valid_blocks(&bdev, 99, 2) == false); 992 993 /* Offset near end of uint64_t range (2^64 - 1) */ 994 CU_ASSERT(bdev_io_valid_blocks(&bdev, 18446744073709551615ULL, 1) == false); 995 996 spdk_spin_destroy(&bdev.internal.spinlock); 997 } 998 999 static void 1000 alias_add_del_test(void) 1001 { 1002 struct spdk_bdev *bdev[3]; 1003 int rc; 1004 1005 /* Creating and registering bdevs */ 1006 bdev[0] = allocate_bdev("bdev0"); 1007 SPDK_CU_ASSERT_FATAL(bdev[0] != 0); 1008 1009 bdev[1] = allocate_bdev("bdev1"); 1010 SPDK_CU_ASSERT_FATAL(bdev[1] != 0); 1011 1012 bdev[2] = allocate_bdev("bdev2"); 1013 SPDK_CU_ASSERT_FATAL(bdev[2] != 0); 1014 1015 poll_threads(); 1016 1017 /* 1018 * Trying adding an alias identical to name. 1019 * Alias is identical to name, so it can not be added to aliases list 1020 */ 1021 rc = spdk_bdev_alias_add(bdev[0], bdev[0]->name); 1022 CU_ASSERT(rc == -EEXIST); 1023 1024 /* 1025 * Trying to add empty alias, 1026 * this one should fail 1027 */ 1028 rc = spdk_bdev_alias_add(bdev[0], NULL); 1029 CU_ASSERT(rc == -EINVAL); 1030 1031 /* Trying adding same alias to two different registered bdevs */ 1032 1033 /* Alias is used first time, so this one should pass */ 1034 rc = spdk_bdev_alias_add(bdev[0], "proper alias 0"); 1035 CU_ASSERT(rc == 0); 1036 1037 /* Alias was added to another bdev, so this one should fail */ 1038 rc = spdk_bdev_alias_add(bdev[1], "proper alias 0"); 1039 CU_ASSERT(rc == -EEXIST); 1040 1041 /* Alias is used first time, so this one should pass */ 1042 rc = spdk_bdev_alias_add(bdev[1], "proper alias 1"); 1043 CU_ASSERT(rc == 0); 1044 1045 /* Trying removing an alias from registered bdevs */ 1046 1047 /* Alias is not on a bdev aliases list, so this one should fail */ 1048 rc = spdk_bdev_alias_del(bdev[0], "not existing"); 1049 CU_ASSERT(rc == -ENOENT); 1050 1051 /* Alias is present on a bdev aliases list, so this one should pass */ 1052 rc = spdk_bdev_alias_del(bdev[0], "proper alias 0"); 1053 CU_ASSERT(rc == 0); 1054 1055 /* Alias is present on a bdev aliases list, so this one should pass */ 1056 rc = spdk_bdev_alias_del(bdev[1], "proper alias 1"); 1057 CU_ASSERT(rc == 0); 1058 1059 /* Trying to remove name instead of alias, so this one should fail, name cannot be changed or removed */ 1060 rc = spdk_bdev_alias_del(bdev[0], bdev[0]->name); 1061 CU_ASSERT(rc != 0); 1062 1063 /* Trying to del all alias from empty alias list */ 1064 spdk_bdev_alias_del_all(bdev[2]); 1065 SPDK_CU_ASSERT_FATAL(TAILQ_EMPTY(&bdev[2]->aliases)); 1066 1067 /* Trying to del all alias from non-empty alias list */ 1068 rc = spdk_bdev_alias_add(bdev[2], "alias0"); 1069 CU_ASSERT(rc == 0); 1070 rc = spdk_bdev_alias_add(bdev[2], "alias1"); 1071 CU_ASSERT(rc == 0); 1072 spdk_bdev_alias_del_all(bdev[2]); 1073 CU_ASSERT(TAILQ_EMPTY(&bdev[2]->aliases)); 1074 1075 /* Unregister and free bdevs */ 1076 spdk_bdev_unregister(bdev[0], NULL, NULL); 1077 spdk_bdev_unregister(bdev[1], NULL, NULL); 1078 spdk_bdev_unregister(bdev[2], NULL, NULL); 1079 1080 poll_threads(); 1081 1082 free(bdev[0]); 1083 free(bdev[1]); 1084 free(bdev[2]); 1085 } 1086 1087 static void 1088 io_done(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg) 1089 { 1090 g_io_done = true; 1091 g_io_status = bdev_io->internal.status; 1092 if ((bdev_io->type == SPDK_BDEV_IO_TYPE_ZCOPY) && 1093 (bdev_io->u.bdev.zcopy.start)) { 1094 g_zcopy_bdev_io = bdev_io; 1095 } else { 1096 spdk_bdev_free_io(bdev_io); 1097 g_zcopy_bdev_io = NULL; 1098 } 1099 } 1100 1101 static void 1102 bdev_init_cb(void *arg, int rc) 1103 { 1104 CU_ASSERT(rc == 0); 1105 } 1106 1107 static void 1108 bdev_fini_cb(void *arg) 1109 { 1110 } 1111 1112 static void 1113 ut_init_bdev(struct spdk_bdev_opts *opts) 1114 { 1115 int rc; 1116 1117 if (opts != NULL) { 1118 rc = spdk_bdev_set_opts(opts); 1119 CU_ASSERT(rc == 0); 1120 } 1121 rc = spdk_iobuf_initialize(); 1122 CU_ASSERT(rc == 0); 1123 spdk_bdev_initialize(bdev_init_cb, NULL); 1124 poll_threads(); 1125 } 1126 1127 static void 1128 ut_fini_bdev(void) 1129 { 1130 spdk_bdev_finish(bdev_fini_cb, NULL); 1131 spdk_iobuf_finish(bdev_fini_cb, NULL); 1132 poll_threads(); 1133 } 1134 1135 struct bdev_ut_io_wait_entry { 1136 struct spdk_bdev_io_wait_entry entry; 1137 struct spdk_io_channel *io_ch; 1138 struct spdk_bdev_desc *desc; 1139 bool submitted; 1140 }; 1141 1142 static void 1143 io_wait_cb(void *arg) 1144 { 1145 struct bdev_ut_io_wait_entry *entry = arg; 1146 int rc; 1147 1148 rc = spdk_bdev_read_blocks(entry->desc, entry->io_ch, NULL, 0, 1, io_done, NULL); 1149 CU_ASSERT(rc == 0); 1150 entry->submitted = true; 1151 } 1152 1153 static void 1154 bdev_io_types_test(void) 1155 { 1156 struct spdk_bdev *bdev; 1157 struct spdk_bdev_desc *desc = NULL; 1158 struct spdk_io_channel *io_ch; 1159 struct spdk_bdev_opts bdev_opts = {}; 1160 int rc; 1161 1162 spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts)); 1163 bdev_opts.bdev_io_pool_size = 4; 1164 bdev_opts.bdev_io_cache_size = 2; 1165 ut_init_bdev(&bdev_opts); 1166 1167 bdev = allocate_bdev("bdev0"); 1168 1169 rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc); 1170 CU_ASSERT(rc == 0); 1171 poll_threads(); 1172 SPDK_CU_ASSERT_FATAL(desc != NULL); 1173 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 1174 io_ch = spdk_bdev_get_io_channel(desc); 1175 CU_ASSERT(io_ch != NULL); 1176 1177 /* WRITE and WRITE ZEROES are not supported */ 1178 ut_enable_io_type(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, false); 1179 ut_enable_io_type(SPDK_BDEV_IO_TYPE_WRITE, false); 1180 rc = spdk_bdev_write_zeroes_blocks(desc, io_ch, 0, 128, io_done, NULL); 1181 CU_ASSERT(rc == -ENOTSUP); 1182 ut_enable_io_type(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, true); 1183 ut_enable_io_type(SPDK_BDEV_IO_TYPE_WRITE, true); 1184 1185 /* NVME_IO, NVME_IO_MD and NVME_ADMIN are not supported */ 1186 ut_enable_io_type(SPDK_BDEV_IO_TYPE_NVME_IO, false); 1187 ut_enable_io_type(SPDK_BDEV_IO_TYPE_NVME_IO_MD, false); 1188 ut_enable_io_type(SPDK_BDEV_IO_TYPE_NVME_ADMIN, false); 1189 rc = spdk_bdev_nvme_io_passthru(desc, io_ch, NULL, NULL, 0, NULL, NULL); 1190 CU_ASSERT(rc == -ENOTSUP); 1191 rc = spdk_bdev_nvme_io_passthru_md(desc, io_ch, NULL, NULL, 0, NULL, 0, NULL, NULL); 1192 CU_ASSERT(rc == -ENOTSUP); 1193 rc = spdk_bdev_nvme_admin_passthru(desc, io_ch, NULL, NULL, 0, NULL, NULL); 1194 CU_ASSERT(rc == -ENOTSUP); 1195 ut_enable_io_type(SPDK_BDEV_IO_TYPE_NVME_IO, true); 1196 ut_enable_io_type(SPDK_BDEV_IO_TYPE_NVME_IO_MD, true); 1197 ut_enable_io_type(SPDK_BDEV_IO_TYPE_NVME_ADMIN, true); 1198 1199 spdk_put_io_channel(io_ch); 1200 spdk_bdev_close(desc); 1201 free_bdev(bdev); 1202 ut_fini_bdev(); 1203 } 1204 1205 static void 1206 bdev_io_wait_test(void) 1207 { 1208 struct spdk_bdev *bdev; 1209 struct spdk_bdev_desc *desc = NULL; 1210 struct spdk_io_channel *io_ch; 1211 struct spdk_bdev_opts bdev_opts = {}; 1212 struct bdev_ut_io_wait_entry io_wait_entry; 1213 struct bdev_ut_io_wait_entry io_wait_entry2; 1214 int rc; 1215 1216 spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts)); 1217 bdev_opts.bdev_io_pool_size = 4; 1218 bdev_opts.bdev_io_cache_size = 2; 1219 ut_init_bdev(&bdev_opts); 1220 1221 bdev = allocate_bdev("bdev0"); 1222 1223 rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc); 1224 CU_ASSERT(rc == 0); 1225 poll_threads(); 1226 SPDK_CU_ASSERT_FATAL(desc != NULL); 1227 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 1228 io_ch = spdk_bdev_get_io_channel(desc); 1229 CU_ASSERT(io_ch != NULL); 1230 1231 rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, NULL); 1232 CU_ASSERT(rc == 0); 1233 rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, NULL); 1234 CU_ASSERT(rc == 0); 1235 rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, NULL); 1236 CU_ASSERT(rc == 0); 1237 rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, NULL); 1238 CU_ASSERT(rc == 0); 1239 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 4); 1240 1241 rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, NULL); 1242 CU_ASSERT(rc == -ENOMEM); 1243 1244 io_wait_entry.entry.bdev = bdev; 1245 io_wait_entry.entry.cb_fn = io_wait_cb; 1246 io_wait_entry.entry.cb_arg = &io_wait_entry; 1247 io_wait_entry.io_ch = io_ch; 1248 io_wait_entry.desc = desc; 1249 io_wait_entry.submitted = false; 1250 /* Cannot use the same io_wait_entry for two different calls. */ 1251 memcpy(&io_wait_entry2, &io_wait_entry, sizeof(io_wait_entry)); 1252 io_wait_entry2.entry.cb_arg = &io_wait_entry2; 1253 1254 /* Queue two I/O waits. */ 1255 rc = spdk_bdev_queue_io_wait(bdev, io_ch, &io_wait_entry.entry); 1256 CU_ASSERT(rc == 0); 1257 CU_ASSERT(io_wait_entry.submitted == false); 1258 rc = spdk_bdev_queue_io_wait(bdev, io_ch, &io_wait_entry2.entry); 1259 CU_ASSERT(rc == 0); 1260 CU_ASSERT(io_wait_entry2.submitted == false); 1261 1262 stub_complete_io(1); 1263 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 4); 1264 CU_ASSERT(io_wait_entry.submitted == true); 1265 CU_ASSERT(io_wait_entry2.submitted == false); 1266 1267 stub_complete_io(1); 1268 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 4); 1269 CU_ASSERT(io_wait_entry2.submitted == true); 1270 1271 stub_complete_io(4); 1272 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 1273 1274 spdk_put_io_channel(io_ch); 1275 spdk_bdev_close(desc); 1276 free_bdev(bdev); 1277 ut_fini_bdev(); 1278 } 1279 1280 static void 1281 bdev_io_spans_split_test(void) 1282 { 1283 struct spdk_bdev bdev; 1284 struct spdk_bdev_io bdev_io; 1285 struct iovec iov[SPDK_BDEV_IO_NUM_CHILD_IOV]; 1286 1287 memset(&bdev, 0, sizeof(bdev)); 1288 bdev_io.u.bdev.iovs = iov; 1289 1290 bdev_io.type = SPDK_BDEV_IO_TYPE_READ; 1291 bdev.optimal_io_boundary = 0; 1292 bdev.max_segment_size = 0; 1293 bdev.max_num_segments = 0; 1294 bdev_io.bdev = &bdev; 1295 1296 /* bdev has no optimal_io_boundary and max_size set - so this should return false. */ 1297 CU_ASSERT(bdev_io_should_split(&bdev_io) == false); 1298 1299 bdev.split_on_optimal_io_boundary = true; 1300 bdev.optimal_io_boundary = 32; 1301 bdev_io.type = SPDK_BDEV_IO_TYPE_RESET; 1302 1303 /* RESETs are not based on LBAs - so this should return false. */ 1304 CU_ASSERT(bdev_io_should_split(&bdev_io) == false); 1305 1306 bdev_io.type = SPDK_BDEV_IO_TYPE_READ; 1307 bdev_io.u.bdev.offset_blocks = 0; 1308 bdev_io.u.bdev.num_blocks = 32; 1309 1310 /* This I/O run right up to, but does not cross, the boundary - so this should return false. */ 1311 CU_ASSERT(bdev_io_should_split(&bdev_io) == false); 1312 1313 bdev_io.u.bdev.num_blocks = 33; 1314 1315 /* This I/O spans a boundary. */ 1316 CU_ASSERT(bdev_io_should_split(&bdev_io) == true); 1317 1318 bdev_io.u.bdev.num_blocks = 32; 1319 bdev.max_segment_size = 512 * 32; 1320 bdev.max_num_segments = 1; 1321 bdev_io.u.bdev.iovcnt = 1; 1322 iov[0].iov_len = 512; 1323 1324 /* Does not cross and exceed max_size or max_segs */ 1325 CU_ASSERT(bdev_io_should_split(&bdev_io) == false); 1326 1327 bdev.split_on_optimal_io_boundary = false; 1328 bdev.max_segment_size = 512; 1329 bdev.max_num_segments = 1; 1330 bdev_io.u.bdev.iovcnt = 2; 1331 1332 /* Exceed max_segs */ 1333 CU_ASSERT(bdev_io_should_split(&bdev_io) == true); 1334 1335 bdev.max_num_segments = 2; 1336 iov[0].iov_len = 513; 1337 iov[1].iov_len = 512; 1338 1339 /* Exceed max_sizes */ 1340 CU_ASSERT(bdev_io_should_split(&bdev_io) == true); 1341 1342 bdev.max_segment_size = 0; 1343 bdev.write_unit_size = 32; 1344 bdev.split_on_write_unit = true; 1345 bdev_io.type = SPDK_BDEV_IO_TYPE_WRITE; 1346 1347 /* This I/O is one write unit */ 1348 CU_ASSERT(bdev_io_should_split(&bdev_io) == false); 1349 1350 bdev_io.u.bdev.num_blocks = 32 * 2; 1351 1352 /* This I/O is more than one write unit */ 1353 CU_ASSERT(bdev_io_should_split(&bdev_io) == true); 1354 1355 bdev_io.u.bdev.offset_blocks = 1; 1356 bdev_io.u.bdev.num_blocks = 32; 1357 1358 /* This I/O is not aligned to write unit size */ 1359 CU_ASSERT(bdev_io_should_split(&bdev_io) == true); 1360 } 1361 1362 static void 1363 bdev_io_boundary_split_test(void) 1364 { 1365 struct spdk_bdev *bdev; 1366 struct spdk_bdev_desc *desc = NULL; 1367 struct spdk_io_channel *io_ch; 1368 struct spdk_bdev_opts bdev_opts = {}; 1369 struct iovec iov[SPDK_BDEV_IO_NUM_CHILD_IOV * 2]; 1370 struct ut_expected_io *expected_io; 1371 void *md_buf = (void *)0xFF000000; 1372 uint64_t i; 1373 int rc; 1374 1375 spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts)); 1376 bdev_opts.bdev_io_pool_size = 512; 1377 bdev_opts.bdev_io_cache_size = 64; 1378 ut_init_bdev(&bdev_opts); 1379 1380 bdev = allocate_bdev("bdev0"); 1381 1382 rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc); 1383 CU_ASSERT(rc == 0); 1384 SPDK_CU_ASSERT_FATAL(desc != NULL); 1385 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 1386 io_ch = spdk_bdev_get_io_channel(desc); 1387 CU_ASSERT(io_ch != NULL); 1388 1389 bdev->optimal_io_boundary = 16; 1390 bdev->split_on_optimal_io_boundary = false; 1391 1392 g_io_done = false; 1393 1394 /* First test that the I/O does not get split if split_on_optimal_io_boundary == false. */ 1395 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 14, 8, 1); 1396 ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 8 * 512); 1397 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1398 1399 rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 8, io_done, NULL); 1400 CU_ASSERT(rc == 0); 1401 CU_ASSERT(g_io_done == false); 1402 1403 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 1404 stub_complete_io(1); 1405 CU_ASSERT(g_io_done == true); 1406 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 1407 1408 bdev->split_on_optimal_io_boundary = true; 1409 bdev->md_interleave = false; 1410 bdev->md_len = 8; 1411 1412 /* Now test that a single-vector command is split correctly. 1413 * Offset 14, length 8, payload 0xF000 1414 * Child - Offset 14, length 2, payload 0xF000 1415 * Child - Offset 16, length 6, payload 0xF000 + 2 * 512 1416 * 1417 * Set up the expected values before calling spdk_bdev_read_blocks 1418 */ 1419 g_io_done = false; 1420 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 14, 2, 1); 1421 expected_io->md_buf = md_buf; 1422 ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 2 * 512); 1423 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1424 1425 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 16, 6, 1); 1426 expected_io->md_buf = md_buf + 2 * 8; 1427 ut_expected_io_set_iov(expected_io, 0, (void *)(0xF000 + 2 * 512), 6 * 512); 1428 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1429 1430 /* spdk_bdev_read_blocks will submit the first child immediately. */ 1431 rc = spdk_bdev_read_blocks_with_md(desc, io_ch, (void *)0xF000, md_buf, 1432 14, 8, io_done, NULL); 1433 CU_ASSERT(rc == 0); 1434 CU_ASSERT(g_io_done == false); 1435 1436 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 1437 stub_complete_io(2); 1438 CU_ASSERT(g_io_done == true); 1439 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 1440 1441 /* Now set up a more complex, multi-vector command that needs to be split, 1442 * including splitting iovecs. 1443 */ 1444 iov[0].iov_base = (void *)0x10000; 1445 iov[0].iov_len = 512; 1446 iov[1].iov_base = (void *)0x20000; 1447 iov[1].iov_len = 20 * 512; 1448 iov[2].iov_base = (void *)0x30000; 1449 iov[2].iov_len = 11 * 512; 1450 1451 g_io_done = false; 1452 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 14, 2, 2); 1453 expected_io->md_buf = md_buf; 1454 ut_expected_io_set_iov(expected_io, 0, (void *)0x10000, 512); 1455 ut_expected_io_set_iov(expected_io, 1, (void *)0x20000, 512); 1456 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1457 1458 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 16, 16, 1); 1459 expected_io->md_buf = md_buf + 2 * 8; 1460 ut_expected_io_set_iov(expected_io, 0, (void *)(0x20000 + 512), 16 * 512); 1461 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1462 1463 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 32, 14, 2); 1464 expected_io->md_buf = md_buf + 18 * 8; 1465 ut_expected_io_set_iov(expected_io, 0, (void *)(0x20000 + 17 * 512), 3 * 512); 1466 ut_expected_io_set_iov(expected_io, 1, (void *)0x30000, 11 * 512); 1467 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1468 1469 rc = spdk_bdev_writev_blocks_with_md(desc, io_ch, iov, 3, md_buf, 1470 14, 32, io_done, NULL); 1471 CU_ASSERT(rc == 0); 1472 CU_ASSERT(g_io_done == false); 1473 1474 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 3); 1475 stub_complete_io(3); 1476 CU_ASSERT(g_io_done == true); 1477 1478 /* Test multi vector command that needs to be split by strip and then needs to be 1479 * split further due to the capacity of child iovs. 1480 */ 1481 for (i = 0; i < SPDK_BDEV_IO_NUM_CHILD_IOV * 2; i++) { 1482 iov[i].iov_base = (void *)((i + 1) * 0x10000); 1483 iov[i].iov_len = 512; 1484 } 1485 1486 bdev->optimal_io_boundary = SPDK_BDEV_IO_NUM_CHILD_IOV; 1487 g_io_done = false; 1488 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0, SPDK_BDEV_IO_NUM_CHILD_IOV, 1489 SPDK_BDEV_IO_NUM_CHILD_IOV); 1490 expected_io->md_buf = md_buf; 1491 for (i = 0; i < SPDK_BDEV_IO_NUM_CHILD_IOV; i++) { 1492 ut_expected_io_set_iov(expected_io, i, (void *)((i + 1) * 0x10000), 512); 1493 } 1494 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1495 1496 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, SPDK_BDEV_IO_NUM_CHILD_IOV, 1497 SPDK_BDEV_IO_NUM_CHILD_IOV, SPDK_BDEV_IO_NUM_CHILD_IOV); 1498 expected_io->md_buf = md_buf + SPDK_BDEV_IO_NUM_CHILD_IOV * 8; 1499 for (i = 0; i < SPDK_BDEV_IO_NUM_CHILD_IOV; i++) { 1500 ut_expected_io_set_iov(expected_io, i, 1501 (void *)((i + 1 + SPDK_BDEV_IO_NUM_CHILD_IOV) * 0x10000), 512); 1502 } 1503 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1504 1505 rc = spdk_bdev_readv_blocks_with_md(desc, io_ch, iov, SPDK_BDEV_IO_NUM_CHILD_IOV * 2, md_buf, 1506 0, SPDK_BDEV_IO_NUM_CHILD_IOV * 2, io_done, NULL); 1507 CU_ASSERT(rc == 0); 1508 CU_ASSERT(g_io_done == false); 1509 1510 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 1511 stub_complete_io(1); 1512 CU_ASSERT(g_io_done == false); 1513 1514 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 1515 stub_complete_io(1); 1516 CU_ASSERT(g_io_done == true); 1517 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 1518 1519 /* Test multi vector command that needs to be split by strip and then needs to be 1520 * split further due to the capacity of child iovs. In this case, the length of 1521 * the rest of iovec array with an I/O boundary is the multiple of block size. 1522 */ 1523 1524 /* Fill iovec array for exactly one boundary. The iovec cnt for this boundary 1525 * is SPDK_BDEV_IO_NUM_CHILD_IOV + 1, which exceeds the capacity of child iovs. 1526 */ 1527 for (i = 0; i < SPDK_BDEV_IO_NUM_CHILD_IOV - 2; i++) { 1528 iov[i].iov_base = (void *)((i + 1) * 0x10000); 1529 iov[i].iov_len = 512; 1530 } 1531 for (i = SPDK_BDEV_IO_NUM_CHILD_IOV - 2; i < SPDK_BDEV_IO_NUM_CHILD_IOV; i++) { 1532 iov[i].iov_base = (void *)((i + 1) * 0x10000); 1533 iov[i].iov_len = 256; 1534 } 1535 iov[SPDK_BDEV_IO_NUM_CHILD_IOV].iov_base = (void *)((SPDK_BDEV_IO_NUM_CHILD_IOV + 1) * 0x10000); 1536 iov[SPDK_BDEV_IO_NUM_CHILD_IOV].iov_len = 512; 1537 1538 /* Add an extra iovec to trigger split */ 1539 iov[SPDK_BDEV_IO_NUM_CHILD_IOV + 1].iov_base = (void *)((SPDK_BDEV_IO_NUM_CHILD_IOV + 2) * 0x10000); 1540 iov[SPDK_BDEV_IO_NUM_CHILD_IOV + 1].iov_len = 512; 1541 1542 bdev->optimal_io_boundary = SPDK_BDEV_IO_NUM_CHILD_IOV; 1543 g_io_done = false; 1544 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0, 1545 SPDK_BDEV_IO_NUM_CHILD_IOV - 1, SPDK_BDEV_IO_NUM_CHILD_IOV); 1546 expected_io->md_buf = md_buf; 1547 for (i = 0; i < SPDK_BDEV_IO_NUM_CHILD_IOV - 2; i++) { 1548 ut_expected_io_set_iov(expected_io, i, 1549 (void *)((i + 1) * 0x10000), 512); 1550 } 1551 for (i = SPDK_BDEV_IO_NUM_CHILD_IOV - 2; i < SPDK_BDEV_IO_NUM_CHILD_IOV; i++) { 1552 ut_expected_io_set_iov(expected_io, i, 1553 (void *)((i + 1) * 0x10000), 256); 1554 } 1555 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1556 1557 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, SPDK_BDEV_IO_NUM_CHILD_IOV - 1, 1558 1, 1); 1559 expected_io->md_buf = md_buf + (SPDK_BDEV_IO_NUM_CHILD_IOV - 1) * 8; 1560 ut_expected_io_set_iov(expected_io, 0, 1561 (void *)((SPDK_BDEV_IO_NUM_CHILD_IOV + 1) * 0x10000), 512); 1562 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1563 1564 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, SPDK_BDEV_IO_NUM_CHILD_IOV, 1565 1, 1); 1566 expected_io->md_buf = md_buf + SPDK_BDEV_IO_NUM_CHILD_IOV * 8; 1567 ut_expected_io_set_iov(expected_io, 0, 1568 (void *)((SPDK_BDEV_IO_NUM_CHILD_IOV + 2) * 0x10000), 512); 1569 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1570 1571 rc = spdk_bdev_readv_blocks_with_md(desc, io_ch, iov, SPDK_BDEV_IO_NUM_CHILD_IOV + 2, md_buf, 1572 0, SPDK_BDEV_IO_NUM_CHILD_IOV + 1, io_done, NULL); 1573 CU_ASSERT(rc == 0); 1574 CU_ASSERT(g_io_done == false); 1575 1576 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 1577 stub_complete_io(1); 1578 CU_ASSERT(g_io_done == false); 1579 1580 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 1581 stub_complete_io(2); 1582 CU_ASSERT(g_io_done == true); 1583 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 1584 1585 /* Test multi vector command that needs to be split by strip and then needs to be 1586 * split further due to the capacity of child iovs, the child request offset should 1587 * be rewind to last aligned offset and go success without error. 1588 */ 1589 for (i = 0; i < SPDK_BDEV_IO_NUM_CHILD_IOV - 1; i++) { 1590 iov[i].iov_base = (void *)((i + 1) * 0x10000); 1591 iov[i].iov_len = 512; 1592 } 1593 iov[SPDK_BDEV_IO_NUM_CHILD_IOV - 1].iov_base = (void *)(SPDK_BDEV_IO_NUM_CHILD_IOV * 0x10000); 1594 iov[SPDK_BDEV_IO_NUM_CHILD_IOV - 1].iov_len = 256; 1595 1596 iov[SPDK_BDEV_IO_NUM_CHILD_IOV].iov_base = (void *)((SPDK_BDEV_IO_NUM_CHILD_IOV + 1) * 0x10000); 1597 iov[SPDK_BDEV_IO_NUM_CHILD_IOV].iov_len = 256; 1598 1599 iov[SPDK_BDEV_IO_NUM_CHILD_IOV + 1].iov_base = (void *)((SPDK_BDEV_IO_NUM_CHILD_IOV + 2) * 0x10000); 1600 iov[SPDK_BDEV_IO_NUM_CHILD_IOV + 1].iov_len = 512; 1601 1602 bdev->optimal_io_boundary = SPDK_BDEV_IO_NUM_CHILD_IOV; 1603 g_io_done = false; 1604 g_io_status = 0; 1605 /* The first expected io should be start from offset 0 to SPDK_BDEV_IO_NUM_CHILD_IOV - 1 */ 1606 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0, 1607 SPDK_BDEV_IO_NUM_CHILD_IOV - 1, SPDK_BDEV_IO_NUM_CHILD_IOV - 1); 1608 expected_io->md_buf = md_buf; 1609 for (i = 0; i < SPDK_BDEV_IO_NUM_CHILD_IOV - 1; i++) { 1610 ut_expected_io_set_iov(expected_io, i, 1611 (void *)((i + 1) * 0x10000), 512); 1612 } 1613 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1614 /* The second expected io should be start from offset SPDK_BDEV_IO_NUM_CHILD_IOV - 1 to SPDK_BDEV_IO_NUM_CHILD_IOV */ 1615 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, SPDK_BDEV_IO_NUM_CHILD_IOV - 1, 1616 1, 2); 1617 expected_io->md_buf = md_buf + (SPDK_BDEV_IO_NUM_CHILD_IOV - 1) * 8; 1618 ut_expected_io_set_iov(expected_io, 0, 1619 (void *)(SPDK_BDEV_IO_NUM_CHILD_IOV * 0x10000), 256); 1620 ut_expected_io_set_iov(expected_io, 1, 1621 (void *)((SPDK_BDEV_IO_NUM_CHILD_IOV + 1) * 0x10000), 256); 1622 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1623 /* The third expected io should be start from offset SPDK_BDEV_IO_NUM_CHILD_IOV to SPDK_BDEV_IO_NUM_CHILD_IOV + 1 */ 1624 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, SPDK_BDEV_IO_NUM_CHILD_IOV, 1625 1, 1); 1626 expected_io->md_buf = md_buf + SPDK_BDEV_IO_NUM_CHILD_IOV * 8; 1627 ut_expected_io_set_iov(expected_io, 0, 1628 (void *)((SPDK_BDEV_IO_NUM_CHILD_IOV + 2) * 0x10000), 512); 1629 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1630 1631 rc = spdk_bdev_readv_blocks_with_md(desc, io_ch, iov, SPDK_BDEV_IO_NUM_CHILD_IOV * 2, md_buf, 1632 0, SPDK_BDEV_IO_NUM_CHILD_IOV + 1, io_done, NULL); 1633 CU_ASSERT(rc == 0); 1634 CU_ASSERT(g_io_done == false); 1635 1636 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 1637 stub_complete_io(1); 1638 CU_ASSERT(g_io_done == false); 1639 1640 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 1641 stub_complete_io(2); 1642 CU_ASSERT(g_io_done == true); 1643 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 1644 1645 /* Test multi vector command that needs to be split due to the IO boundary and 1646 * the capacity of child iovs. Especially test the case when the command is 1647 * split due to the capacity of child iovs, the tail address is not aligned with 1648 * block size and is rewinded to the aligned address. 1649 * 1650 * The iovecs used in read request is complex but is based on the data 1651 * collected in the real issue. We change the base addresses but keep the lengths 1652 * not to loose the credibility of the test. 1653 */ 1654 bdev->optimal_io_boundary = 128; 1655 g_io_done = false; 1656 g_io_status = 0; 1657 1658 for (i = 0; i < 31; i++) { 1659 iov[i].iov_base = (void *)(0xFEED0000000 + (i << 20)); 1660 iov[i].iov_len = 1024; 1661 } 1662 iov[31].iov_base = (void *)0xFEED1F00000; 1663 iov[31].iov_len = 32768; 1664 iov[32].iov_base = (void *)0xFEED2000000; 1665 iov[32].iov_len = 160; 1666 iov[33].iov_base = (void *)0xFEED2100000; 1667 iov[33].iov_len = 4096; 1668 iov[34].iov_base = (void *)0xFEED2200000; 1669 iov[34].iov_len = 4096; 1670 iov[35].iov_base = (void *)0xFEED2300000; 1671 iov[35].iov_len = 4096; 1672 iov[36].iov_base = (void *)0xFEED2400000; 1673 iov[36].iov_len = 4096; 1674 iov[37].iov_base = (void *)0xFEED2500000; 1675 iov[37].iov_len = 4096; 1676 iov[38].iov_base = (void *)0xFEED2600000; 1677 iov[38].iov_len = 4096; 1678 iov[39].iov_base = (void *)0xFEED2700000; 1679 iov[39].iov_len = 4096; 1680 iov[40].iov_base = (void *)0xFEED2800000; 1681 iov[40].iov_len = 4096; 1682 iov[41].iov_base = (void *)0xFEED2900000; 1683 iov[41].iov_len = 4096; 1684 iov[42].iov_base = (void *)0xFEED2A00000; 1685 iov[42].iov_len = 4096; 1686 iov[43].iov_base = (void *)0xFEED2B00000; 1687 iov[43].iov_len = 12288; 1688 iov[44].iov_base = (void *)0xFEED2C00000; 1689 iov[44].iov_len = 8192; 1690 iov[45].iov_base = (void *)0xFEED2F00000; 1691 iov[45].iov_len = 4096; 1692 iov[46].iov_base = (void *)0xFEED3000000; 1693 iov[46].iov_len = 4096; 1694 iov[47].iov_base = (void *)0xFEED3100000; 1695 iov[47].iov_len = 4096; 1696 iov[48].iov_base = (void *)0xFEED3200000; 1697 iov[48].iov_len = 24576; 1698 iov[49].iov_base = (void *)0xFEED3300000; 1699 iov[49].iov_len = 16384; 1700 iov[50].iov_base = (void *)0xFEED3400000; 1701 iov[50].iov_len = 12288; 1702 iov[51].iov_base = (void *)0xFEED3500000; 1703 iov[51].iov_len = 4096; 1704 iov[52].iov_base = (void *)0xFEED3600000; 1705 iov[52].iov_len = 4096; 1706 iov[53].iov_base = (void *)0xFEED3700000; 1707 iov[53].iov_len = 4096; 1708 iov[54].iov_base = (void *)0xFEED3800000; 1709 iov[54].iov_len = 28672; 1710 iov[55].iov_base = (void *)0xFEED3900000; 1711 iov[55].iov_len = 20480; 1712 iov[56].iov_base = (void *)0xFEED3A00000; 1713 iov[56].iov_len = 4096; 1714 iov[57].iov_base = (void *)0xFEED3B00000; 1715 iov[57].iov_len = 12288; 1716 iov[58].iov_base = (void *)0xFEED3C00000; 1717 iov[58].iov_len = 4096; 1718 iov[59].iov_base = (void *)0xFEED3D00000; 1719 iov[59].iov_len = 4096; 1720 iov[60].iov_base = (void *)0xFEED3E00000; 1721 iov[60].iov_len = 352; 1722 1723 /* The 1st child IO must be from iov[0] to iov[31] split by the capacity 1724 * of child iovs, 1725 */ 1726 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0, 126, 32); 1727 expected_io->md_buf = md_buf; 1728 for (i = 0; i < 32; i++) { 1729 ut_expected_io_set_iov(expected_io, i, iov[i].iov_base, iov[i].iov_len); 1730 } 1731 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1732 1733 /* The 2nd child IO must be from iov[32] to the first 864 bytes of iov[33] 1734 * split by the IO boundary requirement. 1735 */ 1736 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 126, 2, 2); 1737 expected_io->md_buf = md_buf + 126 * 8; 1738 ut_expected_io_set_iov(expected_io, 0, iov[32].iov_base, iov[32].iov_len); 1739 ut_expected_io_set_iov(expected_io, 1, iov[33].iov_base, 864); 1740 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1741 1742 /* The 3rd child IO must be from the remaining 3232 bytes of iov[33] to 1743 * the first 864 bytes of iov[46] split by the IO boundary requirement. 1744 */ 1745 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 128, 128, 14); 1746 expected_io->md_buf = md_buf + 128 * 8; 1747 ut_expected_io_set_iov(expected_io, 0, (void *)((uintptr_t)iov[33].iov_base + 864), 1748 iov[33].iov_len - 864); 1749 ut_expected_io_set_iov(expected_io, 1, iov[34].iov_base, iov[34].iov_len); 1750 ut_expected_io_set_iov(expected_io, 2, iov[35].iov_base, iov[35].iov_len); 1751 ut_expected_io_set_iov(expected_io, 3, iov[36].iov_base, iov[36].iov_len); 1752 ut_expected_io_set_iov(expected_io, 4, iov[37].iov_base, iov[37].iov_len); 1753 ut_expected_io_set_iov(expected_io, 5, iov[38].iov_base, iov[38].iov_len); 1754 ut_expected_io_set_iov(expected_io, 6, iov[39].iov_base, iov[39].iov_len); 1755 ut_expected_io_set_iov(expected_io, 7, iov[40].iov_base, iov[40].iov_len); 1756 ut_expected_io_set_iov(expected_io, 8, iov[41].iov_base, iov[41].iov_len); 1757 ut_expected_io_set_iov(expected_io, 9, iov[42].iov_base, iov[42].iov_len); 1758 ut_expected_io_set_iov(expected_io, 10, iov[43].iov_base, iov[43].iov_len); 1759 ut_expected_io_set_iov(expected_io, 11, iov[44].iov_base, iov[44].iov_len); 1760 ut_expected_io_set_iov(expected_io, 12, iov[45].iov_base, iov[45].iov_len); 1761 ut_expected_io_set_iov(expected_io, 13, iov[46].iov_base, 864); 1762 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1763 1764 /* The 4th child IO must be from the remaining 3232 bytes of iov[46] to the 1765 * first 864 bytes of iov[52] split by the IO boundary requirement. 1766 */ 1767 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 256, 128, 7); 1768 expected_io->md_buf = md_buf + 256 * 8; 1769 ut_expected_io_set_iov(expected_io, 0, (void *)((uintptr_t)iov[46].iov_base + 864), 1770 iov[46].iov_len - 864); 1771 ut_expected_io_set_iov(expected_io, 1, iov[47].iov_base, iov[47].iov_len); 1772 ut_expected_io_set_iov(expected_io, 2, iov[48].iov_base, iov[48].iov_len); 1773 ut_expected_io_set_iov(expected_io, 3, iov[49].iov_base, iov[49].iov_len); 1774 ut_expected_io_set_iov(expected_io, 4, iov[50].iov_base, iov[50].iov_len); 1775 ut_expected_io_set_iov(expected_io, 5, iov[51].iov_base, iov[51].iov_len); 1776 ut_expected_io_set_iov(expected_io, 6, iov[52].iov_base, 864); 1777 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1778 1779 /* The 5th child IO must be from the remaining 3232 bytes of iov[52] to 1780 * the first 4096 bytes of iov[57] split by the IO boundary requirement. 1781 */ 1782 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 384, 128, 6); 1783 expected_io->md_buf = md_buf + 384 * 8; 1784 ut_expected_io_set_iov(expected_io, 0, (void *)((uintptr_t)iov[52].iov_base + 864), 1785 iov[52].iov_len - 864); 1786 ut_expected_io_set_iov(expected_io, 1, iov[53].iov_base, iov[53].iov_len); 1787 ut_expected_io_set_iov(expected_io, 2, iov[54].iov_base, iov[54].iov_len); 1788 ut_expected_io_set_iov(expected_io, 3, iov[55].iov_base, iov[55].iov_len); 1789 ut_expected_io_set_iov(expected_io, 4, iov[56].iov_base, iov[56].iov_len); 1790 ut_expected_io_set_iov(expected_io, 5, iov[57].iov_base, 4960); 1791 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1792 1793 /* The 6th child IO must be from the remaining 7328 bytes of iov[57] 1794 * to the first 3936 bytes of iov[58] split by the capacity of child iovs. 1795 */ 1796 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 512, 30, 3); 1797 expected_io->md_buf = md_buf + 512 * 8; 1798 ut_expected_io_set_iov(expected_io, 0, (void *)((uintptr_t)iov[57].iov_base + 4960), 1799 iov[57].iov_len - 4960); 1800 ut_expected_io_set_iov(expected_io, 1, iov[58].iov_base, iov[58].iov_len); 1801 ut_expected_io_set_iov(expected_io, 2, iov[59].iov_base, 3936); 1802 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1803 1804 /* The 7th child IO is from the remaining 160 bytes of iov[59] and iov[60]. */ 1805 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 542, 1, 2); 1806 expected_io->md_buf = md_buf + 542 * 8; 1807 ut_expected_io_set_iov(expected_io, 0, (void *)((uintptr_t)iov[59].iov_base + 3936), 1808 iov[59].iov_len - 3936); 1809 ut_expected_io_set_iov(expected_io, 1, iov[60].iov_base, iov[60].iov_len); 1810 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1811 1812 rc = spdk_bdev_readv_blocks_with_md(desc, io_ch, iov, 61, md_buf, 1813 0, 543, io_done, NULL); 1814 CU_ASSERT(rc == 0); 1815 CU_ASSERT(g_io_done == false); 1816 1817 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 1818 stub_complete_io(1); 1819 CU_ASSERT(g_io_done == false); 1820 1821 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 5); 1822 stub_complete_io(5); 1823 CU_ASSERT(g_io_done == false); 1824 1825 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 1826 stub_complete_io(1); 1827 CU_ASSERT(g_io_done == true); 1828 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 1829 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS); 1830 1831 /* Test a WRITE_ZEROES that would span an I/O boundary. WRITE_ZEROES should not be 1832 * split, so test that. 1833 */ 1834 bdev->optimal_io_boundary = 15; 1835 g_io_done = false; 1836 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, 9, 36, 0); 1837 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1838 1839 rc = spdk_bdev_write_zeroes_blocks(desc, io_ch, 9, 36, io_done, NULL); 1840 CU_ASSERT(rc == 0); 1841 CU_ASSERT(g_io_done == false); 1842 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 1843 stub_complete_io(1); 1844 CU_ASSERT(g_io_done == true); 1845 1846 /* Test an UNMAP. This should also not be split. */ 1847 bdev->optimal_io_boundary = 16; 1848 g_io_done = false; 1849 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_UNMAP, 15, 2, 0); 1850 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1851 1852 rc = spdk_bdev_unmap_blocks(desc, io_ch, 15, 2, io_done, NULL); 1853 CU_ASSERT(rc == 0); 1854 CU_ASSERT(g_io_done == false); 1855 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 1856 stub_complete_io(1); 1857 CU_ASSERT(g_io_done == true); 1858 1859 /* Test a FLUSH. This should also not be split. */ 1860 bdev->optimal_io_boundary = 16; 1861 g_io_done = false; 1862 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_FLUSH, 15, 2, 0); 1863 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1864 1865 rc = spdk_bdev_flush_blocks(desc, io_ch, 15, 2, io_done, NULL); 1866 CU_ASSERT(rc == 0); 1867 CU_ASSERT(g_io_done == false); 1868 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 1869 stub_complete_io(1); 1870 CU_ASSERT(g_io_done == true); 1871 1872 /* Test a COPY. This should also not be split. */ 1873 bdev->optimal_io_boundary = 15; 1874 g_io_done = false; 1875 expected_io = ut_alloc_expected_copy_io(SPDK_BDEV_IO_TYPE_COPY, 9, 45, 36); 1876 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1877 1878 rc = spdk_bdev_copy_blocks(desc, io_ch, 9, 45, 36, io_done, NULL); 1879 CU_ASSERT(rc == 0); 1880 CU_ASSERT(g_io_done == false); 1881 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 1882 stub_complete_io(1); 1883 CU_ASSERT(g_io_done == true); 1884 1885 CU_ASSERT(TAILQ_EMPTY(&g_bdev_ut_channel->expected_io)); 1886 1887 /* Children requests return an error status */ 1888 bdev->optimal_io_boundary = 16; 1889 iov[0].iov_base = (void *)0x10000; 1890 iov[0].iov_len = 512 * 64; 1891 g_io_exp_status = SPDK_BDEV_IO_STATUS_FAILED; 1892 g_io_done = false; 1893 g_io_status = SPDK_BDEV_IO_STATUS_SUCCESS; 1894 1895 rc = spdk_bdev_readv_blocks(desc, io_ch, iov, 1, 1, 64, io_done, NULL); 1896 CU_ASSERT(rc == 0); 1897 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 5); 1898 stub_complete_io(4); 1899 CU_ASSERT(g_io_done == false); 1900 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS); 1901 stub_complete_io(1); 1902 CU_ASSERT(g_io_done == true); 1903 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED); 1904 1905 /* Test if a multi vector command terminated with failure before continuing 1906 * splitting process when one of child I/O failed. 1907 * The multi vector command is as same as the above that needs to be split by strip 1908 * and then needs to be split further due to the capacity of child iovs. 1909 */ 1910 for (i = 0; i < SPDK_BDEV_IO_NUM_CHILD_IOV - 1; i++) { 1911 iov[i].iov_base = (void *)((i + 1) * 0x10000); 1912 iov[i].iov_len = 512; 1913 } 1914 iov[SPDK_BDEV_IO_NUM_CHILD_IOV - 1].iov_base = (void *)(SPDK_BDEV_IO_NUM_CHILD_IOV * 0x10000); 1915 iov[SPDK_BDEV_IO_NUM_CHILD_IOV - 1].iov_len = 256; 1916 1917 iov[SPDK_BDEV_IO_NUM_CHILD_IOV].iov_base = (void *)((SPDK_BDEV_IO_NUM_CHILD_IOV + 1) * 0x10000); 1918 iov[SPDK_BDEV_IO_NUM_CHILD_IOV].iov_len = 256; 1919 1920 iov[SPDK_BDEV_IO_NUM_CHILD_IOV + 1].iov_base = (void *)((SPDK_BDEV_IO_NUM_CHILD_IOV + 2) * 0x10000); 1921 iov[SPDK_BDEV_IO_NUM_CHILD_IOV + 1].iov_len = 512; 1922 1923 bdev->optimal_io_boundary = SPDK_BDEV_IO_NUM_CHILD_IOV; 1924 1925 g_io_exp_status = SPDK_BDEV_IO_STATUS_FAILED; 1926 g_io_done = false; 1927 g_io_status = SPDK_BDEV_IO_STATUS_SUCCESS; 1928 1929 rc = spdk_bdev_readv_blocks(desc, io_ch, iov, SPDK_BDEV_IO_NUM_CHILD_IOV * 2, 0, 1930 SPDK_BDEV_IO_NUM_CHILD_IOV + 1, io_done, NULL); 1931 CU_ASSERT(rc == 0); 1932 CU_ASSERT(g_io_done == false); 1933 1934 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 1935 stub_complete_io(1); 1936 CU_ASSERT(g_io_done == true); 1937 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED); 1938 1939 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 1940 1941 /* for this test we will create the following conditions to hit the code path where 1942 * we are trying to send and IO following a split that has no iovs because we had to 1943 * trim them for alignment reasons. 1944 * 1945 * - 16K boundary, our IO will start at offset 0 with a length of 0x4200 1946 * - Our IOVs are 0x212 in size so that we run into the 16K boundary at child IOV 1947 * position 30 and overshoot by 0x2e. 1948 * - That means we'll send the IO and loop back to pick up the remaining bytes at 1949 * child IOV index 31. When we do, we find that we have to shorten index 31 by 0x2e 1950 * which eliniates that vector so we just send the first split IO with 30 vectors 1951 * and let the completion pick up the last 2 vectors. 1952 */ 1953 bdev->optimal_io_boundary = 32; 1954 bdev->split_on_optimal_io_boundary = true; 1955 g_io_done = false; 1956 1957 /* Init all parent IOVs to 0x212 */ 1958 for (i = 0; i < SPDK_BDEV_IO_NUM_CHILD_IOV + 2; i++) { 1959 iov[i].iov_base = (void *)((i + 1) * 0x10000); 1960 iov[i].iov_len = 0x212; 1961 } 1962 1963 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0, SPDK_BDEV_IO_NUM_CHILD_IOV, 1964 SPDK_BDEV_IO_NUM_CHILD_IOV - 1); 1965 /* expect 0-29 to be 1:1 with the parent iov */ 1966 for (i = 0; i < SPDK_BDEV_IO_NUM_CHILD_IOV - 2; i++) { 1967 ut_expected_io_set_iov(expected_io, i, iov[i].iov_base, iov[i].iov_len); 1968 } 1969 1970 /* expect index 30 to be shortened to 0x1e4 (0x212 - 0x1e) because of the alignment 1971 * where 0x1e is the amount we overshot the 16K boundary 1972 */ 1973 ut_expected_io_set_iov(expected_io, SPDK_BDEV_IO_NUM_CHILD_IOV - 2, 1974 (void *)(iov[SPDK_BDEV_IO_NUM_CHILD_IOV - 2].iov_base), 0x1e4); 1975 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1976 1977 /* 2nd child IO will have 2 remaining vectors, one to pick up from the one that was 1978 * shortened that take it to the next boundary and then a final one to get us to 1979 * 0x4200 bytes for the IO. 1980 */ 1981 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, SPDK_BDEV_IO_NUM_CHILD_IOV, 1982 SPDK_BDEV_IO_NUM_CHILD_IOV, 2); 1983 /* position 30 picked up the remaining bytes to the next boundary */ 1984 ut_expected_io_set_iov(expected_io, 0, 1985 (void *)(iov[SPDK_BDEV_IO_NUM_CHILD_IOV - 2].iov_base + 0x1e4), 0x2e); 1986 1987 /* position 31 picked the the rest of the transfer to get us to 0x4200 */ 1988 ut_expected_io_set_iov(expected_io, 1, 1989 (void *)(iov[SPDK_BDEV_IO_NUM_CHILD_IOV - 1].iov_base), 0x1d2); 1990 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1991 1992 rc = spdk_bdev_readv_blocks(desc, io_ch, iov, SPDK_BDEV_IO_NUM_CHILD_IOV + 1, 0, 1993 SPDK_BDEV_IO_NUM_CHILD_IOV + 1, io_done, NULL); 1994 CU_ASSERT(rc == 0); 1995 CU_ASSERT(g_io_done == false); 1996 1997 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 1998 stub_complete_io(1); 1999 CU_ASSERT(g_io_done == false); 2000 2001 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 2002 stub_complete_io(1); 2003 CU_ASSERT(g_io_done == true); 2004 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 2005 2006 spdk_put_io_channel(io_ch); 2007 spdk_bdev_close(desc); 2008 free_bdev(bdev); 2009 ut_fini_bdev(); 2010 } 2011 2012 static void 2013 bdev_io_max_size_and_segment_split_test(void) 2014 { 2015 struct spdk_bdev *bdev; 2016 struct spdk_bdev_desc *desc = NULL; 2017 struct spdk_io_channel *io_ch; 2018 struct spdk_bdev_opts bdev_opts = {}; 2019 struct iovec iov[SPDK_BDEV_IO_NUM_CHILD_IOV * 2]; 2020 struct ut_expected_io *expected_io; 2021 uint64_t i; 2022 int rc; 2023 2024 spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts)); 2025 bdev_opts.bdev_io_pool_size = 512; 2026 bdev_opts.bdev_io_cache_size = 64; 2027 bdev_opts.opts_size = sizeof(bdev_opts); 2028 ut_init_bdev(&bdev_opts); 2029 2030 bdev = allocate_bdev("bdev0"); 2031 2032 rc = spdk_bdev_open_ext(bdev->name, true, bdev_ut_event_cb, NULL, &desc); 2033 CU_ASSERT(rc == 0); 2034 SPDK_CU_ASSERT_FATAL(desc != NULL); 2035 io_ch = spdk_bdev_get_io_channel(desc); 2036 CU_ASSERT(io_ch != NULL); 2037 2038 bdev->split_on_optimal_io_boundary = false; 2039 bdev->optimal_io_boundary = 0; 2040 2041 /* Case 0 max_num_segments == 0. 2042 * but segment size 2 * 512 > 512 2043 */ 2044 bdev->max_segment_size = 512; 2045 bdev->max_num_segments = 0; 2046 g_io_done = false; 2047 2048 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 14, 2, 2); 2049 ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 512); 2050 ut_expected_io_set_iov(expected_io, 1, (void *)(0xF000 + 512), 512); 2051 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2052 2053 rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 2, io_done, NULL); 2054 CU_ASSERT(rc == 0); 2055 CU_ASSERT(g_io_done == false); 2056 2057 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 2058 stub_complete_io(1); 2059 CU_ASSERT(g_io_done == true); 2060 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 2061 2062 /* Case 1 max_segment_size == 0 2063 * but iov num 2 > 1. 2064 */ 2065 bdev->max_segment_size = 0; 2066 bdev->max_num_segments = 1; 2067 g_io_done = false; 2068 2069 iov[0].iov_base = (void *)0x10000; 2070 iov[0].iov_len = 512; 2071 iov[1].iov_base = (void *)0x20000; 2072 iov[1].iov_len = 8 * 512; 2073 2074 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 14, 1, 1); 2075 ut_expected_io_set_iov(expected_io, 0, iov[0].iov_base, iov[0].iov_len); 2076 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2077 2078 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 15, 8, 1); 2079 ut_expected_io_set_iov(expected_io, 0, iov[1].iov_base, iov[1].iov_len); 2080 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2081 2082 rc = spdk_bdev_readv_blocks(desc, io_ch, iov, 2, 14, 9, io_done, NULL); 2083 CU_ASSERT(rc == 0); 2084 CU_ASSERT(g_io_done == false); 2085 2086 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 2087 stub_complete_io(2); 2088 CU_ASSERT(g_io_done == true); 2089 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 2090 2091 /* Test that a non-vector command is split correctly. 2092 * Set up the expected values before calling spdk_bdev_read_blocks 2093 */ 2094 bdev->max_segment_size = 512; 2095 bdev->max_num_segments = 1; 2096 g_io_done = false; 2097 2098 /* Child IO 0 */ 2099 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 14, 1, 1); 2100 ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 512); 2101 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2102 2103 /* Child IO 1 */ 2104 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 15, 1, 1); 2105 ut_expected_io_set_iov(expected_io, 0, (void *)(0xF000 + 1 * 512), 512); 2106 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2107 2108 /* spdk_bdev_read_blocks will submit the first child immediately. */ 2109 rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 2, io_done, NULL); 2110 CU_ASSERT(rc == 0); 2111 CU_ASSERT(g_io_done == false); 2112 2113 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 2114 stub_complete_io(2); 2115 CU_ASSERT(g_io_done == true); 2116 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 2117 2118 /* Now set up a more complex, multi-vector command that needs to be split, 2119 * including splitting iovecs. 2120 */ 2121 bdev->max_segment_size = 2 * 512; 2122 bdev->max_num_segments = 1; 2123 g_io_done = false; 2124 2125 iov[0].iov_base = (void *)0x10000; 2126 iov[0].iov_len = 2 * 512; 2127 iov[1].iov_base = (void *)0x20000; 2128 iov[1].iov_len = 4 * 512; 2129 iov[2].iov_base = (void *)0x30000; 2130 iov[2].iov_len = 6 * 512; 2131 2132 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 14, 2, 1); 2133 ut_expected_io_set_iov(expected_io, 0, iov[0].iov_base, 512 * 2); 2134 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2135 2136 /* Split iov[1].size to 2 iov entries then split the segments */ 2137 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 16, 2, 1); 2138 ut_expected_io_set_iov(expected_io, 0, iov[1].iov_base, 512 * 2); 2139 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2140 2141 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 18, 2, 1); 2142 ut_expected_io_set_iov(expected_io, 0, iov[1].iov_base + 512 * 2, 512 * 2); 2143 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2144 2145 /* Split iov[2].size to 3 iov entries then split the segments */ 2146 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 20, 2, 1); 2147 ut_expected_io_set_iov(expected_io, 0, iov[2].iov_base, 512 * 2); 2148 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2149 2150 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 22, 2, 1); 2151 ut_expected_io_set_iov(expected_io, 0, iov[2].iov_base + 512 * 2, 512 * 2); 2152 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2153 2154 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 24, 2, 1); 2155 ut_expected_io_set_iov(expected_io, 0, iov[2].iov_base + 512 * 4, 512 * 2); 2156 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2157 2158 rc = spdk_bdev_writev_blocks(desc, io_ch, iov, 3, 14, 12, io_done, NULL); 2159 CU_ASSERT(rc == 0); 2160 CU_ASSERT(g_io_done == false); 2161 2162 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 6); 2163 stub_complete_io(6); 2164 CU_ASSERT(g_io_done == true); 2165 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 2166 2167 /* Test multi vector command that needs to be split by strip and then needs to be 2168 * split further due to the capacity of parent IO child iovs. 2169 */ 2170 bdev->max_segment_size = 512; 2171 bdev->max_num_segments = 1; 2172 g_io_done = false; 2173 2174 for (i = 0; i < SPDK_BDEV_IO_NUM_CHILD_IOV; i++) { 2175 iov[i].iov_base = (void *)((i + 1) * 0x10000); 2176 iov[i].iov_len = 512 * 2; 2177 } 2178 2179 /* Each input iov.size is split into 2 iovs, 2180 * half of the input iov can fill all child iov entries of a single IO. 2181 */ 2182 for (i = 0; i < SPDK_BDEV_IO_NUM_CHILD_IOV / 2; i++) { 2183 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 2 * i, 1, 1); 2184 ut_expected_io_set_iov(expected_io, 0, iov[i].iov_base, 512); 2185 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2186 2187 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 2 * i + 1, 1, 1); 2188 ut_expected_io_set_iov(expected_io, 0, iov[i].iov_base + 512, 512); 2189 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2190 } 2191 2192 /* The remaining iov is split in the second round */ 2193 for (i = SPDK_BDEV_IO_NUM_CHILD_IOV / 2; i < SPDK_BDEV_IO_NUM_CHILD_IOV; i++) { 2194 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, i * 2, 1, 1); 2195 ut_expected_io_set_iov(expected_io, 0, iov[i].iov_base, 512); 2196 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2197 2198 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, i * 2 + 1, 1, 1); 2199 ut_expected_io_set_iov(expected_io, 0, iov[i].iov_base + 512, 512); 2200 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2201 } 2202 2203 rc = spdk_bdev_readv_blocks(desc, io_ch, iov, SPDK_BDEV_IO_NUM_CHILD_IOV, 0, 2204 SPDK_BDEV_IO_NUM_CHILD_IOV * 2, io_done, NULL); 2205 CU_ASSERT(rc == 0); 2206 CU_ASSERT(g_io_done == false); 2207 2208 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == SPDK_BDEV_IO_NUM_CHILD_IOV); 2209 stub_complete_io(SPDK_BDEV_IO_NUM_CHILD_IOV); 2210 CU_ASSERT(g_io_done == false); 2211 2212 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == SPDK_BDEV_IO_NUM_CHILD_IOV); 2213 stub_complete_io(SPDK_BDEV_IO_NUM_CHILD_IOV); 2214 CU_ASSERT(g_io_done == true); 2215 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 2216 2217 /* A wrong case, a child IO that is divided does 2218 * not meet the principle of multiples of block size, 2219 * and exits with error 2220 */ 2221 bdev->max_segment_size = 512; 2222 bdev->max_num_segments = 1; 2223 g_io_done = false; 2224 2225 iov[0].iov_base = (void *)0x10000; 2226 iov[0].iov_len = 512 + 256; 2227 iov[1].iov_base = (void *)0x20000; 2228 iov[1].iov_len = 256; 2229 2230 /* iov[0] is split to 512 and 256. 2231 * 256 is less than a block size, and it is found 2232 * in the next round of split that it is the first child IO smaller than 2233 * the block size, so the error exit 2234 */ 2235 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0, 1, 1); 2236 ut_expected_io_set_iov(expected_io, 0, iov[0].iov_base, 512); 2237 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2238 2239 rc = spdk_bdev_readv_blocks(desc, io_ch, iov, 2, 0, 2, io_done, NULL); 2240 CU_ASSERT(rc == 0); 2241 CU_ASSERT(g_io_done == false); 2242 2243 /* First child IO is OK */ 2244 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 2245 stub_complete_io(1); 2246 CU_ASSERT(g_io_done == true); 2247 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 2248 2249 /* error exit */ 2250 stub_complete_io(1); 2251 CU_ASSERT(g_io_done == true); 2252 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED); 2253 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 2254 2255 /* Test multi vector command that needs to be split by strip and then needs to be 2256 * split further due to the capacity of child iovs. 2257 * 2258 * In this case, the last two iovs need to be split, but it will exceed the capacity 2259 * of child iovs, so it needs to wait until the first batch completed. 2260 */ 2261 bdev->max_segment_size = 512; 2262 bdev->max_num_segments = SPDK_BDEV_IO_NUM_CHILD_IOV; 2263 g_io_done = false; 2264 2265 for (i = 0; i < SPDK_BDEV_IO_NUM_CHILD_IOV - 2; i++) { 2266 iov[i].iov_base = (void *)((i + 1) * 0x10000); 2267 iov[i].iov_len = 512; 2268 } 2269 for (i = SPDK_BDEV_IO_NUM_CHILD_IOV - 2; i < SPDK_BDEV_IO_NUM_CHILD_IOV; i++) { 2270 iov[i].iov_base = (void *)((i + 1) * 0x10000); 2271 iov[i].iov_len = 512 * 2; 2272 } 2273 2274 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0, 2275 SPDK_BDEV_IO_NUM_CHILD_IOV, SPDK_BDEV_IO_NUM_CHILD_IOV); 2276 /* 0 ~ (SPDK_BDEV_IO_NUM_CHILD_IOV - 2) Will not be split */ 2277 for (i = 0; i < SPDK_BDEV_IO_NUM_CHILD_IOV - 2; i++) { 2278 ut_expected_io_set_iov(expected_io, i, iov[i].iov_base, iov[i].iov_len); 2279 } 2280 /* (SPDK_BDEV_IO_NUM_CHILD_IOV - 2) is split */ 2281 ut_expected_io_set_iov(expected_io, i, iov[i].iov_base, 512); 2282 ut_expected_io_set_iov(expected_io, i + 1, iov[i].iov_base + 512, 512); 2283 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2284 2285 /* Child iov entries exceed the max num of parent IO so split it in next round */ 2286 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, SPDK_BDEV_IO_NUM_CHILD_IOV, 2, 2); 2287 ut_expected_io_set_iov(expected_io, 0, iov[i + 1].iov_base, 512); 2288 ut_expected_io_set_iov(expected_io, 1, iov[i + 1].iov_base + 512, 512); 2289 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2290 2291 rc = spdk_bdev_readv_blocks(desc, io_ch, iov, SPDK_BDEV_IO_NUM_CHILD_IOV, 0, 2292 SPDK_BDEV_IO_NUM_CHILD_IOV + 2, io_done, NULL); 2293 CU_ASSERT(rc == 0); 2294 CU_ASSERT(g_io_done == false); 2295 2296 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 2297 stub_complete_io(1); 2298 CU_ASSERT(g_io_done == false); 2299 2300 /* Next round */ 2301 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 2302 stub_complete_io(1); 2303 CU_ASSERT(g_io_done == true); 2304 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 2305 2306 /* This case is similar to the previous one, but the io composed of 2307 * the last few entries of child iov is not enough for a blocklen, so they 2308 * cannot be put into this IO, but wait until the next time. 2309 */ 2310 bdev->max_segment_size = 512; 2311 bdev->max_num_segments = SPDK_BDEV_IO_NUM_CHILD_IOV; 2312 g_io_done = false; 2313 2314 for (i = 0; i < SPDK_BDEV_IO_NUM_CHILD_IOV - 2; i++) { 2315 iov[i].iov_base = (void *)((i + 1) * 0x10000); 2316 iov[i].iov_len = 512; 2317 } 2318 2319 for (i = SPDK_BDEV_IO_NUM_CHILD_IOV - 2; i < SPDK_BDEV_IO_NUM_CHILD_IOV + 2; i++) { 2320 iov[i].iov_base = (void *)((i + 1) * 0x10000); 2321 iov[i].iov_len = 128; 2322 } 2323 2324 /* First child iovcnt is't SPDK_BDEV_IO_NUM_CHILD_IOV but SPDK_BDEV_IO_NUM_CHILD_IOV - 2. 2325 * Because the left 2 iov is not enough for a blocklen. 2326 */ 2327 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0, 2328 SPDK_BDEV_IO_NUM_CHILD_IOV - 2, SPDK_BDEV_IO_NUM_CHILD_IOV - 2); 2329 for (i = 0; i < SPDK_BDEV_IO_NUM_CHILD_IOV - 2; i++) { 2330 ut_expected_io_set_iov(expected_io, i, iov[i].iov_base, iov[i].iov_len); 2331 } 2332 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2333 2334 /* The second child io waits until the end of the first child io before executing. 2335 * Because the iovcnt of the two IOs exceeds the child iovcnt of the parent IO. 2336 * SPDK_BDEV_IO_NUM_CHILD_IOV - 2 to SPDK_BDEV_IO_NUM_CHILD_IOV + 2 2337 */ 2338 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, SPDK_BDEV_IO_NUM_CHILD_IOV - 2, 2339 1, 4); 2340 ut_expected_io_set_iov(expected_io, 0, iov[i].iov_base, iov[i].iov_len); 2341 ut_expected_io_set_iov(expected_io, 1, iov[i + 1].iov_base, iov[i + 1].iov_len); 2342 ut_expected_io_set_iov(expected_io, 2, iov[i + 2].iov_base, iov[i + 2].iov_len); 2343 ut_expected_io_set_iov(expected_io, 3, iov[i + 3].iov_base, iov[i + 3].iov_len); 2344 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2345 2346 rc = spdk_bdev_readv_blocks(desc, io_ch, iov, SPDK_BDEV_IO_NUM_CHILD_IOV + 2, 0, 2347 SPDK_BDEV_IO_NUM_CHILD_IOV - 1, io_done, NULL); 2348 CU_ASSERT(rc == 0); 2349 CU_ASSERT(g_io_done == false); 2350 2351 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 2352 stub_complete_io(1); 2353 CU_ASSERT(g_io_done == false); 2354 2355 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 2356 stub_complete_io(1); 2357 CU_ASSERT(g_io_done == true); 2358 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 2359 2360 /* A very complicated case. Each sg entry exceeds max_segment_size and 2361 * needs to be split. At the same time, child io must be a multiple of blocklen. 2362 * At the same time, child iovcnt exceeds parent iovcnt. 2363 */ 2364 bdev->max_segment_size = 512 + 128; 2365 bdev->max_num_segments = 3; 2366 g_io_done = false; 2367 2368 for (i = 0; i < SPDK_BDEV_IO_NUM_CHILD_IOV - 2; i++) { 2369 iov[i].iov_base = (void *)((i + 1) * 0x10000); 2370 iov[i].iov_len = 512 + 256; 2371 } 2372 2373 for (i = SPDK_BDEV_IO_NUM_CHILD_IOV - 2; i < SPDK_BDEV_IO_NUM_CHILD_IOV + 2; i++) { 2374 iov[i].iov_base = (void *)((i + 1) * 0x10000); 2375 iov[i].iov_len = 512 + 128; 2376 } 2377 2378 /* Child IOs use 9 entries per for() round and 3 * 9 = 27 child iov entries. 2379 * Consume 4 parent IO iov entries per for() round and 6 block size. 2380 * Generate 9 child IOs. 2381 */ 2382 for (i = 0; i < 3; i++) { 2383 uint32_t j = i * 4; 2384 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, i * 6, 2, 3); 2385 ut_expected_io_set_iov(expected_io, 0, iov[j].iov_base, 640); 2386 ut_expected_io_set_iov(expected_io, 1, iov[j].iov_base + 640, 128); 2387 ut_expected_io_set_iov(expected_io, 2, iov[j + 1].iov_base, 256); 2388 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2389 2390 /* Child io must be a multiple of blocklen 2391 * iov[j + 2] must be split. If the third entry is also added, 2392 * the multiple of blocklen cannot be guaranteed. But it still 2393 * occupies one iov entry of the parent child iov. 2394 */ 2395 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, i * 6 + 2, 2, 2); 2396 ut_expected_io_set_iov(expected_io, 0, iov[j + 1].iov_base + 256, 512); 2397 ut_expected_io_set_iov(expected_io, 1, iov[j + 2].iov_base, 512); 2398 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2399 2400 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, i * 6 + 4, 2, 3); 2401 ut_expected_io_set_iov(expected_io, 0, iov[j + 2].iov_base + 512, 256); 2402 ut_expected_io_set_iov(expected_io, 1, iov[j + 3].iov_base, 640); 2403 ut_expected_io_set_iov(expected_io, 2, iov[j + 3].iov_base + 640, 128); 2404 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2405 } 2406 2407 /* Child iov position at 27, the 10th child IO 2408 * iov entry index is 3 * 4 and offset is 3 * 6 2409 */ 2410 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 18, 2, 3); 2411 ut_expected_io_set_iov(expected_io, 0, iov[12].iov_base, 640); 2412 ut_expected_io_set_iov(expected_io, 1, iov[12].iov_base + 640, 128); 2413 ut_expected_io_set_iov(expected_io, 2, iov[13].iov_base, 256); 2414 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2415 2416 /* Child iov position at 30, the 11th child IO */ 2417 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 20, 2, 2); 2418 ut_expected_io_set_iov(expected_io, 0, iov[13].iov_base + 256, 512); 2419 ut_expected_io_set_iov(expected_io, 1, iov[14].iov_base, 512); 2420 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2421 2422 /* The 2nd split round and iovpos is 0, the 12th child IO */ 2423 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 22, 2, 3); 2424 ut_expected_io_set_iov(expected_io, 0, iov[14].iov_base + 512, 256); 2425 ut_expected_io_set_iov(expected_io, 1, iov[15].iov_base, 640); 2426 ut_expected_io_set_iov(expected_io, 2, iov[15].iov_base + 640, 128); 2427 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2428 2429 /* Consume 9 child IOs and 27 child iov entries. 2430 * Consume 4 parent IO iov entries per for() round and 6 block size. 2431 * Parent IO iov index start from 16 and block offset start from 24 2432 */ 2433 for (i = 0; i < 3; i++) { 2434 uint32_t j = i * 4 + 16; 2435 uint32_t offset = i * 6 + 24; 2436 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, offset, 2, 3); 2437 ut_expected_io_set_iov(expected_io, 0, iov[j].iov_base, 640); 2438 ut_expected_io_set_iov(expected_io, 1, iov[j].iov_base + 640, 128); 2439 ut_expected_io_set_iov(expected_io, 2, iov[j + 1].iov_base, 256); 2440 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2441 2442 /* Child io must be a multiple of blocklen 2443 * iov[j + 2] must be split. If the third entry is also added, 2444 * the multiple of blocklen cannot be guaranteed. But it still 2445 * occupies one iov entry of the parent child iov. 2446 */ 2447 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, offset + 2, 2, 2); 2448 ut_expected_io_set_iov(expected_io, 0, iov[j + 1].iov_base + 256, 512); 2449 ut_expected_io_set_iov(expected_io, 1, iov[j + 2].iov_base, 512); 2450 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2451 2452 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, offset + 4, 2, 3); 2453 ut_expected_io_set_iov(expected_io, 0, iov[j + 2].iov_base + 512, 256); 2454 ut_expected_io_set_iov(expected_io, 1, iov[j + 3].iov_base, 640); 2455 ut_expected_io_set_iov(expected_io, 2, iov[j + 3].iov_base + 640, 128); 2456 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2457 } 2458 2459 /* The 22th child IO, child iov position at 30 */ 2460 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 42, 1, 1); 2461 ut_expected_io_set_iov(expected_io, 0, iov[28].iov_base, 512); 2462 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2463 2464 /* The third round */ 2465 /* Here is the 23nd child IO and child iovpos is 0 */ 2466 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 43, 2, 3); 2467 ut_expected_io_set_iov(expected_io, 0, iov[28].iov_base + 512, 256); 2468 ut_expected_io_set_iov(expected_io, 1, iov[29].iov_base, 640); 2469 ut_expected_io_set_iov(expected_io, 2, iov[29].iov_base + 640, 128); 2470 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2471 2472 /* The 24th child IO */ 2473 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 45, 3, 3); 2474 ut_expected_io_set_iov(expected_io, 0, iov[30].iov_base, 640); 2475 ut_expected_io_set_iov(expected_io, 1, iov[31].iov_base, 640); 2476 ut_expected_io_set_iov(expected_io, 2, iov[32].iov_base, 256); 2477 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2478 2479 /* The 25th child IO */ 2480 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 48, 2, 2); 2481 ut_expected_io_set_iov(expected_io, 0, iov[32].iov_base + 256, 384); 2482 ut_expected_io_set_iov(expected_io, 1, iov[33].iov_base, 640); 2483 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2484 2485 rc = spdk_bdev_readv_blocks(desc, io_ch, iov, SPDK_BDEV_IO_NUM_CHILD_IOV + 2, 0, 2486 50, io_done, NULL); 2487 CU_ASSERT(rc == 0); 2488 CU_ASSERT(g_io_done == false); 2489 2490 /* Parent IO supports up to 32 child iovs, so it is calculated that 2491 * a maximum of 11 IOs can be split at a time, and the 2492 * splitting will continue after the first batch is over. 2493 */ 2494 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 11); 2495 stub_complete_io(11); 2496 CU_ASSERT(g_io_done == false); 2497 2498 /* The 2nd round */ 2499 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 11); 2500 stub_complete_io(11); 2501 CU_ASSERT(g_io_done == false); 2502 2503 /* The last round */ 2504 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 3); 2505 stub_complete_io(3); 2506 CU_ASSERT(g_io_done == true); 2507 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 2508 2509 /* Test an WRITE_ZEROES. This should also not be split. */ 2510 bdev->max_segment_size = 512; 2511 bdev->max_num_segments = 1; 2512 g_io_done = false; 2513 2514 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, 9, 36, 0); 2515 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2516 2517 rc = spdk_bdev_write_zeroes_blocks(desc, io_ch, 9, 36, io_done, NULL); 2518 CU_ASSERT(rc == 0); 2519 CU_ASSERT(g_io_done == false); 2520 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 2521 stub_complete_io(1); 2522 CU_ASSERT(g_io_done == true); 2523 2524 /* Test an UNMAP. This should also not be split. */ 2525 g_io_done = false; 2526 2527 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_UNMAP, 15, 4, 0); 2528 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2529 2530 rc = spdk_bdev_unmap_blocks(desc, io_ch, 15, 4, io_done, NULL); 2531 CU_ASSERT(rc == 0); 2532 CU_ASSERT(g_io_done == false); 2533 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 2534 stub_complete_io(1); 2535 CU_ASSERT(g_io_done == true); 2536 2537 /* Test a FLUSH. This should also not be split. */ 2538 g_io_done = false; 2539 2540 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_FLUSH, 15, 4, 0); 2541 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2542 2543 rc = spdk_bdev_flush_blocks(desc, io_ch, 15, 2, io_done, NULL); 2544 CU_ASSERT(rc == 0); 2545 CU_ASSERT(g_io_done == false); 2546 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 2547 stub_complete_io(1); 2548 CU_ASSERT(g_io_done == true); 2549 2550 /* Test a COPY. This should also not be split. */ 2551 g_io_done = false; 2552 2553 expected_io = ut_alloc_expected_copy_io(SPDK_BDEV_IO_TYPE_COPY, 9, 45, 36); 2554 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2555 2556 rc = spdk_bdev_copy_blocks(desc, io_ch, 9, 45, 36, io_done, NULL); 2557 CU_ASSERT(rc == 0); 2558 CU_ASSERT(g_io_done == false); 2559 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 2560 stub_complete_io(1); 2561 CU_ASSERT(g_io_done == true); 2562 2563 spdk_put_io_channel(io_ch); 2564 spdk_bdev_close(desc); 2565 free_bdev(bdev); 2566 ut_fini_bdev(); 2567 } 2568 2569 static void 2570 bdev_io_mix_split_test(void) 2571 { 2572 struct spdk_bdev *bdev; 2573 struct spdk_bdev_desc *desc = NULL; 2574 struct spdk_io_channel *io_ch; 2575 struct spdk_bdev_opts bdev_opts = {}; 2576 struct iovec iov[SPDK_BDEV_IO_NUM_CHILD_IOV * 2]; 2577 struct ut_expected_io *expected_io; 2578 uint64_t i; 2579 int rc; 2580 2581 spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts)); 2582 bdev_opts.bdev_io_pool_size = 512; 2583 bdev_opts.bdev_io_cache_size = 64; 2584 ut_init_bdev(&bdev_opts); 2585 2586 bdev = allocate_bdev("bdev0"); 2587 2588 rc = spdk_bdev_open_ext(bdev->name, true, bdev_ut_event_cb, NULL, &desc); 2589 CU_ASSERT(rc == 0); 2590 SPDK_CU_ASSERT_FATAL(desc != NULL); 2591 io_ch = spdk_bdev_get_io_channel(desc); 2592 CU_ASSERT(io_ch != NULL); 2593 2594 /* First case optimal_io_boundary == max_segment_size * max_num_segments */ 2595 bdev->split_on_optimal_io_boundary = true; 2596 bdev->optimal_io_boundary = 16; 2597 2598 bdev->max_segment_size = 512; 2599 bdev->max_num_segments = 16; 2600 g_io_done = false; 2601 2602 /* IO crossing the IO boundary requires split 2603 * Total 2 child IOs. 2604 */ 2605 2606 /* The 1st child IO split the segment_size to multiple segment entry */ 2607 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 14, 2, 2); 2608 ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 512); 2609 ut_expected_io_set_iov(expected_io, 1, (void *)(0xF000 + 512), 512); 2610 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2611 2612 /* The 2nd child IO split the segment_size to multiple segment entry */ 2613 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 16, 2, 2); 2614 ut_expected_io_set_iov(expected_io, 0, (void *)(0xF000 + 2 * 512), 512); 2615 ut_expected_io_set_iov(expected_io, 1, (void *)(0xF000 + 3 * 512), 512); 2616 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2617 2618 rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 4, io_done, NULL); 2619 CU_ASSERT(rc == 0); 2620 CU_ASSERT(g_io_done == false); 2621 2622 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 2623 stub_complete_io(2); 2624 CU_ASSERT(g_io_done == true); 2625 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 2626 2627 /* Second case optimal_io_boundary > max_segment_size * max_num_segments */ 2628 bdev->max_segment_size = 15 * 512; 2629 bdev->max_num_segments = 1; 2630 g_io_done = false; 2631 2632 /* IO crossing the IO boundary requires split. 2633 * The 1st child IO segment size exceeds the max_segment_size, 2634 * So 1st child IO will be split to multiple segment entry. 2635 * Then it split to 2 child IOs because of the max_num_segments. 2636 * Total 3 child IOs. 2637 */ 2638 2639 /* The first 2 IOs are in an IO boundary. 2640 * Because the optimal_io_boundary > max_segment_size * max_num_segments 2641 * So it split to the first 2 IOs. 2642 */ 2643 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0, 15, 1); 2644 ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 512 * 15); 2645 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2646 2647 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 15, 1, 1); 2648 ut_expected_io_set_iov(expected_io, 0, (void *)(0xF000 + 512 * 15), 512); 2649 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2650 2651 /* The 3rd Child IO is because of the io boundary */ 2652 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 16, 2, 1); 2653 ut_expected_io_set_iov(expected_io, 0, (void *)(0xF000 + 512 * 16), 512 * 2); 2654 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2655 2656 rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 0, 18, io_done, NULL); 2657 CU_ASSERT(rc == 0); 2658 CU_ASSERT(g_io_done == false); 2659 2660 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 3); 2661 stub_complete_io(3); 2662 CU_ASSERT(g_io_done == true); 2663 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 2664 2665 /* Third case optimal_io_boundary < max_segment_size * max_num_segments */ 2666 bdev->max_segment_size = 17 * 512; 2667 bdev->max_num_segments = 1; 2668 g_io_done = false; 2669 2670 /* IO crossing the IO boundary requires split. 2671 * Child IO does not split. 2672 * Total 2 child IOs. 2673 */ 2674 2675 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0, 16, 1); 2676 ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 512 * 16); 2677 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2678 2679 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 16, 2, 1); 2680 ut_expected_io_set_iov(expected_io, 0, (void *)(0xF000 + 512 * 16), 512 * 2); 2681 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2682 2683 rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 0, 18, io_done, NULL); 2684 CU_ASSERT(rc == 0); 2685 CU_ASSERT(g_io_done == false); 2686 2687 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 2688 stub_complete_io(2); 2689 CU_ASSERT(g_io_done == true); 2690 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 2691 2692 /* Now set up a more complex, multi-vector command that needs to be split, 2693 * including splitting iovecs. 2694 * optimal_io_boundary < max_segment_size * max_num_segments 2695 */ 2696 bdev->max_segment_size = 3 * 512; 2697 bdev->max_num_segments = 6; 2698 g_io_done = false; 2699 2700 iov[0].iov_base = (void *)0x10000; 2701 iov[0].iov_len = 4 * 512; 2702 iov[1].iov_base = (void *)0x20000; 2703 iov[1].iov_len = 4 * 512; 2704 iov[2].iov_base = (void *)0x30000; 2705 iov[2].iov_len = 10 * 512; 2706 2707 /* IO crossing the IO boundary requires split. 2708 * The 1st child IO segment size exceeds the max_segment_size and after 2709 * splitting segment_size, the num_segments exceeds max_num_segments. 2710 * So 1st child IO will be split to 2 child IOs. 2711 * Total 3 child IOs. 2712 */ 2713 2714 /* The first 2 IOs are in an IO boundary. 2715 * After splitting segment size the segment num exceeds. 2716 * So it splits to 2 child IOs. 2717 */ 2718 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 0, 14, 6); 2719 ut_expected_io_set_iov(expected_io, 0, iov[0].iov_base, 512 * 3); 2720 ut_expected_io_set_iov(expected_io, 1, iov[0].iov_base + 512 * 3, 512); 2721 ut_expected_io_set_iov(expected_io, 2, iov[1].iov_base, 512 * 3); 2722 ut_expected_io_set_iov(expected_io, 3, iov[1].iov_base + 512 * 3, 512); 2723 ut_expected_io_set_iov(expected_io, 4, iov[2].iov_base, 512 * 3); 2724 ut_expected_io_set_iov(expected_io, 5, iov[2].iov_base + 512 * 3, 512 * 3); 2725 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2726 2727 /* The 2nd child IO has the left segment entry */ 2728 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 14, 2, 1); 2729 ut_expected_io_set_iov(expected_io, 0, iov[2].iov_base + 512 * 6, 512 * 2); 2730 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2731 2732 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 16, 2, 1); 2733 ut_expected_io_set_iov(expected_io, 0, iov[2].iov_base + 512 * 8, 512 * 2); 2734 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2735 2736 rc = spdk_bdev_writev_blocks(desc, io_ch, iov, 3, 0, 18, io_done, NULL); 2737 CU_ASSERT(rc == 0); 2738 CU_ASSERT(g_io_done == false); 2739 2740 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 3); 2741 stub_complete_io(3); 2742 CU_ASSERT(g_io_done == true); 2743 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 2744 2745 /* A very complicated case. Each sg entry exceeds max_segment_size 2746 * and split on io boundary. 2747 * optimal_io_boundary < max_segment_size * max_num_segments 2748 */ 2749 bdev->max_segment_size = 3 * 512; 2750 bdev->max_num_segments = SPDK_BDEV_IO_NUM_CHILD_IOV; 2751 g_io_done = false; 2752 2753 for (i = 0; i < 20; i++) { 2754 iov[i].iov_base = (void *)((i + 1) * 0x10000); 2755 iov[i].iov_len = 512 * 4; 2756 } 2757 2758 /* IO crossing the IO boundary requires split. 2759 * 80 block length can split 5 child IOs base on offset and IO boundary. 2760 * Each iov entry needs to be split to 2 entries because of max_segment_size 2761 * Total 5 child IOs. 2762 */ 2763 2764 /* 4 iov entries are in an IO boundary and each iov entry splits to 2. 2765 * So each child IO occupies 8 child iov entries. 2766 */ 2767 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 0, 16, 8); 2768 for (i = 0; i < 4; i++) { 2769 int iovcnt = i * 2; 2770 ut_expected_io_set_iov(expected_io, iovcnt, iov[i].iov_base, 512 * 3); 2771 ut_expected_io_set_iov(expected_io, iovcnt + 1, iov[i].iov_base + 512 * 3, 512); 2772 } 2773 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2774 2775 /* 2nd child IO and total 16 child iov entries of parent IO */ 2776 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 16, 16, 8); 2777 for (i = 4; i < 8; i++) { 2778 int iovcnt = (i - 4) * 2; 2779 ut_expected_io_set_iov(expected_io, iovcnt, iov[i].iov_base, 512 * 3); 2780 ut_expected_io_set_iov(expected_io, iovcnt + 1, iov[i].iov_base + 512 * 3, 512); 2781 } 2782 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2783 2784 /* 3rd child IO and total 24 child iov entries of parent IO */ 2785 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 32, 16, 8); 2786 for (i = 8; i < 12; i++) { 2787 int iovcnt = (i - 8) * 2; 2788 ut_expected_io_set_iov(expected_io, iovcnt, iov[i].iov_base, 512 * 3); 2789 ut_expected_io_set_iov(expected_io, iovcnt + 1, iov[i].iov_base + 512 * 3, 512); 2790 } 2791 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2792 2793 /* 4th child IO and total 32 child iov entries of parent IO */ 2794 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 48, 16, 8); 2795 for (i = 12; i < 16; i++) { 2796 int iovcnt = (i - 12) * 2; 2797 ut_expected_io_set_iov(expected_io, iovcnt, iov[i].iov_base, 512 * 3); 2798 ut_expected_io_set_iov(expected_io, iovcnt + 1, iov[i].iov_base + 512 * 3, 512); 2799 } 2800 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2801 2802 /* 5th child IO and because of the child iov entry it should be split 2803 * in next round. 2804 */ 2805 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 64, 16, 8); 2806 for (i = 16; i < 20; i++) { 2807 int iovcnt = (i - 16) * 2; 2808 ut_expected_io_set_iov(expected_io, iovcnt, iov[i].iov_base, 512 * 3); 2809 ut_expected_io_set_iov(expected_io, iovcnt + 1, iov[i].iov_base + 512 * 3, 512); 2810 } 2811 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2812 2813 rc = spdk_bdev_writev_blocks(desc, io_ch, iov, 20, 0, 80, io_done, NULL); 2814 CU_ASSERT(rc == 0); 2815 CU_ASSERT(g_io_done == false); 2816 2817 /* First split round */ 2818 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 4); 2819 stub_complete_io(4); 2820 CU_ASSERT(g_io_done == false); 2821 2822 /* Second split round */ 2823 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 2824 stub_complete_io(1); 2825 CU_ASSERT(g_io_done == true); 2826 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 2827 2828 spdk_put_io_channel(io_ch); 2829 spdk_bdev_close(desc); 2830 free_bdev(bdev); 2831 ut_fini_bdev(); 2832 } 2833 2834 static void 2835 bdev_io_split_with_io_wait(void) 2836 { 2837 struct spdk_bdev *bdev; 2838 struct spdk_bdev_desc *desc = NULL; 2839 struct spdk_io_channel *io_ch; 2840 struct spdk_bdev_channel *channel; 2841 struct spdk_bdev_mgmt_channel *mgmt_ch; 2842 struct spdk_bdev_opts bdev_opts = {}; 2843 struct iovec iov[3]; 2844 struct ut_expected_io *expected_io; 2845 int rc; 2846 2847 spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts)); 2848 bdev_opts.bdev_io_pool_size = 2; 2849 bdev_opts.bdev_io_cache_size = 1; 2850 ut_init_bdev(&bdev_opts); 2851 2852 bdev = allocate_bdev("bdev0"); 2853 2854 rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc); 2855 CU_ASSERT(rc == 0); 2856 CU_ASSERT(desc != NULL); 2857 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 2858 io_ch = spdk_bdev_get_io_channel(desc); 2859 CU_ASSERT(io_ch != NULL); 2860 channel = spdk_io_channel_get_ctx(io_ch); 2861 mgmt_ch = channel->shared_resource->mgmt_ch; 2862 2863 bdev->optimal_io_boundary = 16; 2864 bdev->split_on_optimal_io_boundary = true; 2865 2866 rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, NULL); 2867 CU_ASSERT(rc == 0); 2868 2869 /* Now test that a single-vector command is split correctly. 2870 * Offset 14, length 8, payload 0xF000 2871 * Child - Offset 14, length 2, payload 0xF000 2872 * Child - Offset 16, length 6, payload 0xF000 + 2 * 512 2873 * 2874 * Set up the expected values before calling spdk_bdev_read_blocks 2875 */ 2876 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 14, 2, 1); 2877 ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 2 * 512); 2878 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2879 2880 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 16, 6, 1); 2881 ut_expected_io_set_iov(expected_io, 0, (void *)(0xF000 + 2 * 512), 6 * 512); 2882 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2883 2884 /* The following children will be submitted sequentially due to the capacity of 2885 * spdk_bdev_io. 2886 */ 2887 2888 /* The first child I/O will be queued to wait until an spdk_bdev_io becomes available */ 2889 rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 8, io_done, NULL); 2890 CU_ASSERT(rc == 0); 2891 CU_ASSERT(!TAILQ_EMPTY(&mgmt_ch->io_wait_queue)); 2892 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 2893 2894 /* Completing the first read I/O will submit the first child */ 2895 stub_complete_io(1); 2896 CU_ASSERT(TAILQ_EMPTY(&mgmt_ch->io_wait_queue)); 2897 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 2898 2899 /* Completing the first child will submit the second child */ 2900 stub_complete_io(1); 2901 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 2902 2903 /* Complete the second child I/O. This should result in our callback getting 2904 * invoked since the parent I/O is now complete. 2905 */ 2906 stub_complete_io(1); 2907 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 2908 2909 /* Now set up a more complex, multi-vector command that needs to be split, 2910 * including splitting iovecs. 2911 */ 2912 iov[0].iov_base = (void *)0x10000; 2913 iov[0].iov_len = 512; 2914 iov[1].iov_base = (void *)0x20000; 2915 iov[1].iov_len = 20 * 512; 2916 iov[2].iov_base = (void *)0x30000; 2917 iov[2].iov_len = 11 * 512; 2918 2919 g_io_done = false; 2920 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 14, 2, 2); 2921 ut_expected_io_set_iov(expected_io, 0, (void *)0x10000, 512); 2922 ut_expected_io_set_iov(expected_io, 1, (void *)0x20000, 512); 2923 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2924 2925 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 16, 16, 1); 2926 ut_expected_io_set_iov(expected_io, 0, (void *)(0x20000 + 512), 16 * 512); 2927 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2928 2929 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 32, 14, 2); 2930 ut_expected_io_set_iov(expected_io, 0, (void *)(0x20000 + 17 * 512), 3 * 512); 2931 ut_expected_io_set_iov(expected_io, 1, (void *)0x30000, 11 * 512); 2932 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2933 2934 rc = spdk_bdev_writev_blocks(desc, io_ch, iov, 3, 14, 32, io_done, NULL); 2935 CU_ASSERT(rc == 0); 2936 CU_ASSERT(g_io_done == false); 2937 2938 /* The following children will be submitted sequentially due to the capacity of 2939 * spdk_bdev_io. 2940 */ 2941 2942 /* Completing the first child will submit the second child */ 2943 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 2944 stub_complete_io(1); 2945 CU_ASSERT(g_io_done == false); 2946 2947 /* Completing the second child will submit the third child */ 2948 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 2949 stub_complete_io(1); 2950 CU_ASSERT(g_io_done == false); 2951 2952 /* Completing the third child will result in our callback getting invoked 2953 * since the parent I/O is now complete. 2954 */ 2955 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 2956 stub_complete_io(1); 2957 CU_ASSERT(g_io_done == true); 2958 2959 CU_ASSERT(TAILQ_EMPTY(&g_bdev_ut_channel->expected_io)); 2960 2961 spdk_put_io_channel(io_ch); 2962 spdk_bdev_close(desc); 2963 free_bdev(bdev); 2964 ut_fini_bdev(); 2965 } 2966 2967 static void 2968 bdev_io_write_unit_split_test(void) 2969 { 2970 struct spdk_bdev *bdev; 2971 struct spdk_bdev_desc *desc = NULL; 2972 struct spdk_io_channel *io_ch; 2973 struct spdk_bdev_opts bdev_opts = {}; 2974 struct iovec iov[SPDK_BDEV_IO_NUM_CHILD_IOV * 4]; 2975 struct ut_expected_io *expected_io; 2976 uint64_t i; 2977 int rc; 2978 2979 spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts)); 2980 bdev_opts.bdev_io_pool_size = 512; 2981 bdev_opts.bdev_io_cache_size = 64; 2982 ut_init_bdev(&bdev_opts); 2983 2984 bdev = allocate_bdev("bdev0"); 2985 2986 rc = spdk_bdev_open_ext(bdev->name, true, bdev_ut_event_cb, NULL, &desc); 2987 CU_ASSERT(rc == 0); 2988 SPDK_CU_ASSERT_FATAL(desc != NULL); 2989 io_ch = spdk_bdev_get_io_channel(desc); 2990 CU_ASSERT(io_ch != NULL); 2991 2992 /* Write I/O 2x larger than write_unit_size should get split into 2 I/Os */ 2993 bdev->write_unit_size = 32; 2994 bdev->split_on_write_unit = true; 2995 g_io_done = false; 2996 2997 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 0, 32, 1); 2998 ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 32 * 512); 2999 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 3000 3001 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 32, 32, 1); 3002 ut_expected_io_set_iov(expected_io, 0, (void *)(0xF000 + 32 * 512), 32 * 512); 3003 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 3004 3005 rc = spdk_bdev_write_blocks(desc, io_ch, (void *)0xF000, 0, 64, io_done, NULL); 3006 CU_ASSERT(rc == 0); 3007 CU_ASSERT(g_io_done == false); 3008 3009 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 3010 stub_complete_io(2); 3011 CU_ASSERT(g_io_done == true); 3012 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 3013 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS); 3014 3015 /* Same as above but with optimal_io_boundary < write_unit_size - the I/O should be split 3016 * based on write_unit_size, not optimal_io_boundary */ 3017 bdev->split_on_optimal_io_boundary = true; 3018 bdev->optimal_io_boundary = 16; 3019 g_io_done = false; 3020 3021 rc = spdk_bdev_write_blocks(desc, io_ch, (void *)0xF000, 0, 64, io_done, NULL); 3022 CU_ASSERT(rc == 0); 3023 CU_ASSERT(g_io_done == false); 3024 3025 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 3026 stub_complete_io(2); 3027 CU_ASSERT(g_io_done == true); 3028 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 3029 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS); 3030 3031 /* Write I/O should fail if it is smaller than write_unit_size */ 3032 g_io_done = false; 3033 3034 rc = spdk_bdev_write_blocks(desc, io_ch, (void *)0xF000, 0, 31, io_done, NULL); 3035 CU_ASSERT(rc == 0); 3036 CU_ASSERT(g_io_done == false); 3037 3038 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 3039 poll_threads(); 3040 CU_ASSERT(g_io_done == true); 3041 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 3042 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED); 3043 3044 /* Same for I/O not aligned to write_unit_size */ 3045 g_io_done = false; 3046 3047 rc = spdk_bdev_write_blocks(desc, io_ch, (void *)0xF000, 1, 32, io_done, NULL); 3048 CU_ASSERT(rc == 0); 3049 CU_ASSERT(g_io_done == false); 3050 3051 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 3052 poll_threads(); 3053 CU_ASSERT(g_io_done == true); 3054 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 3055 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED); 3056 3057 /* Write should fail if it needs to be split but there are not enough iovs to submit 3058 * an entire write unit */ 3059 bdev->write_unit_size = SPDK_COUNTOF(iov) / 2; 3060 g_io_done = false; 3061 3062 for (i = 0; i < SPDK_COUNTOF(iov); i++) { 3063 iov[i].iov_base = (void *)(0x1000 + 512 * i); 3064 iov[i].iov_len = 512; 3065 } 3066 3067 rc = spdk_bdev_writev_blocks(desc, io_ch, iov, SPDK_COUNTOF(iov), 0, SPDK_COUNTOF(iov), 3068 io_done, NULL); 3069 CU_ASSERT(rc == 0); 3070 CU_ASSERT(g_io_done == false); 3071 3072 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 3073 poll_threads(); 3074 CU_ASSERT(g_io_done == true); 3075 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 3076 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED); 3077 3078 spdk_put_io_channel(io_ch); 3079 spdk_bdev_close(desc); 3080 free_bdev(bdev); 3081 ut_fini_bdev(); 3082 } 3083 3084 static void 3085 bdev_io_alignment(void) 3086 { 3087 struct spdk_bdev *bdev; 3088 struct spdk_bdev_desc *desc = NULL; 3089 struct spdk_io_channel *io_ch; 3090 struct spdk_bdev_opts bdev_opts = {}; 3091 int rc; 3092 void *buf = NULL; 3093 struct iovec iovs[2]; 3094 int iovcnt; 3095 uint64_t alignment; 3096 3097 spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts)); 3098 bdev_opts.bdev_io_pool_size = 20; 3099 bdev_opts.bdev_io_cache_size = 2; 3100 ut_init_bdev(&bdev_opts); 3101 3102 fn_table.submit_request = stub_submit_request_get_buf; 3103 bdev = allocate_bdev("bdev0"); 3104 3105 rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc); 3106 CU_ASSERT(rc == 0); 3107 CU_ASSERT(desc != NULL); 3108 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 3109 io_ch = spdk_bdev_get_io_channel(desc); 3110 CU_ASSERT(io_ch != NULL); 3111 3112 /* Create aligned buffer */ 3113 rc = posix_memalign(&buf, 4096, 8192); 3114 SPDK_CU_ASSERT_FATAL(rc == 0); 3115 3116 /* Pass aligned single buffer with no alignment required */ 3117 alignment = 1; 3118 bdev->required_alignment = spdk_u32log2(alignment); 3119 3120 rc = spdk_bdev_write_blocks(desc, io_ch, buf, 0, 1, io_done, NULL); 3121 CU_ASSERT(rc == 0); 3122 stub_complete_io(1); 3123 CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt, 3124 alignment)); 3125 3126 rc = spdk_bdev_read_blocks(desc, io_ch, buf, 0, 1, io_done, NULL); 3127 CU_ASSERT(rc == 0); 3128 stub_complete_io(1); 3129 CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt, 3130 alignment)); 3131 3132 /* Pass unaligned single buffer with no alignment required */ 3133 alignment = 1; 3134 bdev->required_alignment = spdk_u32log2(alignment); 3135 3136 rc = spdk_bdev_write_blocks(desc, io_ch, buf + 4, 0, 1, io_done, NULL); 3137 CU_ASSERT(rc == 0); 3138 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0); 3139 CU_ASSERT(g_bdev_io->u.bdev.iovs[0].iov_base == buf + 4); 3140 stub_complete_io(1); 3141 3142 rc = spdk_bdev_read_blocks(desc, io_ch, buf + 4, 0, 1, io_done, NULL); 3143 CU_ASSERT(rc == 0); 3144 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0); 3145 CU_ASSERT(g_bdev_io->u.bdev.iovs[0].iov_base == buf + 4); 3146 stub_complete_io(1); 3147 3148 /* Pass unaligned single buffer with 512 alignment required */ 3149 alignment = 512; 3150 bdev->required_alignment = spdk_u32log2(alignment); 3151 3152 rc = spdk_bdev_write_blocks(desc, io_ch, buf + 4, 0, 1, io_done, NULL); 3153 CU_ASSERT(rc == 0); 3154 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 1); 3155 CU_ASSERT(g_bdev_io->u.bdev.iovs == &g_bdev_io->internal.bounce_iov); 3156 CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt, 3157 alignment)); 3158 stub_complete_io(1); 3159 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0); 3160 3161 rc = spdk_bdev_read_blocks(desc, io_ch, buf + 4, 0, 1, io_done, NULL); 3162 CU_ASSERT(rc == 0); 3163 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 1); 3164 CU_ASSERT(g_bdev_io->u.bdev.iovs == &g_bdev_io->internal.bounce_iov); 3165 CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt, 3166 alignment)); 3167 stub_complete_io(1); 3168 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0); 3169 3170 /* Pass unaligned single buffer with 4096 alignment required */ 3171 alignment = 4096; 3172 bdev->required_alignment = spdk_u32log2(alignment); 3173 3174 rc = spdk_bdev_write_blocks(desc, io_ch, buf + 8, 0, 1, io_done, NULL); 3175 CU_ASSERT(rc == 0); 3176 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 1); 3177 CU_ASSERT(g_bdev_io->u.bdev.iovs == &g_bdev_io->internal.bounce_iov); 3178 CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt, 3179 alignment)); 3180 stub_complete_io(1); 3181 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0); 3182 3183 rc = spdk_bdev_read_blocks(desc, io_ch, buf + 8, 0, 1, io_done, NULL); 3184 CU_ASSERT(rc == 0); 3185 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 1); 3186 CU_ASSERT(g_bdev_io->u.bdev.iovs == &g_bdev_io->internal.bounce_iov); 3187 CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt, 3188 alignment)); 3189 stub_complete_io(1); 3190 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0); 3191 3192 /* Pass aligned iovs with no alignment required */ 3193 alignment = 1; 3194 bdev->required_alignment = spdk_u32log2(alignment); 3195 3196 iovcnt = 1; 3197 iovs[0].iov_base = buf; 3198 iovs[0].iov_len = 512; 3199 3200 rc = spdk_bdev_writev(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL); 3201 CU_ASSERT(rc == 0); 3202 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0); 3203 stub_complete_io(1); 3204 CU_ASSERT(g_bdev_io->u.bdev.iovs[0].iov_base == iovs[0].iov_base); 3205 3206 rc = spdk_bdev_readv(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL); 3207 CU_ASSERT(rc == 0); 3208 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0); 3209 stub_complete_io(1); 3210 CU_ASSERT(g_bdev_io->u.bdev.iovs[0].iov_base == iovs[0].iov_base); 3211 3212 /* Pass unaligned iovs with no alignment required */ 3213 alignment = 1; 3214 bdev->required_alignment = spdk_u32log2(alignment); 3215 3216 iovcnt = 2; 3217 iovs[0].iov_base = buf + 16; 3218 iovs[0].iov_len = 256; 3219 iovs[1].iov_base = buf + 16 + 256 + 32; 3220 iovs[1].iov_len = 256; 3221 3222 rc = spdk_bdev_writev(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL); 3223 CU_ASSERT(rc == 0); 3224 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0); 3225 stub_complete_io(1); 3226 CU_ASSERT(g_bdev_io->u.bdev.iovs[0].iov_base == iovs[0].iov_base); 3227 3228 rc = spdk_bdev_readv(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL); 3229 CU_ASSERT(rc == 0); 3230 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0); 3231 stub_complete_io(1); 3232 CU_ASSERT(g_bdev_io->u.bdev.iovs[0].iov_base == iovs[0].iov_base); 3233 3234 /* Pass unaligned iov with 2048 alignment required */ 3235 alignment = 2048; 3236 bdev->required_alignment = spdk_u32log2(alignment); 3237 3238 iovcnt = 2; 3239 iovs[0].iov_base = buf + 16; 3240 iovs[0].iov_len = 256; 3241 iovs[1].iov_base = buf + 16 + 256 + 32; 3242 iovs[1].iov_len = 256; 3243 3244 rc = spdk_bdev_writev(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL); 3245 CU_ASSERT(rc == 0); 3246 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == iovcnt); 3247 CU_ASSERT(g_bdev_io->u.bdev.iovs == &g_bdev_io->internal.bounce_iov); 3248 CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt, 3249 alignment)); 3250 stub_complete_io(1); 3251 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0); 3252 3253 rc = spdk_bdev_readv(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL); 3254 CU_ASSERT(rc == 0); 3255 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == iovcnt); 3256 CU_ASSERT(g_bdev_io->u.bdev.iovs == &g_bdev_io->internal.bounce_iov); 3257 CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt, 3258 alignment)); 3259 stub_complete_io(1); 3260 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0); 3261 3262 /* Pass iov without allocated buffer without alignment required */ 3263 alignment = 1; 3264 bdev->required_alignment = spdk_u32log2(alignment); 3265 3266 iovcnt = 1; 3267 iovs[0].iov_base = NULL; 3268 iovs[0].iov_len = 0; 3269 3270 rc = spdk_bdev_readv(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL); 3271 CU_ASSERT(rc == 0); 3272 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0); 3273 CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt, 3274 alignment)); 3275 stub_complete_io(1); 3276 3277 /* Pass iov without allocated buffer with 1024 alignment required */ 3278 alignment = 1024; 3279 bdev->required_alignment = spdk_u32log2(alignment); 3280 3281 iovcnt = 1; 3282 iovs[0].iov_base = NULL; 3283 iovs[0].iov_len = 0; 3284 3285 rc = spdk_bdev_readv(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL); 3286 CU_ASSERT(rc == 0); 3287 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0); 3288 CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt, 3289 alignment)); 3290 stub_complete_io(1); 3291 3292 spdk_put_io_channel(io_ch); 3293 spdk_bdev_close(desc); 3294 free_bdev(bdev); 3295 fn_table.submit_request = stub_submit_request; 3296 ut_fini_bdev(); 3297 3298 free(buf); 3299 } 3300 3301 static void 3302 bdev_io_alignment_with_boundary(void) 3303 { 3304 struct spdk_bdev *bdev; 3305 struct spdk_bdev_desc *desc = NULL; 3306 struct spdk_io_channel *io_ch; 3307 struct spdk_bdev_opts bdev_opts = {}; 3308 int rc; 3309 void *buf = NULL; 3310 struct iovec iovs[2]; 3311 int iovcnt; 3312 uint64_t alignment; 3313 3314 spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts)); 3315 bdev_opts.bdev_io_pool_size = 20; 3316 bdev_opts.bdev_io_cache_size = 2; 3317 bdev_opts.opts_size = sizeof(bdev_opts); 3318 ut_init_bdev(&bdev_opts); 3319 3320 fn_table.submit_request = stub_submit_request_get_buf; 3321 bdev = allocate_bdev("bdev0"); 3322 3323 rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc); 3324 CU_ASSERT(rc == 0); 3325 CU_ASSERT(desc != NULL); 3326 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 3327 io_ch = spdk_bdev_get_io_channel(desc); 3328 CU_ASSERT(io_ch != NULL); 3329 3330 /* Create aligned buffer */ 3331 rc = posix_memalign(&buf, 4096, 131072); 3332 SPDK_CU_ASSERT_FATAL(rc == 0); 3333 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 3334 3335 /* 512 * 3 with 2 IO boundary, allocate small data buffer from bdev layer */ 3336 alignment = 512; 3337 bdev->required_alignment = spdk_u32log2(alignment); 3338 bdev->optimal_io_boundary = 2; 3339 bdev->split_on_optimal_io_boundary = true; 3340 3341 iovcnt = 1; 3342 iovs[0].iov_base = NULL; 3343 iovs[0].iov_len = 512 * 3; 3344 3345 rc = spdk_bdev_readv_blocks(desc, io_ch, iovs, iovcnt, 1, 3, io_done, NULL); 3346 CU_ASSERT(rc == 0); 3347 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 3348 stub_complete_io(2); 3349 3350 /* 8KiB with 16 IO boundary, allocate large data buffer from bdev layer */ 3351 alignment = 512; 3352 bdev->required_alignment = spdk_u32log2(alignment); 3353 bdev->optimal_io_boundary = 16; 3354 bdev->split_on_optimal_io_boundary = true; 3355 3356 iovcnt = 1; 3357 iovs[0].iov_base = NULL; 3358 iovs[0].iov_len = 512 * 16; 3359 3360 rc = spdk_bdev_readv_blocks(desc, io_ch, iovs, iovcnt, 1, 16, io_done, NULL); 3361 CU_ASSERT(rc == 0); 3362 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 3363 stub_complete_io(2); 3364 3365 /* 512 * 160 with 128 IO boundary, 63.5KiB + 16.5KiB for the two children requests */ 3366 alignment = 512; 3367 bdev->required_alignment = spdk_u32log2(alignment); 3368 bdev->optimal_io_boundary = 128; 3369 bdev->split_on_optimal_io_boundary = true; 3370 3371 iovcnt = 1; 3372 iovs[0].iov_base = buf + 16; 3373 iovs[0].iov_len = 512 * 160; 3374 rc = spdk_bdev_readv_blocks(desc, io_ch, iovs, iovcnt, 1, 160, io_done, NULL); 3375 CU_ASSERT(rc == 0); 3376 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 3377 stub_complete_io(2); 3378 3379 /* 512 * 3 with 2 IO boundary */ 3380 alignment = 512; 3381 bdev->required_alignment = spdk_u32log2(alignment); 3382 bdev->optimal_io_boundary = 2; 3383 bdev->split_on_optimal_io_boundary = true; 3384 3385 iovcnt = 2; 3386 iovs[0].iov_base = buf + 16; 3387 iovs[0].iov_len = 512; 3388 iovs[1].iov_base = buf + 16 + 512 + 32; 3389 iovs[1].iov_len = 1024; 3390 3391 rc = spdk_bdev_writev_blocks(desc, io_ch, iovs, iovcnt, 1, 3, io_done, NULL); 3392 CU_ASSERT(rc == 0); 3393 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 3394 stub_complete_io(2); 3395 3396 rc = spdk_bdev_readv_blocks(desc, io_ch, iovs, iovcnt, 1, 3, io_done, NULL); 3397 CU_ASSERT(rc == 0); 3398 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 3399 stub_complete_io(2); 3400 3401 /* 512 * 64 with 32 IO boundary */ 3402 bdev->optimal_io_boundary = 32; 3403 iovcnt = 2; 3404 iovs[0].iov_base = buf + 16; 3405 iovs[0].iov_len = 16384; 3406 iovs[1].iov_base = buf + 16 + 16384 + 32; 3407 iovs[1].iov_len = 16384; 3408 3409 rc = spdk_bdev_writev_blocks(desc, io_ch, iovs, iovcnt, 1, 64, io_done, NULL); 3410 CU_ASSERT(rc == 0); 3411 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 3); 3412 stub_complete_io(3); 3413 3414 rc = spdk_bdev_readv_blocks(desc, io_ch, iovs, iovcnt, 1, 64, io_done, NULL); 3415 CU_ASSERT(rc == 0); 3416 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 3); 3417 stub_complete_io(3); 3418 3419 /* 512 * 160 with 32 IO boundary */ 3420 iovcnt = 1; 3421 iovs[0].iov_base = buf + 16; 3422 iovs[0].iov_len = 16384 + 65536; 3423 3424 rc = spdk_bdev_writev_blocks(desc, io_ch, iovs, iovcnt, 1, 160, io_done, NULL); 3425 CU_ASSERT(rc == 0); 3426 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 6); 3427 stub_complete_io(6); 3428 3429 spdk_put_io_channel(io_ch); 3430 spdk_bdev_close(desc); 3431 free_bdev(bdev); 3432 fn_table.submit_request = stub_submit_request; 3433 ut_fini_bdev(); 3434 3435 free(buf); 3436 } 3437 3438 static void 3439 histogram_status_cb(void *cb_arg, int status) 3440 { 3441 g_status = status; 3442 } 3443 3444 static void 3445 histogram_data_cb(void *cb_arg, int status, struct spdk_histogram_data *histogram) 3446 { 3447 g_status = status; 3448 g_histogram = histogram; 3449 } 3450 3451 static void 3452 histogram_io_count(void *ctx, uint64_t start, uint64_t end, uint64_t count, 3453 uint64_t total, uint64_t so_far) 3454 { 3455 g_count += count; 3456 } 3457 3458 static void 3459 histogram_channel_data_cb(void *cb_arg, int status, struct spdk_histogram_data *histogram) 3460 { 3461 spdk_histogram_data_fn cb_fn = cb_arg; 3462 3463 g_status = status; 3464 3465 if (status == 0) { 3466 spdk_histogram_data_iterate(histogram, cb_fn, NULL); 3467 } 3468 } 3469 3470 static void 3471 bdev_histograms(void) 3472 { 3473 struct spdk_bdev *bdev; 3474 struct spdk_bdev_desc *desc = NULL; 3475 struct spdk_io_channel *ch; 3476 struct spdk_histogram_data *histogram; 3477 uint8_t buf[4096]; 3478 int rc; 3479 3480 ut_init_bdev(NULL); 3481 3482 bdev = allocate_bdev("bdev"); 3483 3484 rc = spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc); 3485 CU_ASSERT(rc == 0); 3486 CU_ASSERT(desc != NULL); 3487 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 3488 3489 ch = spdk_bdev_get_io_channel(desc); 3490 CU_ASSERT(ch != NULL); 3491 3492 /* Enable histogram */ 3493 g_status = -1; 3494 spdk_bdev_histogram_enable(bdev, histogram_status_cb, NULL, true); 3495 poll_threads(); 3496 CU_ASSERT(g_status == 0); 3497 CU_ASSERT(bdev->internal.histogram_enabled == true); 3498 3499 /* Allocate histogram */ 3500 histogram = spdk_histogram_data_alloc(); 3501 SPDK_CU_ASSERT_FATAL(histogram != NULL); 3502 3503 /* Check if histogram is zeroed */ 3504 spdk_bdev_histogram_get(bdev, histogram, histogram_data_cb, NULL); 3505 poll_threads(); 3506 CU_ASSERT(g_status == 0); 3507 SPDK_CU_ASSERT_FATAL(g_histogram != NULL); 3508 3509 g_count = 0; 3510 spdk_histogram_data_iterate(g_histogram, histogram_io_count, NULL); 3511 3512 CU_ASSERT(g_count == 0); 3513 3514 rc = spdk_bdev_write_blocks(desc, ch, buf, 0, 1, io_done, NULL); 3515 CU_ASSERT(rc == 0); 3516 3517 spdk_delay_us(10); 3518 stub_complete_io(1); 3519 poll_threads(); 3520 3521 rc = spdk_bdev_read_blocks(desc, ch, buf, 0, 1, io_done, NULL); 3522 CU_ASSERT(rc == 0); 3523 3524 spdk_delay_us(10); 3525 stub_complete_io(1); 3526 poll_threads(); 3527 3528 /* Check if histogram gathered data from all I/O channels */ 3529 g_histogram = NULL; 3530 spdk_bdev_histogram_get(bdev, histogram, histogram_data_cb, NULL); 3531 poll_threads(); 3532 CU_ASSERT(g_status == 0); 3533 CU_ASSERT(bdev->internal.histogram_enabled == true); 3534 SPDK_CU_ASSERT_FATAL(g_histogram != NULL); 3535 3536 g_count = 0; 3537 spdk_histogram_data_iterate(g_histogram, histogram_io_count, NULL); 3538 CU_ASSERT(g_count == 2); 3539 3540 g_count = 0; 3541 spdk_bdev_channel_get_histogram(ch, histogram_channel_data_cb, histogram_io_count); 3542 CU_ASSERT(g_status == 0); 3543 CU_ASSERT(g_count == 2); 3544 3545 /* Disable histogram */ 3546 spdk_bdev_histogram_enable(bdev, histogram_status_cb, NULL, false); 3547 poll_threads(); 3548 CU_ASSERT(g_status == 0); 3549 CU_ASSERT(bdev->internal.histogram_enabled == false); 3550 3551 /* Try to run histogram commands on disabled bdev */ 3552 spdk_bdev_histogram_get(bdev, histogram, histogram_data_cb, NULL); 3553 poll_threads(); 3554 CU_ASSERT(g_status == -EFAULT); 3555 3556 spdk_bdev_channel_get_histogram(ch, histogram_channel_data_cb, NULL); 3557 CU_ASSERT(g_status == -EFAULT); 3558 3559 spdk_histogram_data_free(histogram); 3560 spdk_put_io_channel(ch); 3561 spdk_bdev_close(desc); 3562 free_bdev(bdev); 3563 ut_fini_bdev(); 3564 } 3565 3566 static void 3567 _bdev_compare(bool emulated) 3568 { 3569 struct spdk_bdev *bdev; 3570 struct spdk_bdev_desc *desc = NULL; 3571 struct spdk_io_channel *ioch; 3572 struct ut_expected_io *expected_io; 3573 uint64_t offset, num_blocks; 3574 uint32_t num_completed; 3575 char aa_buf[512]; 3576 char bb_buf[512]; 3577 struct iovec compare_iov; 3578 uint8_t expected_io_type; 3579 int rc; 3580 3581 if (emulated) { 3582 expected_io_type = SPDK_BDEV_IO_TYPE_READ; 3583 } else { 3584 expected_io_type = SPDK_BDEV_IO_TYPE_COMPARE; 3585 } 3586 3587 memset(aa_buf, 0xaa, sizeof(aa_buf)); 3588 memset(bb_buf, 0xbb, sizeof(bb_buf)); 3589 3590 g_io_types_supported[SPDK_BDEV_IO_TYPE_COMPARE] = !emulated; 3591 3592 ut_init_bdev(NULL); 3593 fn_table.submit_request = stub_submit_request_get_buf; 3594 bdev = allocate_bdev("bdev"); 3595 3596 rc = spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc); 3597 CU_ASSERT_EQUAL(rc, 0); 3598 SPDK_CU_ASSERT_FATAL(desc != NULL); 3599 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 3600 ioch = spdk_bdev_get_io_channel(desc); 3601 SPDK_CU_ASSERT_FATAL(ioch != NULL); 3602 3603 fn_table.submit_request = stub_submit_request_get_buf; 3604 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 3605 3606 offset = 50; 3607 num_blocks = 1; 3608 compare_iov.iov_base = aa_buf; 3609 compare_iov.iov_len = sizeof(aa_buf); 3610 3611 /* 1. successful compare */ 3612 expected_io = ut_alloc_expected_io(expected_io_type, offset, num_blocks, 0); 3613 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 3614 3615 g_io_done = false; 3616 g_compare_read_buf = aa_buf; 3617 g_compare_read_buf_len = sizeof(aa_buf); 3618 rc = spdk_bdev_comparev_blocks(desc, ioch, &compare_iov, 1, offset, num_blocks, io_done, NULL); 3619 CU_ASSERT_EQUAL(rc, 0); 3620 num_completed = stub_complete_io(1); 3621 CU_ASSERT_EQUAL(num_completed, 1); 3622 CU_ASSERT(g_io_done == true); 3623 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS); 3624 3625 /* 2. miscompare */ 3626 expected_io = ut_alloc_expected_io(expected_io_type, offset, num_blocks, 0); 3627 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 3628 3629 g_io_done = false; 3630 g_compare_read_buf = bb_buf; 3631 g_compare_read_buf_len = sizeof(bb_buf); 3632 rc = spdk_bdev_comparev_blocks(desc, ioch, &compare_iov, 1, offset, num_blocks, io_done, NULL); 3633 CU_ASSERT_EQUAL(rc, 0); 3634 num_completed = stub_complete_io(1); 3635 CU_ASSERT_EQUAL(num_completed, 1); 3636 CU_ASSERT(g_io_done == true); 3637 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_MISCOMPARE); 3638 3639 spdk_put_io_channel(ioch); 3640 spdk_bdev_close(desc); 3641 free_bdev(bdev); 3642 fn_table.submit_request = stub_submit_request; 3643 ut_fini_bdev(); 3644 3645 g_io_types_supported[SPDK_BDEV_IO_TYPE_COMPARE] = true; 3646 3647 g_compare_read_buf = NULL; 3648 } 3649 3650 static void 3651 _bdev_compare_with_md(bool emulated) 3652 { 3653 struct spdk_bdev *bdev; 3654 struct spdk_bdev_desc *desc = NULL; 3655 struct spdk_io_channel *ioch; 3656 struct ut_expected_io *expected_io; 3657 uint64_t offset, num_blocks; 3658 uint32_t num_completed; 3659 char buf[1024 + 16 /* 2 * blocklen + 2 * mdlen */]; 3660 char buf_interleaved_miscompare[1024 + 16 /* 2 * blocklen + 2 * mdlen */]; 3661 char buf_miscompare[1024 /* 2 * blocklen */]; 3662 char md_buf[16]; 3663 char md_buf_miscompare[16]; 3664 struct iovec compare_iov; 3665 uint8_t expected_io_type; 3666 int rc; 3667 3668 if (emulated) { 3669 expected_io_type = SPDK_BDEV_IO_TYPE_READ; 3670 } else { 3671 expected_io_type = SPDK_BDEV_IO_TYPE_COMPARE; 3672 } 3673 3674 memset(buf, 0xaa, sizeof(buf)); 3675 memset(buf_interleaved_miscompare, 0xaa, sizeof(buf_interleaved_miscompare)); 3676 /* make last md different */ 3677 memset(buf_interleaved_miscompare + 1024 + 8, 0xbb, 8); 3678 memset(buf_miscompare, 0xbb, sizeof(buf_miscompare)); 3679 memset(md_buf, 0xaa, 16); 3680 memset(md_buf_miscompare, 0xbb, 16); 3681 3682 g_io_types_supported[SPDK_BDEV_IO_TYPE_COMPARE] = !emulated; 3683 3684 ut_init_bdev(NULL); 3685 fn_table.submit_request = stub_submit_request_get_buf; 3686 bdev = allocate_bdev("bdev"); 3687 3688 rc = spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc); 3689 CU_ASSERT_EQUAL(rc, 0); 3690 SPDK_CU_ASSERT_FATAL(desc != NULL); 3691 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 3692 ioch = spdk_bdev_get_io_channel(desc); 3693 SPDK_CU_ASSERT_FATAL(ioch != NULL); 3694 3695 fn_table.submit_request = stub_submit_request_get_buf; 3696 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 3697 3698 offset = 50; 3699 num_blocks = 2; 3700 3701 /* interleaved md & data */ 3702 bdev->md_interleave = true; 3703 bdev->md_len = 8; 3704 bdev->blocklen = 512 + 8; 3705 compare_iov.iov_base = buf; 3706 compare_iov.iov_len = sizeof(buf); 3707 3708 /* 1. successful compare with md interleaved */ 3709 expected_io = ut_alloc_expected_io(expected_io_type, offset, num_blocks, 0); 3710 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 3711 3712 g_io_done = false; 3713 g_compare_read_buf = buf; 3714 g_compare_read_buf_len = sizeof(buf); 3715 rc = spdk_bdev_comparev_blocks(desc, ioch, &compare_iov, 1, offset, num_blocks, io_done, NULL); 3716 CU_ASSERT_EQUAL(rc, 0); 3717 num_completed = stub_complete_io(1); 3718 CU_ASSERT_EQUAL(num_completed, 1); 3719 CU_ASSERT(g_io_done == true); 3720 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS); 3721 3722 /* 2. miscompare with md interleaved */ 3723 expected_io = ut_alloc_expected_io(expected_io_type, offset, num_blocks, 0); 3724 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 3725 3726 g_io_done = false; 3727 g_compare_read_buf = buf_interleaved_miscompare; 3728 g_compare_read_buf_len = sizeof(buf_interleaved_miscompare); 3729 rc = spdk_bdev_comparev_blocks(desc, ioch, &compare_iov, 1, offset, num_blocks, io_done, NULL); 3730 CU_ASSERT_EQUAL(rc, 0); 3731 num_completed = stub_complete_io(1); 3732 CU_ASSERT_EQUAL(num_completed, 1); 3733 CU_ASSERT(g_io_done == true); 3734 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_MISCOMPARE); 3735 3736 /* Separate data & md buffers */ 3737 bdev->md_interleave = false; 3738 bdev->blocklen = 512; 3739 compare_iov.iov_base = buf; 3740 compare_iov.iov_len = 1024; 3741 3742 /* 3. successful compare with md separated */ 3743 expected_io = ut_alloc_expected_io(expected_io_type, offset, num_blocks, 0); 3744 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 3745 3746 g_io_done = false; 3747 g_compare_read_buf = buf; 3748 g_compare_read_buf_len = 1024; 3749 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 3750 g_compare_md_buf = md_buf; 3751 rc = spdk_bdev_comparev_blocks_with_md(desc, ioch, &compare_iov, 1, md_buf, 3752 offset, num_blocks, io_done, NULL); 3753 CU_ASSERT_EQUAL(rc, 0); 3754 num_completed = stub_complete_io(1); 3755 CU_ASSERT_EQUAL(num_completed, 1); 3756 CU_ASSERT(g_io_done == true); 3757 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS); 3758 3759 /* 4. miscompare with md separated where md buf is different */ 3760 expected_io = ut_alloc_expected_io(expected_io_type, offset, num_blocks, 0); 3761 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 3762 3763 g_io_done = false; 3764 g_compare_read_buf = buf; 3765 g_compare_read_buf_len = 1024; 3766 g_compare_md_buf = md_buf_miscompare; 3767 rc = spdk_bdev_comparev_blocks_with_md(desc, ioch, &compare_iov, 1, md_buf, 3768 offset, num_blocks, io_done, NULL); 3769 CU_ASSERT_EQUAL(rc, 0); 3770 num_completed = stub_complete_io(1); 3771 CU_ASSERT_EQUAL(num_completed, 1); 3772 CU_ASSERT(g_io_done == true); 3773 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_MISCOMPARE); 3774 3775 /* 5. miscompare with md separated where buf is different */ 3776 expected_io = ut_alloc_expected_io(expected_io_type, offset, num_blocks, 0); 3777 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 3778 3779 g_io_done = false; 3780 g_compare_read_buf = buf_miscompare; 3781 g_compare_read_buf_len = sizeof(buf_miscompare); 3782 g_compare_md_buf = md_buf; 3783 rc = spdk_bdev_comparev_blocks_with_md(desc, ioch, &compare_iov, 1, md_buf, 3784 offset, num_blocks, io_done, NULL); 3785 CU_ASSERT_EQUAL(rc, 0); 3786 num_completed = stub_complete_io(1); 3787 CU_ASSERT_EQUAL(num_completed, 1); 3788 CU_ASSERT(g_io_done == true); 3789 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_MISCOMPARE); 3790 3791 bdev->md_len = 0; 3792 g_compare_md_buf = NULL; 3793 3794 spdk_put_io_channel(ioch); 3795 spdk_bdev_close(desc); 3796 free_bdev(bdev); 3797 fn_table.submit_request = stub_submit_request; 3798 ut_fini_bdev(); 3799 3800 g_io_types_supported[SPDK_BDEV_IO_TYPE_COMPARE] = true; 3801 3802 g_compare_read_buf = NULL; 3803 } 3804 3805 static void 3806 bdev_compare(void) 3807 { 3808 _bdev_compare(false); 3809 _bdev_compare_with_md(false); 3810 } 3811 3812 static void 3813 bdev_compare_emulated(void) 3814 { 3815 _bdev_compare(true); 3816 _bdev_compare_with_md(true); 3817 } 3818 3819 static void 3820 bdev_compare_and_write(void) 3821 { 3822 struct spdk_bdev *bdev; 3823 struct spdk_bdev_desc *desc = NULL; 3824 struct spdk_io_channel *ioch; 3825 struct ut_expected_io *expected_io; 3826 uint64_t offset, num_blocks; 3827 uint32_t num_completed; 3828 char aa_buf[512]; 3829 char bb_buf[512]; 3830 char cc_buf[512]; 3831 char write_buf[512]; 3832 struct iovec compare_iov; 3833 struct iovec write_iov; 3834 int rc; 3835 3836 memset(aa_buf, 0xaa, sizeof(aa_buf)); 3837 memset(bb_buf, 0xbb, sizeof(bb_buf)); 3838 memset(cc_buf, 0xcc, sizeof(cc_buf)); 3839 3840 g_io_types_supported[SPDK_BDEV_IO_TYPE_COMPARE] = false; 3841 3842 ut_init_bdev(NULL); 3843 fn_table.submit_request = stub_submit_request_get_buf; 3844 bdev = allocate_bdev("bdev"); 3845 3846 rc = spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc); 3847 CU_ASSERT_EQUAL(rc, 0); 3848 SPDK_CU_ASSERT_FATAL(desc != NULL); 3849 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 3850 ioch = spdk_bdev_get_io_channel(desc); 3851 SPDK_CU_ASSERT_FATAL(ioch != NULL); 3852 3853 fn_table.submit_request = stub_submit_request_get_buf; 3854 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 3855 3856 offset = 50; 3857 num_blocks = 1; 3858 compare_iov.iov_base = aa_buf; 3859 compare_iov.iov_len = sizeof(aa_buf); 3860 write_iov.iov_base = bb_buf; 3861 write_iov.iov_len = sizeof(bb_buf); 3862 3863 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, offset, num_blocks, 0); 3864 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 3865 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, offset, num_blocks, 0); 3866 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 3867 3868 g_io_done = false; 3869 g_compare_read_buf = aa_buf; 3870 g_compare_read_buf_len = sizeof(aa_buf); 3871 memset(write_buf, 0, sizeof(write_buf)); 3872 g_compare_write_buf = write_buf; 3873 g_compare_write_buf_len = sizeof(write_buf); 3874 rc = spdk_bdev_comparev_and_writev_blocks(desc, ioch, &compare_iov, 1, &write_iov, 1, 3875 offset, num_blocks, io_done, NULL); 3876 /* Trigger range locking */ 3877 poll_threads(); 3878 CU_ASSERT_EQUAL(rc, 0); 3879 num_completed = stub_complete_io(1); 3880 CU_ASSERT_EQUAL(num_completed, 1); 3881 CU_ASSERT(g_io_done == false); 3882 num_completed = stub_complete_io(1); 3883 /* Trigger range unlocking */ 3884 poll_threads(); 3885 CU_ASSERT_EQUAL(num_completed, 1); 3886 CU_ASSERT(g_io_done == true); 3887 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS); 3888 CU_ASSERT(memcmp(write_buf, bb_buf, sizeof(write_buf)) == 0); 3889 3890 /* Test miscompare */ 3891 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, offset, num_blocks, 0); 3892 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 3893 3894 g_io_done = false; 3895 g_compare_read_buf = cc_buf; 3896 g_compare_read_buf_len = sizeof(cc_buf); 3897 memset(write_buf, 0, sizeof(write_buf)); 3898 g_compare_write_buf = write_buf; 3899 g_compare_write_buf_len = sizeof(write_buf); 3900 rc = spdk_bdev_comparev_and_writev_blocks(desc, ioch, &compare_iov, 1, &write_iov, 1, 3901 offset, num_blocks, io_done, NULL); 3902 /* Trigger range locking */ 3903 poll_threads(); 3904 CU_ASSERT_EQUAL(rc, 0); 3905 num_completed = stub_complete_io(1); 3906 /* Trigger range unlocking earlier because we expect error here */ 3907 poll_threads(); 3908 CU_ASSERT_EQUAL(num_completed, 1); 3909 CU_ASSERT(g_io_done == true); 3910 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_MISCOMPARE); 3911 num_completed = stub_complete_io(1); 3912 CU_ASSERT_EQUAL(num_completed, 0); 3913 3914 spdk_put_io_channel(ioch); 3915 spdk_bdev_close(desc); 3916 free_bdev(bdev); 3917 fn_table.submit_request = stub_submit_request; 3918 ut_fini_bdev(); 3919 3920 g_io_types_supported[SPDK_BDEV_IO_TYPE_COMPARE] = true; 3921 3922 g_compare_read_buf = NULL; 3923 g_compare_write_buf = NULL; 3924 } 3925 3926 static void 3927 bdev_write_zeroes(void) 3928 { 3929 struct spdk_bdev *bdev; 3930 struct spdk_bdev_desc *desc = NULL; 3931 struct spdk_io_channel *ioch; 3932 struct ut_expected_io *expected_io; 3933 uint64_t offset, num_io_blocks, num_blocks; 3934 uint32_t num_completed, num_requests; 3935 int rc; 3936 3937 ut_init_bdev(NULL); 3938 bdev = allocate_bdev("bdev"); 3939 3940 rc = spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc); 3941 CU_ASSERT_EQUAL(rc, 0); 3942 SPDK_CU_ASSERT_FATAL(desc != NULL); 3943 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 3944 ioch = spdk_bdev_get_io_channel(desc); 3945 SPDK_CU_ASSERT_FATAL(ioch != NULL); 3946 3947 fn_table.submit_request = stub_submit_request; 3948 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 3949 3950 /* First test that if the bdev supports write_zeroes, the request won't be split */ 3951 bdev->md_len = 0; 3952 bdev->blocklen = 4096; 3953 num_blocks = (ZERO_BUFFER_SIZE / bdev->blocklen) * 2; 3954 3955 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, 0, num_blocks, 0); 3956 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 3957 rc = spdk_bdev_write_zeroes_blocks(desc, ioch, 0, num_blocks, io_done, NULL); 3958 CU_ASSERT_EQUAL(rc, 0); 3959 num_completed = stub_complete_io(1); 3960 CU_ASSERT_EQUAL(num_completed, 1); 3961 3962 /* Check that if write zeroes is not supported it'll be replaced by regular writes */ 3963 ut_enable_io_type(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, false); 3964 num_io_blocks = ZERO_BUFFER_SIZE / bdev->blocklen; 3965 num_requests = 2; 3966 num_blocks = (ZERO_BUFFER_SIZE / bdev->blocklen) * num_requests; 3967 3968 for (offset = 0; offset < num_requests; ++offset) { 3969 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 3970 offset * num_io_blocks, num_io_blocks, 0); 3971 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 3972 } 3973 3974 rc = spdk_bdev_write_zeroes_blocks(desc, ioch, 0, num_blocks, io_done, NULL); 3975 CU_ASSERT_EQUAL(rc, 0); 3976 num_completed = stub_complete_io(num_requests); 3977 CU_ASSERT_EQUAL(num_completed, num_requests); 3978 3979 /* Check that the splitting is correct if bdev has interleaved metadata */ 3980 bdev->md_interleave = true; 3981 bdev->md_len = 64; 3982 bdev->blocklen = 4096 + 64; 3983 num_blocks = (ZERO_BUFFER_SIZE / bdev->blocklen) * 2; 3984 3985 num_requests = offset = 0; 3986 while (offset < num_blocks) { 3987 num_io_blocks = spdk_min(ZERO_BUFFER_SIZE / bdev->blocklen, num_blocks - offset); 3988 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 3989 offset, num_io_blocks, 0); 3990 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 3991 offset += num_io_blocks; 3992 num_requests++; 3993 } 3994 3995 rc = spdk_bdev_write_zeroes_blocks(desc, ioch, 0, num_blocks, io_done, NULL); 3996 CU_ASSERT_EQUAL(rc, 0); 3997 num_completed = stub_complete_io(num_requests); 3998 CU_ASSERT_EQUAL(num_completed, num_requests); 3999 num_completed = stub_complete_io(num_requests); 4000 assert(num_completed == 0); 4001 4002 /* Check the the same for separate metadata buffer */ 4003 bdev->md_interleave = false; 4004 bdev->md_len = 64; 4005 bdev->blocklen = 4096; 4006 4007 num_requests = offset = 0; 4008 while (offset < num_blocks) { 4009 num_io_blocks = spdk_min(ZERO_BUFFER_SIZE / (bdev->blocklen + bdev->md_len), num_blocks); 4010 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 4011 offset, num_io_blocks, 0); 4012 expected_io->md_buf = (char *)g_bdev_mgr.zero_buffer + num_io_blocks * bdev->blocklen; 4013 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 4014 offset += num_io_blocks; 4015 num_requests++; 4016 } 4017 4018 rc = spdk_bdev_write_zeroes_blocks(desc, ioch, 0, num_blocks, io_done, NULL); 4019 CU_ASSERT_EQUAL(rc, 0); 4020 num_completed = stub_complete_io(num_requests); 4021 CU_ASSERT_EQUAL(num_completed, num_requests); 4022 4023 ut_enable_io_type(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, true); 4024 spdk_put_io_channel(ioch); 4025 spdk_bdev_close(desc); 4026 free_bdev(bdev); 4027 ut_fini_bdev(); 4028 } 4029 4030 static void 4031 bdev_zcopy_write(void) 4032 { 4033 struct spdk_bdev *bdev; 4034 struct spdk_bdev_desc *desc = NULL; 4035 struct spdk_io_channel *ioch; 4036 struct ut_expected_io *expected_io; 4037 uint64_t offset, num_blocks; 4038 uint32_t num_completed; 4039 char aa_buf[512]; 4040 struct iovec iov; 4041 int rc; 4042 const bool populate = false; 4043 const bool commit = true; 4044 4045 memset(aa_buf, 0xaa, sizeof(aa_buf)); 4046 4047 ut_init_bdev(NULL); 4048 bdev = allocate_bdev("bdev"); 4049 4050 rc = spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc); 4051 CU_ASSERT_EQUAL(rc, 0); 4052 SPDK_CU_ASSERT_FATAL(desc != NULL); 4053 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 4054 ioch = spdk_bdev_get_io_channel(desc); 4055 SPDK_CU_ASSERT_FATAL(ioch != NULL); 4056 4057 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 4058 4059 offset = 50; 4060 num_blocks = 1; 4061 iov.iov_base = NULL; 4062 iov.iov_len = 0; 4063 4064 g_zcopy_read_buf = (void *) 0x1122334455667788UL; 4065 g_zcopy_read_buf_len = (uint32_t) -1; 4066 /* Do a zcopy start for a write (populate=false) */ 4067 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_ZCOPY, offset, num_blocks, 0); 4068 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 4069 g_io_done = false; 4070 g_zcopy_write_buf = aa_buf; 4071 g_zcopy_write_buf_len = sizeof(aa_buf); 4072 g_zcopy_bdev_io = NULL; 4073 rc = spdk_bdev_zcopy_start(desc, ioch, &iov, 1, offset, num_blocks, populate, io_done, NULL); 4074 CU_ASSERT_EQUAL(rc, 0); 4075 num_completed = stub_complete_io(1); 4076 CU_ASSERT_EQUAL(num_completed, 1); 4077 CU_ASSERT(g_io_done == true); 4078 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS); 4079 /* Check that the iov has been set up */ 4080 CU_ASSERT(iov.iov_base == g_zcopy_write_buf); 4081 CU_ASSERT(iov.iov_len == g_zcopy_write_buf_len); 4082 /* Check that the bdev_io has been saved */ 4083 CU_ASSERT(g_zcopy_bdev_io != NULL); 4084 /* Now do the zcopy end for a write (commit=true) */ 4085 g_io_done = false; 4086 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_ZCOPY, offset, num_blocks, 0); 4087 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 4088 rc = spdk_bdev_zcopy_end(g_zcopy_bdev_io, commit, io_done, NULL); 4089 CU_ASSERT_EQUAL(rc, 0); 4090 num_completed = stub_complete_io(1); 4091 CU_ASSERT_EQUAL(num_completed, 1); 4092 CU_ASSERT(g_io_done == true); 4093 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS); 4094 /* Check the g_zcopy are reset by io_done */ 4095 CU_ASSERT(g_zcopy_write_buf == NULL); 4096 CU_ASSERT(g_zcopy_write_buf_len == 0); 4097 /* Check that io_done has freed the g_zcopy_bdev_io */ 4098 CU_ASSERT(g_zcopy_bdev_io == NULL); 4099 4100 /* Check the zcopy read buffer has not been touched which 4101 * ensures that the correct buffers were used. 4102 */ 4103 CU_ASSERT(g_zcopy_read_buf == (void *) 0x1122334455667788UL); 4104 CU_ASSERT(g_zcopy_read_buf_len == (uint32_t) -1); 4105 4106 spdk_put_io_channel(ioch); 4107 spdk_bdev_close(desc); 4108 free_bdev(bdev); 4109 ut_fini_bdev(); 4110 } 4111 4112 static void 4113 bdev_zcopy_read(void) 4114 { 4115 struct spdk_bdev *bdev; 4116 struct spdk_bdev_desc *desc = NULL; 4117 struct spdk_io_channel *ioch; 4118 struct ut_expected_io *expected_io; 4119 uint64_t offset, num_blocks; 4120 uint32_t num_completed; 4121 char aa_buf[512]; 4122 struct iovec iov; 4123 int rc; 4124 const bool populate = true; 4125 const bool commit = false; 4126 4127 memset(aa_buf, 0xaa, sizeof(aa_buf)); 4128 4129 ut_init_bdev(NULL); 4130 bdev = allocate_bdev("bdev"); 4131 4132 rc = spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc); 4133 CU_ASSERT_EQUAL(rc, 0); 4134 SPDK_CU_ASSERT_FATAL(desc != NULL); 4135 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 4136 ioch = spdk_bdev_get_io_channel(desc); 4137 SPDK_CU_ASSERT_FATAL(ioch != NULL); 4138 4139 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 4140 4141 offset = 50; 4142 num_blocks = 1; 4143 iov.iov_base = NULL; 4144 iov.iov_len = 0; 4145 4146 g_zcopy_write_buf = (void *) 0x1122334455667788UL; 4147 g_zcopy_write_buf_len = (uint32_t) -1; 4148 4149 /* Do a zcopy start for a read (populate=true) */ 4150 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_ZCOPY, offset, num_blocks, 0); 4151 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 4152 g_io_done = false; 4153 g_zcopy_read_buf = aa_buf; 4154 g_zcopy_read_buf_len = sizeof(aa_buf); 4155 g_zcopy_bdev_io = NULL; 4156 rc = spdk_bdev_zcopy_start(desc, ioch, &iov, 1, offset, num_blocks, populate, io_done, NULL); 4157 CU_ASSERT_EQUAL(rc, 0); 4158 num_completed = stub_complete_io(1); 4159 CU_ASSERT_EQUAL(num_completed, 1); 4160 CU_ASSERT(g_io_done == true); 4161 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS); 4162 /* Check that the iov has been set up */ 4163 CU_ASSERT(iov.iov_base == g_zcopy_read_buf); 4164 CU_ASSERT(iov.iov_len == g_zcopy_read_buf_len); 4165 /* Check that the bdev_io has been saved */ 4166 CU_ASSERT(g_zcopy_bdev_io != NULL); 4167 4168 /* Now do the zcopy end for a read (commit=false) */ 4169 g_io_done = false; 4170 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_ZCOPY, offset, num_blocks, 0); 4171 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 4172 rc = spdk_bdev_zcopy_end(g_zcopy_bdev_io, commit, io_done, NULL); 4173 CU_ASSERT_EQUAL(rc, 0); 4174 num_completed = stub_complete_io(1); 4175 CU_ASSERT_EQUAL(num_completed, 1); 4176 CU_ASSERT(g_io_done == true); 4177 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS); 4178 /* Check the g_zcopy are reset by io_done */ 4179 CU_ASSERT(g_zcopy_read_buf == NULL); 4180 CU_ASSERT(g_zcopy_read_buf_len == 0); 4181 /* Check that io_done has freed the g_zcopy_bdev_io */ 4182 CU_ASSERT(g_zcopy_bdev_io == NULL); 4183 4184 /* Check the zcopy write buffer has not been touched which 4185 * ensures that the correct buffers were used. 4186 */ 4187 CU_ASSERT(g_zcopy_write_buf == (void *) 0x1122334455667788UL); 4188 CU_ASSERT(g_zcopy_write_buf_len == (uint32_t) -1); 4189 4190 spdk_put_io_channel(ioch); 4191 spdk_bdev_close(desc); 4192 free_bdev(bdev); 4193 ut_fini_bdev(); 4194 } 4195 4196 static void 4197 bdev_open_while_hotremove(void) 4198 { 4199 struct spdk_bdev *bdev; 4200 struct spdk_bdev_desc *desc[2] = {}; 4201 int rc; 4202 4203 bdev = allocate_bdev("bdev"); 4204 4205 rc = spdk_bdev_open_ext("bdev", false, bdev_ut_event_cb, NULL, &desc[0]); 4206 CU_ASSERT(rc == 0); 4207 SPDK_CU_ASSERT_FATAL(desc[0] != NULL); 4208 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc[0])); 4209 4210 spdk_bdev_unregister(bdev, NULL, NULL); 4211 /* Bdev unregister is handled asynchronously. Poll thread to complete. */ 4212 poll_threads(); 4213 4214 rc = spdk_bdev_open_ext("bdev", false, bdev_ut_event_cb, NULL, &desc[1]); 4215 CU_ASSERT(rc == -ENODEV); 4216 SPDK_CU_ASSERT_FATAL(desc[1] == NULL); 4217 4218 spdk_bdev_close(desc[0]); 4219 free_bdev(bdev); 4220 } 4221 4222 static void 4223 bdev_close_while_hotremove(void) 4224 { 4225 struct spdk_bdev *bdev; 4226 struct spdk_bdev_desc *desc = NULL; 4227 int rc = 0; 4228 4229 bdev = allocate_bdev("bdev"); 4230 4231 rc = spdk_bdev_open_ext("bdev", true, bdev_open_cb1, &desc, &desc); 4232 CU_ASSERT_EQUAL(rc, 0); 4233 SPDK_CU_ASSERT_FATAL(desc != NULL); 4234 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 4235 4236 /* Simulate hot-unplug by unregistering bdev */ 4237 g_event_type1 = 0xFF; 4238 g_unregister_arg = NULL; 4239 g_unregister_rc = -1; 4240 spdk_bdev_unregister(bdev, bdev_unregister_cb, (void *)0x12345678); 4241 /* Close device while remove event is in flight */ 4242 spdk_bdev_close(desc); 4243 4244 /* Ensure that unregister callback is delayed */ 4245 CU_ASSERT_EQUAL(g_unregister_arg, NULL); 4246 CU_ASSERT_EQUAL(g_unregister_rc, -1); 4247 4248 poll_threads(); 4249 4250 /* Event callback shall not be issued because device was closed */ 4251 CU_ASSERT_EQUAL(g_event_type1, 0xFF); 4252 /* Unregister callback is issued */ 4253 CU_ASSERT_EQUAL(g_unregister_arg, (void *)0x12345678); 4254 CU_ASSERT_EQUAL(g_unregister_rc, 0); 4255 4256 free_bdev(bdev); 4257 } 4258 4259 static void 4260 bdev_open_ext(void) 4261 { 4262 struct spdk_bdev *bdev; 4263 struct spdk_bdev_desc *desc1 = NULL; 4264 struct spdk_bdev_desc *desc2 = NULL; 4265 int rc = 0; 4266 4267 bdev = allocate_bdev("bdev"); 4268 4269 rc = spdk_bdev_open_ext("bdev", true, NULL, NULL, &desc1); 4270 CU_ASSERT_EQUAL(rc, -EINVAL); 4271 4272 rc = spdk_bdev_open_ext("bdev", true, bdev_open_cb1, &desc1, &desc1); 4273 CU_ASSERT_EQUAL(rc, 0); 4274 4275 rc = spdk_bdev_open_ext("bdev", true, bdev_open_cb2, &desc2, &desc2); 4276 CU_ASSERT_EQUAL(rc, 0); 4277 4278 g_event_type1 = 0xFF; 4279 g_event_type2 = 0xFF; 4280 4281 /* Simulate hot-unplug by unregistering bdev */ 4282 spdk_bdev_unregister(bdev, NULL, NULL); 4283 poll_threads(); 4284 4285 /* Check if correct events have been triggered in event callback fn */ 4286 CU_ASSERT_EQUAL(g_event_type1, SPDK_BDEV_EVENT_REMOVE); 4287 CU_ASSERT_EQUAL(g_event_type2, SPDK_BDEV_EVENT_REMOVE); 4288 4289 free_bdev(bdev); 4290 poll_threads(); 4291 } 4292 4293 static void 4294 bdev_open_ext_unregister(void) 4295 { 4296 struct spdk_bdev *bdev; 4297 struct spdk_bdev_desc *desc1 = NULL; 4298 struct spdk_bdev_desc *desc2 = NULL; 4299 struct spdk_bdev_desc *desc3 = NULL; 4300 struct spdk_bdev_desc *desc4 = NULL; 4301 int rc = 0; 4302 4303 bdev = allocate_bdev("bdev"); 4304 4305 rc = spdk_bdev_open_ext("bdev", true, NULL, NULL, &desc1); 4306 CU_ASSERT_EQUAL(rc, -EINVAL); 4307 4308 rc = spdk_bdev_open_ext("bdev", true, bdev_open_cb1, &desc1, &desc1); 4309 CU_ASSERT_EQUAL(rc, 0); 4310 4311 rc = spdk_bdev_open_ext("bdev", true, bdev_open_cb2, &desc2, &desc2); 4312 CU_ASSERT_EQUAL(rc, 0); 4313 4314 rc = spdk_bdev_open_ext("bdev", true, bdev_open_cb3, &desc3, &desc3); 4315 CU_ASSERT_EQUAL(rc, 0); 4316 4317 rc = spdk_bdev_open_ext("bdev", true, bdev_open_cb4, &desc4, &desc4); 4318 CU_ASSERT_EQUAL(rc, 0); 4319 4320 g_event_type1 = 0xFF; 4321 g_event_type2 = 0xFF; 4322 g_event_type3 = 0xFF; 4323 g_event_type4 = 0xFF; 4324 4325 g_unregister_arg = NULL; 4326 g_unregister_rc = -1; 4327 4328 /* Simulate hot-unplug by unregistering bdev */ 4329 spdk_bdev_unregister(bdev, bdev_unregister_cb, (void *)0x12345678); 4330 4331 /* 4332 * Unregister is handled asynchronously and event callback 4333 * (i.e., above bdev_open_cbN) will be called. 4334 * For bdev_open_cb3 and bdev_open_cb4, it is intended to not 4335 * close the desc3 and desc4 so that the bdev is not closed. 4336 */ 4337 poll_threads(); 4338 4339 /* Check if correct events have been triggered in event callback fn */ 4340 CU_ASSERT_EQUAL(g_event_type1, SPDK_BDEV_EVENT_REMOVE); 4341 CU_ASSERT_EQUAL(g_event_type2, SPDK_BDEV_EVENT_REMOVE); 4342 CU_ASSERT_EQUAL(g_event_type3, SPDK_BDEV_EVENT_REMOVE); 4343 CU_ASSERT_EQUAL(g_event_type4, SPDK_BDEV_EVENT_REMOVE); 4344 4345 /* Check that unregister callback is delayed */ 4346 CU_ASSERT(g_unregister_arg == NULL); 4347 CU_ASSERT(g_unregister_rc == -1); 4348 4349 /* 4350 * Explicitly close desc3. As desc4 is still opened there, the 4351 * unergister callback is still delayed to execute. 4352 */ 4353 spdk_bdev_close(desc3); 4354 CU_ASSERT(g_unregister_arg == NULL); 4355 CU_ASSERT(g_unregister_rc == -1); 4356 4357 /* 4358 * Explicitly close desc4 to trigger the ongoing bdev unregister 4359 * operation after last desc is closed. 4360 */ 4361 spdk_bdev_close(desc4); 4362 4363 /* Poll the thread for the async unregister operation */ 4364 poll_threads(); 4365 4366 /* Check that unregister callback is executed */ 4367 CU_ASSERT(g_unregister_arg == (void *)0x12345678); 4368 CU_ASSERT(g_unregister_rc == 0); 4369 4370 free_bdev(bdev); 4371 poll_threads(); 4372 } 4373 4374 struct timeout_io_cb_arg { 4375 struct iovec iov; 4376 uint8_t type; 4377 }; 4378 4379 static int 4380 bdev_channel_count_submitted_io(struct spdk_bdev_channel *ch) 4381 { 4382 struct spdk_bdev_io *bdev_io; 4383 int n = 0; 4384 4385 if (!ch) { 4386 return -1; 4387 } 4388 4389 TAILQ_FOREACH(bdev_io, &ch->io_submitted, internal.ch_link) { 4390 n++; 4391 } 4392 4393 return n; 4394 } 4395 4396 static void 4397 bdev_channel_io_timeout_cb(void *cb_arg, struct spdk_bdev_io *bdev_io) 4398 { 4399 struct timeout_io_cb_arg *ctx = cb_arg; 4400 4401 ctx->type = bdev_io->type; 4402 ctx->iov.iov_base = bdev_io->iov.iov_base; 4403 ctx->iov.iov_len = bdev_io->iov.iov_len; 4404 } 4405 4406 static void 4407 bdev_set_io_timeout(void) 4408 { 4409 struct spdk_bdev *bdev; 4410 struct spdk_bdev_desc *desc = NULL; 4411 struct spdk_io_channel *io_ch = NULL; 4412 struct spdk_bdev_channel *bdev_ch = NULL; 4413 struct timeout_io_cb_arg cb_arg; 4414 4415 ut_init_bdev(NULL); 4416 bdev = allocate_bdev("bdev"); 4417 4418 CU_ASSERT(spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc) == 0); 4419 SPDK_CU_ASSERT_FATAL(desc != NULL); 4420 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 4421 4422 io_ch = spdk_bdev_get_io_channel(desc); 4423 CU_ASSERT(io_ch != NULL); 4424 4425 bdev_ch = spdk_io_channel_get_ctx(io_ch); 4426 CU_ASSERT(TAILQ_EMPTY(&bdev_ch->io_submitted)); 4427 4428 /* This is the part1. 4429 * We will check the bdev_ch->io_submitted list 4430 * TO make sure that it can link IOs and only the user submitted IOs 4431 */ 4432 CU_ASSERT(spdk_bdev_read(desc, io_ch, (void *)0x1000, 0, 4096, io_done, NULL) == 0); 4433 CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 1); 4434 CU_ASSERT(spdk_bdev_write(desc, io_ch, (void *)0x2000, 0, 4096, io_done, NULL) == 0); 4435 CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 2); 4436 stub_complete_io(1); 4437 CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 1); 4438 stub_complete_io(1); 4439 CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 0); 4440 4441 /* Split IO */ 4442 bdev->optimal_io_boundary = 16; 4443 bdev->split_on_optimal_io_boundary = true; 4444 4445 /* Now test that a single-vector command is split correctly. 4446 * Offset 14, length 8, payload 0xF000 4447 * Child - Offset 14, length 2, payload 0xF000 4448 * Child - Offset 16, length 6, payload 0xF000 + 2 * 512 4449 * 4450 * Set up the expected values before calling spdk_bdev_read_blocks 4451 */ 4452 CU_ASSERT(spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 8, io_done, NULL) == 0); 4453 /* We count all submitted IOs including IO that are generated by splitting. */ 4454 CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 3); 4455 stub_complete_io(1); 4456 CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 2); 4457 stub_complete_io(1); 4458 CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 0); 4459 4460 /* Also include the reset IO */ 4461 CU_ASSERT(spdk_bdev_reset(desc, io_ch, io_done, NULL) == 0); 4462 CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 1); 4463 poll_threads(); 4464 stub_complete_io(1); 4465 poll_threads(); 4466 CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 0); 4467 4468 /* This is part2 4469 * Test the desc timeout poller register 4470 */ 4471 4472 /* Successfully set the timeout */ 4473 CU_ASSERT(spdk_bdev_set_timeout(desc, 30, bdev_channel_io_timeout_cb, &cb_arg) == 0); 4474 CU_ASSERT(desc->io_timeout_poller != NULL); 4475 CU_ASSERT(desc->timeout_in_sec == 30); 4476 CU_ASSERT(desc->cb_fn == bdev_channel_io_timeout_cb); 4477 CU_ASSERT(desc->cb_arg == &cb_arg); 4478 4479 /* Change the timeout limit */ 4480 CU_ASSERT(spdk_bdev_set_timeout(desc, 20, bdev_channel_io_timeout_cb, &cb_arg) == 0); 4481 CU_ASSERT(desc->io_timeout_poller != NULL); 4482 CU_ASSERT(desc->timeout_in_sec == 20); 4483 CU_ASSERT(desc->cb_fn == bdev_channel_io_timeout_cb); 4484 CU_ASSERT(desc->cb_arg == &cb_arg); 4485 4486 /* Disable the timeout */ 4487 CU_ASSERT(spdk_bdev_set_timeout(desc, 0, NULL, NULL) == 0); 4488 CU_ASSERT(desc->io_timeout_poller == NULL); 4489 4490 /* This the part3 4491 * We will test to catch timeout IO and check whether the IO is 4492 * the submitted one. 4493 */ 4494 memset(&cb_arg, 0, sizeof(cb_arg)); 4495 CU_ASSERT(spdk_bdev_set_timeout(desc, 30, bdev_channel_io_timeout_cb, &cb_arg) == 0); 4496 CU_ASSERT(spdk_bdev_write_blocks(desc, io_ch, (void *)0x1000, 0, 1, io_done, NULL) == 0); 4497 4498 /* Don't reach the limit */ 4499 spdk_delay_us(15 * spdk_get_ticks_hz()); 4500 poll_threads(); 4501 CU_ASSERT(cb_arg.type == 0); 4502 CU_ASSERT(cb_arg.iov.iov_base == (void *)0x0); 4503 CU_ASSERT(cb_arg.iov.iov_len == 0); 4504 4505 /* 15 + 15 = 30 reach the limit */ 4506 spdk_delay_us(15 * spdk_get_ticks_hz()); 4507 poll_threads(); 4508 CU_ASSERT(cb_arg.type == SPDK_BDEV_IO_TYPE_WRITE); 4509 CU_ASSERT(cb_arg.iov.iov_base == (void *)0x1000); 4510 CU_ASSERT(cb_arg.iov.iov_len == 1 * bdev->blocklen); 4511 stub_complete_io(1); 4512 4513 /* Use the same split IO above and check the IO */ 4514 memset(&cb_arg, 0, sizeof(cb_arg)); 4515 CU_ASSERT(spdk_bdev_write_blocks(desc, io_ch, (void *)0xF000, 14, 8, io_done, NULL) == 0); 4516 4517 /* The first child complete in time */ 4518 spdk_delay_us(15 * spdk_get_ticks_hz()); 4519 poll_threads(); 4520 stub_complete_io(1); 4521 CU_ASSERT(cb_arg.type == 0); 4522 CU_ASSERT(cb_arg.iov.iov_base == (void *)0x0); 4523 CU_ASSERT(cb_arg.iov.iov_len == 0); 4524 4525 /* The second child reach the limit */ 4526 spdk_delay_us(15 * spdk_get_ticks_hz()); 4527 poll_threads(); 4528 CU_ASSERT(cb_arg.type == SPDK_BDEV_IO_TYPE_WRITE); 4529 CU_ASSERT(cb_arg.iov.iov_base == (void *)0xF000); 4530 CU_ASSERT(cb_arg.iov.iov_len == 8 * bdev->blocklen); 4531 stub_complete_io(1); 4532 4533 /* Also include the reset IO */ 4534 memset(&cb_arg, 0, sizeof(cb_arg)); 4535 CU_ASSERT(spdk_bdev_reset(desc, io_ch, io_done, NULL) == 0); 4536 spdk_delay_us(30 * spdk_get_ticks_hz()); 4537 poll_threads(); 4538 CU_ASSERT(cb_arg.type == SPDK_BDEV_IO_TYPE_RESET); 4539 stub_complete_io(1); 4540 poll_threads(); 4541 4542 spdk_put_io_channel(io_ch); 4543 spdk_bdev_close(desc); 4544 free_bdev(bdev); 4545 ut_fini_bdev(); 4546 } 4547 4548 static void 4549 bdev_set_qd_sampling(void) 4550 { 4551 struct spdk_bdev *bdev; 4552 struct spdk_bdev_desc *desc = NULL; 4553 struct spdk_io_channel *io_ch = NULL; 4554 struct spdk_bdev_channel *bdev_ch = NULL; 4555 struct timeout_io_cb_arg cb_arg; 4556 4557 ut_init_bdev(NULL); 4558 bdev = allocate_bdev("bdev"); 4559 4560 CU_ASSERT(spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc) == 0); 4561 SPDK_CU_ASSERT_FATAL(desc != NULL); 4562 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 4563 4564 io_ch = spdk_bdev_get_io_channel(desc); 4565 CU_ASSERT(io_ch != NULL); 4566 4567 bdev_ch = spdk_io_channel_get_ctx(io_ch); 4568 CU_ASSERT(TAILQ_EMPTY(&bdev_ch->io_submitted)); 4569 4570 /* This is the part1. 4571 * We will check the bdev_ch->io_submitted list 4572 * TO make sure that it can link IOs and only the user submitted IOs 4573 */ 4574 CU_ASSERT(spdk_bdev_read(desc, io_ch, (void *)0x1000, 0, 4096, io_done, NULL) == 0); 4575 CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 1); 4576 CU_ASSERT(spdk_bdev_write(desc, io_ch, (void *)0x2000, 0, 4096, io_done, NULL) == 0); 4577 CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 2); 4578 stub_complete_io(1); 4579 CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 1); 4580 stub_complete_io(1); 4581 CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 0); 4582 4583 /* This is the part2. 4584 * Test the bdev's qd poller register 4585 */ 4586 /* 1st Successfully set the qd sampling period */ 4587 spdk_bdev_set_qd_sampling_period(bdev, 10); 4588 CU_ASSERT(bdev->internal.new_period == 10); 4589 CU_ASSERT(bdev->internal.period == 10); 4590 CU_ASSERT(bdev->internal.qd_desc != NULL); 4591 poll_threads(); 4592 CU_ASSERT(bdev->internal.qd_poller != NULL); 4593 4594 /* 2nd Change the qd sampling period */ 4595 spdk_bdev_set_qd_sampling_period(bdev, 20); 4596 CU_ASSERT(bdev->internal.new_period == 20); 4597 CU_ASSERT(bdev->internal.period == 10); 4598 CU_ASSERT(bdev->internal.qd_desc != NULL); 4599 poll_threads(); 4600 CU_ASSERT(bdev->internal.qd_poller != NULL); 4601 CU_ASSERT(bdev->internal.period == bdev->internal.new_period); 4602 4603 /* 3rd Change the qd sampling period and verify qd_poll_in_progress */ 4604 spdk_delay_us(20); 4605 poll_thread_times(0, 1); 4606 CU_ASSERT(bdev->internal.qd_poll_in_progress == true); 4607 spdk_bdev_set_qd_sampling_period(bdev, 30); 4608 CU_ASSERT(bdev->internal.new_period == 30); 4609 CU_ASSERT(bdev->internal.period == 20); 4610 poll_threads(); 4611 CU_ASSERT(bdev->internal.qd_poll_in_progress == false); 4612 CU_ASSERT(bdev->internal.period == bdev->internal.new_period); 4613 4614 /* 4th Disable the qd sampling period */ 4615 spdk_bdev_set_qd_sampling_period(bdev, 0); 4616 CU_ASSERT(bdev->internal.new_period == 0); 4617 CU_ASSERT(bdev->internal.period == 30); 4618 poll_threads(); 4619 CU_ASSERT(bdev->internal.qd_poller == NULL); 4620 CU_ASSERT(bdev->internal.period == bdev->internal.new_period); 4621 CU_ASSERT(bdev->internal.qd_desc == NULL); 4622 4623 /* This is the part3. 4624 * We will test the submitted IO and reset works 4625 * properly with the qd sampling. 4626 */ 4627 memset(&cb_arg, 0, sizeof(cb_arg)); 4628 spdk_bdev_set_qd_sampling_period(bdev, 1); 4629 poll_threads(); 4630 4631 CU_ASSERT(spdk_bdev_write(desc, io_ch, (void *)0x2000, 0, 4096, io_done, NULL) == 0); 4632 CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 1); 4633 4634 /* Also include the reset IO */ 4635 memset(&cb_arg, 0, sizeof(cb_arg)); 4636 CU_ASSERT(spdk_bdev_reset(desc, io_ch, io_done, NULL) == 0); 4637 poll_threads(); 4638 4639 /* Close the desc */ 4640 spdk_put_io_channel(io_ch); 4641 spdk_bdev_close(desc); 4642 4643 /* Complete the submitted IO and reset */ 4644 stub_complete_io(2); 4645 poll_threads(); 4646 4647 free_bdev(bdev); 4648 ut_fini_bdev(); 4649 } 4650 4651 static void 4652 lba_range_overlap(void) 4653 { 4654 struct lba_range r1, r2; 4655 4656 r1.offset = 100; 4657 r1.length = 50; 4658 4659 r2.offset = 0; 4660 r2.length = 1; 4661 CU_ASSERT(!bdev_lba_range_overlapped(&r1, &r2)); 4662 4663 r2.offset = 0; 4664 r2.length = 100; 4665 CU_ASSERT(!bdev_lba_range_overlapped(&r1, &r2)); 4666 4667 r2.offset = 0; 4668 r2.length = 110; 4669 CU_ASSERT(bdev_lba_range_overlapped(&r1, &r2)); 4670 4671 r2.offset = 100; 4672 r2.length = 10; 4673 CU_ASSERT(bdev_lba_range_overlapped(&r1, &r2)); 4674 4675 r2.offset = 110; 4676 r2.length = 20; 4677 CU_ASSERT(bdev_lba_range_overlapped(&r1, &r2)); 4678 4679 r2.offset = 140; 4680 r2.length = 150; 4681 CU_ASSERT(bdev_lba_range_overlapped(&r1, &r2)); 4682 4683 r2.offset = 130; 4684 r2.length = 200; 4685 CU_ASSERT(bdev_lba_range_overlapped(&r1, &r2)); 4686 4687 r2.offset = 150; 4688 r2.length = 100; 4689 CU_ASSERT(!bdev_lba_range_overlapped(&r1, &r2)); 4690 4691 r2.offset = 110; 4692 r2.length = 0; 4693 CU_ASSERT(!bdev_lba_range_overlapped(&r1, &r2)); 4694 } 4695 4696 static bool g_lock_lba_range_done; 4697 static bool g_unlock_lba_range_done; 4698 4699 static void 4700 lock_lba_range_done(void *ctx, int status) 4701 { 4702 g_lock_lba_range_done = true; 4703 } 4704 4705 static void 4706 unlock_lba_range_done(void *ctx, int status) 4707 { 4708 g_unlock_lba_range_done = true; 4709 } 4710 4711 static void 4712 lock_lba_range_check_ranges(void) 4713 { 4714 struct spdk_bdev *bdev; 4715 struct spdk_bdev_desc *desc = NULL; 4716 struct spdk_io_channel *io_ch; 4717 struct spdk_bdev_channel *channel; 4718 struct lba_range *range; 4719 int ctx1; 4720 int rc; 4721 4722 ut_init_bdev(NULL); 4723 bdev = allocate_bdev("bdev0"); 4724 4725 rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc); 4726 CU_ASSERT(rc == 0); 4727 CU_ASSERT(desc != NULL); 4728 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 4729 io_ch = spdk_bdev_get_io_channel(desc); 4730 CU_ASSERT(io_ch != NULL); 4731 channel = spdk_io_channel_get_ctx(io_ch); 4732 4733 g_lock_lba_range_done = false; 4734 rc = bdev_lock_lba_range(desc, io_ch, 20, 10, lock_lba_range_done, &ctx1); 4735 CU_ASSERT(rc == 0); 4736 poll_threads(); 4737 4738 CU_ASSERT(g_lock_lba_range_done == true); 4739 range = TAILQ_FIRST(&channel->locked_ranges); 4740 SPDK_CU_ASSERT_FATAL(range != NULL); 4741 CU_ASSERT(range->offset == 20); 4742 CU_ASSERT(range->length == 10); 4743 CU_ASSERT(range->owner_ch == channel); 4744 4745 /* Unlocks must exactly match a lock. */ 4746 g_unlock_lba_range_done = false; 4747 rc = bdev_unlock_lba_range(desc, io_ch, 20, 1, unlock_lba_range_done, &ctx1); 4748 CU_ASSERT(rc == -EINVAL); 4749 CU_ASSERT(g_unlock_lba_range_done == false); 4750 4751 rc = bdev_unlock_lba_range(desc, io_ch, 20, 10, unlock_lba_range_done, &ctx1); 4752 CU_ASSERT(rc == 0); 4753 spdk_delay_us(100); 4754 poll_threads(); 4755 4756 CU_ASSERT(g_unlock_lba_range_done == true); 4757 CU_ASSERT(TAILQ_EMPTY(&channel->locked_ranges)); 4758 4759 spdk_put_io_channel(io_ch); 4760 spdk_bdev_close(desc); 4761 free_bdev(bdev); 4762 ut_fini_bdev(); 4763 } 4764 4765 static void 4766 lock_lba_range_with_io_outstanding(void) 4767 { 4768 struct spdk_bdev *bdev; 4769 struct spdk_bdev_desc *desc = NULL; 4770 struct spdk_io_channel *io_ch; 4771 struct spdk_bdev_channel *channel; 4772 struct lba_range *range; 4773 char buf[4096]; 4774 int ctx1; 4775 int rc; 4776 4777 ut_init_bdev(NULL); 4778 bdev = allocate_bdev("bdev0"); 4779 4780 rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc); 4781 CU_ASSERT(rc == 0); 4782 CU_ASSERT(desc != NULL); 4783 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 4784 io_ch = spdk_bdev_get_io_channel(desc); 4785 CU_ASSERT(io_ch != NULL); 4786 channel = spdk_io_channel_get_ctx(io_ch); 4787 4788 g_io_done = false; 4789 rc = spdk_bdev_read_blocks(desc, io_ch, buf, 20, 1, io_done, &ctx1); 4790 CU_ASSERT(rc == 0); 4791 4792 g_lock_lba_range_done = false; 4793 rc = bdev_lock_lba_range(desc, io_ch, 20, 10, lock_lba_range_done, &ctx1); 4794 CU_ASSERT(rc == 0); 4795 poll_threads(); 4796 4797 /* The lock should immediately become valid, since there are no outstanding 4798 * write I/O. 4799 */ 4800 CU_ASSERT(g_io_done == false); 4801 CU_ASSERT(g_lock_lba_range_done == true); 4802 range = TAILQ_FIRST(&channel->locked_ranges); 4803 SPDK_CU_ASSERT_FATAL(range != NULL); 4804 CU_ASSERT(range->offset == 20); 4805 CU_ASSERT(range->length == 10); 4806 CU_ASSERT(range->owner_ch == channel); 4807 CU_ASSERT(range->locked_ctx == &ctx1); 4808 4809 rc = bdev_unlock_lba_range(desc, io_ch, 20, 10, lock_lba_range_done, &ctx1); 4810 CU_ASSERT(rc == 0); 4811 stub_complete_io(1); 4812 spdk_delay_us(100); 4813 poll_threads(); 4814 4815 CU_ASSERT(TAILQ_EMPTY(&channel->locked_ranges)); 4816 4817 /* Now try again, but with a write I/O. */ 4818 g_io_done = false; 4819 rc = spdk_bdev_write_blocks(desc, io_ch, buf, 20, 1, io_done, &ctx1); 4820 CU_ASSERT(rc == 0); 4821 4822 g_lock_lba_range_done = false; 4823 rc = bdev_lock_lba_range(desc, io_ch, 20, 10, lock_lba_range_done, &ctx1); 4824 CU_ASSERT(rc == 0); 4825 poll_threads(); 4826 4827 /* The lock should not be fully valid yet, since a write I/O is outstanding. 4828 * But note that the range should be on the channel's locked_list, to make sure no 4829 * new write I/O are started. 4830 */ 4831 CU_ASSERT(g_io_done == false); 4832 CU_ASSERT(g_lock_lba_range_done == false); 4833 range = TAILQ_FIRST(&channel->locked_ranges); 4834 SPDK_CU_ASSERT_FATAL(range != NULL); 4835 CU_ASSERT(range->offset == 20); 4836 CU_ASSERT(range->length == 10); 4837 4838 /* Complete the write I/O. This should make the lock valid (checked by confirming 4839 * our callback was invoked). 4840 */ 4841 stub_complete_io(1); 4842 spdk_delay_us(100); 4843 poll_threads(); 4844 CU_ASSERT(g_io_done == true); 4845 CU_ASSERT(g_lock_lba_range_done == true); 4846 4847 rc = bdev_unlock_lba_range(desc, io_ch, 20, 10, unlock_lba_range_done, &ctx1); 4848 CU_ASSERT(rc == 0); 4849 poll_threads(); 4850 4851 CU_ASSERT(TAILQ_EMPTY(&channel->locked_ranges)); 4852 4853 spdk_put_io_channel(io_ch); 4854 spdk_bdev_close(desc); 4855 free_bdev(bdev); 4856 ut_fini_bdev(); 4857 } 4858 4859 static void 4860 lock_lba_range_overlapped(void) 4861 { 4862 struct spdk_bdev *bdev; 4863 struct spdk_bdev_desc *desc = NULL; 4864 struct spdk_io_channel *io_ch; 4865 struct spdk_bdev_channel *channel; 4866 struct lba_range *range; 4867 int ctx1; 4868 int rc; 4869 4870 ut_init_bdev(NULL); 4871 bdev = allocate_bdev("bdev0"); 4872 4873 rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc); 4874 CU_ASSERT(rc == 0); 4875 CU_ASSERT(desc != NULL); 4876 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 4877 io_ch = spdk_bdev_get_io_channel(desc); 4878 CU_ASSERT(io_ch != NULL); 4879 channel = spdk_io_channel_get_ctx(io_ch); 4880 4881 /* Lock range 20-29. */ 4882 g_lock_lba_range_done = false; 4883 rc = bdev_lock_lba_range(desc, io_ch, 20, 10, lock_lba_range_done, &ctx1); 4884 CU_ASSERT(rc == 0); 4885 poll_threads(); 4886 4887 CU_ASSERT(g_lock_lba_range_done == true); 4888 range = TAILQ_FIRST(&channel->locked_ranges); 4889 SPDK_CU_ASSERT_FATAL(range != NULL); 4890 CU_ASSERT(range->offset == 20); 4891 CU_ASSERT(range->length == 10); 4892 4893 /* Try to lock range 25-39. It should not lock immediately, since it overlaps with 4894 * 20-29. 4895 */ 4896 g_lock_lba_range_done = false; 4897 rc = bdev_lock_lba_range(desc, io_ch, 25, 15, lock_lba_range_done, &ctx1); 4898 CU_ASSERT(rc == 0); 4899 poll_threads(); 4900 4901 CU_ASSERT(g_lock_lba_range_done == false); 4902 range = TAILQ_FIRST(&bdev->internal.pending_locked_ranges); 4903 SPDK_CU_ASSERT_FATAL(range != NULL); 4904 CU_ASSERT(range->offset == 25); 4905 CU_ASSERT(range->length == 15); 4906 4907 /* Unlock 20-29. This should result in range 25-39 now getting locked since it 4908 * no longer overlaps with an active lock. 4909 */ 4910 g_unlock_lba_range_done = false; 4911 rc = bdev_unlock_lba_range(desc, io_ch, 20, 10, unlock_lba_range_done, &ctx1); 4912 CU_ASSERT(rc == 0); 4913 poll_threads(); 4914 4915 CU_ASSERT(g_unlock_lba_range_done == true); 4916 CU_ASSERT(TAILQ_EMPTY(&bdev->internal.pending_locked_ranges)); 4917 range = TAILQ_FIRST(&channel->locked_ranges); 4918 SPDK_CU_ASSERT_FATAL(range != NULL); 4919 CU_ASSERT(range->offset == 25); 4920 CU_ASSERT(range->length == 15); 4921 4922 /* Lock 40-59. This should immediately lock since it does not overlap with the 4923 * currently active 25-39 lock. 4924 */ 4925 g_lock_lba_range_done = false; 4926 rc = bdev_lock_lba_range(desc, io_ch, 40, 20, lock_lba_range_done, &ctx1); 4927 CU_ASSERT(rc == 0); 4928 poll_threads(); 4929 4930 CU_ASSERT(g_lock_lba_range_done == true); 4931 range = TAILQ_FIRST(&bdev->internal.locked_ranges); 4932 SPDK_CU_ASSERT_FATAL(range != NULL); 4933 range = TAILQ_NEXT(range, tailq); 4934 SPDK_CU_ASSERT_FATAL(range != NULL); 4935 CU_ASSERT(range->offset == 40); 4936 CU_ASSERT(range->length == 20); 4937 4938 /* Try to lock 35-44. Note that this overlaps with both 25-39 and 40-59. */ 4939 g_lock_lba_range_done = false; 4940 rc = bdev_lock_lba_range(desc, io_ch, 35, 10, lock_lba_range_done, &ctx1); 4941 CU_ASSERT(rc == 0); 4942 poll_threads(); 4943 4944 CU_ASSERT(g_lock_lba_range_done == false); 4945 range = TAILQ_FIRST(&bdev->internal.pending_locked_ranges); 4946 SPDK_CU_ASSERT_FATAL(range != NULL); 4947 CU_ASSERT(range->offset == 35); 4948 CU_ASSERT(range->length == 10); 4949 4950 /* Unlock 25-39. Make sure that 35-44 is still in the pending list, since 4951 * the 40-59 lock is still active. 4952 */ 4953 g_unlock_lba_range_done = false; 4954 rc = bdev_unlock_lba_range(desc, io_ch, 25, 15, unlock_lba_range_done, &ctx1); 4955 CU_ASSERT(rc == 0); 4956 poll_threads(); 4957 4958 CU_ASSERT(g_unlock_lba_range_done == true); 4959 CU_ASSERT(g_lock_lba_range_done == false); 4960 range = TAILQ_FIRST(&bdev->internal.pending_locked_ranges); 4961 SPDK_CU_ASSERT_FATAL(range != NULL); 4962 CU_ASSERT(range->offset == 35); 4963 CU_ASSERT(range->length == 10); 4964 4965 /* Unlock 40-59. This should result in 35-44 now getting locked, since there are 4966 * no longer any active overlapping locks. 4967 */ 4968 g_unlock_lba_range_done = false; 4969 rc = bdev_unlock_lba_range(desc, io_ch, 40, 20, unlock_lba_range_done, &ctx1); 4970 CU_ASSERT(rc == 0); 4971 poll_threads(); 4972 4973 CU_ASSERT(g_unlock_lba_range_done == true); 4974 CU_ASSERT(g_lock_lba_range_done == true); 4975 CU_ASSERT(TAILQ_EMPTY(&bdev->internal.pending_locked_ranges)); 4976 range = TAILQ_FIRST(&bdev->internal.locked_ranges); 4977 SPDK_CU_ASSERT_FATAL(range != NULL); 4978 CU_ASSERT(range->offset == 35); 4979 CU_ASSERT(range->length == 10); 4980 4981 /* Finally, unlock 35-44. */ 4982 g_unlock_lba_range_done = false; 4983 rc = bdev_unlock_lba_range(desc, io_ch, 35, 10, unlock_lba_range_done, &ctx1); 4984 CU_ASSERT(rc == 0); 4985 poll_threads(); 4986 4987 CU_ASSERT(g_unlock_lba_range_done == true); 4988 CU_ASSERT(TAILQ_EMPTY(&bdev->internal.locked_ranges)); 4989 4990 spdk_put_io_channel(io_ch); 4991 spdk_bdev_close(desc); 4992 free_bdev(bdev); 4993 ut_fini_bdev(); 4994 } 4995 4996 static void 4997 abort_done(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg) 4998 { 4999 g_abort_done = true; 5000 g_abort_status = bdev_io->internal.status; 5001 spdk_bdev_free_io(bdev_io); 5002 } 5003 5004 static void 5005 bdev_io_abort(void) 5006 { 5007 struct spdk_bdev *bdev; 5008 struct spdk_bdev_desc *desc = NULL; 5009 struct spdk_io_channel *io_ch; 5010 struct spdk_bdev_channel *channel; 5011 struct spdk_bdev_mgmt_channel *mgmt_ch; 5012 struct spdk_bdev_opts bdev_opts = {}; 5013 struct iovec iov[SPDK_BDEV_IO_NUM_CHILD_IOV * 2]; 5014 uint64_t io_ctx1 = 0, io_ctx2 = 0, i; 5015 int rc; 5016 5017 spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts)); 5018 bdev_opts.bdev_io_pool_size = 7; 5019 bdev_opts.bdev_io_cache_size = 2; 5020 ut_init_bdev(&bdev_opts); 5021 5022 bdev = allocate_bdev("bdev0"); 5023 5024 rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc); 5025 CU_ASSERT(rc == 0); 5026 CU_ASSERT(desc != NULL); 5027 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 5028 io_ch = spdk_bdev_get_io_channel(desc); 5029 CU_ASSERT(io_ch != NULL); 5030 channel = spdk_io_channel_get_ctx(io_ch); 5031 mgmt_ch = channel->shared_resource->mgmt_ch; 5032 5033 g_abort_done = false; 5034 5035 ut_enable_io_type(SPDK_BDEV_IO_TYPE_ABORT, false); 5036 5037 rc = spdk_bdev_abort(desc, io_ch, &io_ctx1, abort_done, NULL); 5038 CU_ASSERT(rc == -ENOTSUP); 5039 5040 ut_enable_io_type(SPDK_BDEV_IO_TYPE_ABORT, true); 5041 5042 rc = spdk_bdev_abort(desc, io_ch, &io_ctx2, abort_done, NULL); 5043 CU_ASSERT(rc == 0); 5044 CU_ASSERT(g_abort_done == true); 5045 CU_ASSERT(g_abort_status == SPDK_BDEV_IO_STATUS_FAILED); 5046 5047 /* Test the case that the target I/O was successfully aborted. */ 5048 g_io_done = false; 5049 5050 rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, &io_ctx1); 5051 CU_ASSERT(rc == 0); 5052 CU_ASSERT(g_io_done == false); 5053 5054 g_abort_done = false; 5055 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 5056 5057 rc = spdk_bdev_abort(desc, io_ch, &io_ctx1, abort_done, NULL); 5058 CU_ASSERT(rc == 0); 5059 CU_ASSERT(g_io_done == true); 5060 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED); 5061 stub_complete_io(1); 5062 CU_ASSERT(g_abort_done == true); 5063 CU_ASSERT(g_abort_status == SPDK_BDEV_IO_STATUS_SUCCESS); 5064 5065 /* Test the case that the target I/O was not aborted because it completed 5066 * in the middle of execution of the abort. 5067 */ 5068 g_io_done = false; 5069 5070 rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, &io_ctx1); 5071 CU_ASSERT(rc == 0); 5072 CU_ASSERT(g_io_done == false); 5073 5074 g_abort_done = false; 5075 g_io_exp_status = SPDK_BDEV_IO_STATUS_FAILED; 5076 5077 rc = spdk_bdev_abort(desc, io_ch, &io_ctx1, abort_done, NULL); 5078 CU_ASSERT(rc == 0); 5079 CU_ASSERT(g_io_done == false); 5080 5081 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 5082 stub_complete_io(1); 5083 CU_ASSERT(g_io_done == true); 5084 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS); 5085 5086 g_io_exp_status = SPDK_BDEV_IO_STATUS_FAILED; 5087 stub_complete_io(1); 5088 CU_ASSERT(g_abort_done == true); 5089 CU_ASSERT(g_abort_status == SPDK_BDEV_IO_STATUS_SUCCESS); 5090 5091 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 5092 5093 bdev->optimal_io_boundary = 16; 5094 bdev->split_on_optimal_io_boundary = true; 5095 5096 /* Test that a single-vector command which is split is aborted correctly. 5097 * Offset 14, length 8, payload 0xF000 5098 * Child - Offset 14, length 2, payload 0xF000 5099 * Child - Offset 16, length 6, payload 0xF000 + 2 * 512 5100 */ 5101 g_io_done = false; 5102 5103 rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 8, io_done, &io_ctx1); 5104 CU_ASSERT(rc == 0); 5105 CU_ASSERT(g_io_done == false); 5106 5107 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 5108 5109 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 5110 5111 rc = spdk_bdev_abort(desc, io_ch, &io_ctx1, abort_done, NULL); 5112 CU_ASSERT(rc == 0); 5113 CU_ASSERT(g_io_done == true); 5114 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED); 5115 stub_complete_io(2); 5116 CU_ASSERT(g_abort_done == true); 5117 CU_ASSERT(g_abort_status == SPDK_BDEV_IO_STATUS_SUCCESS); 5118 5119 /* Test that a multi-vector command that needs to be split by strip and then 5120 * needs to be split is aborted correctly. Abort is requested before the second 5121 * child I/O was submitted. The parent I/O should complete with failure without 5122 * submitting the second child I/O. 5123 */ 5124 for (i = 0; i < SPDK_BDEV_IO_NUM_CHILD_IOV * 2; i++) { 5125 iov[i].iov_base = (void *)((i + 1) * 0x10000); 5126 iov[i].iov_len = 512; 5127 } 5128 5129 bdev->optimal_io_boundary = SPDK_BDEV_IO_NUM_CHILD_IOV; 5130 g_io_done = false; 5131 rc = spdk_bdev_readv_blocks(desc, io_ch, iov, SPDK_BDEV_IO_NUM_CHILD_IOV * 2, 0, 5132 SPDK_BDEV_IO_NUM_CHILD_IOV * 2, io_done, &io_ctx1); 5133 CU_ASSERT(rc == 0); 5134 CU_ASSERT(g_io_done == false); 5135 5136 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 5137 5138 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 5139 5140 rc = spdk_bdev_abort(desc, io_ch, &io_ctx1, abort_done, NULL); 5141 CU_ASSERT(rc == 0); 5142 CU_ASSERT(g_io_done == true); 5143 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED); 5144 stub_complete_io(1); 5145 CU_ASSERT(g_abort_done == true); 5146 CU_ASSERT(g_abort_status == SPDK_BDEV_IO_STATUS_SUCCESS); 5147 5148 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 5149 5150 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 5151 5152 bdev->optimal_io_boundary = 16; 5153 g_io_done = false; 5154 5155 /* Test that a ingle-vector command which is split is aborted correctly. 5156 * Differently from the above, the child abort request will be submitted 5157 * sequentially due to the capacity of spdk_bdev_io. 5158 */ 5159 rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 50, io_done, &io_ctx1); 5160 CU_ASSERT(rc == 0); 5161 CU_ASSERT(g_io_done == false); 5162 5163 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 4); 5164 5165 g_abort_done = false; 5166 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 5167 5168 rc = spdk_bdev_abort(desc, io_ch, &io_ctx1, abort_done, NULL); 5169 CU_ASSERT(rc == 0); 5170 CU_ASSERT(!TAILQ_EMPTY(&mgmt_ch->io_wait_queue)); 5171 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 4); 5172 5173 stub_complete_io(1); 5174 CU_ASSERT(g_io_done == true); 5175 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED); 5176 stub_complete_io(3); 5177 CU_ASSERT(g_abort_done == true); 5178 CU_ASSERT(g_abort_status == SPDK_BDEV_IO_STATUS_SUCCESS); 5179 5180 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 5181 5182 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 5183 5184 spdk_put_io_channel(io_ch); 5185 spdk_bdev_close(desc); 5186 free_bdev(bdev); 5187 ut_fini_bdev(); 5188 } 5189 5190 static void 5191 bdev_unmap(void) 5192 { 5193 struct spdk_bdev *bdev; 5194 struct spdk_bdev_desc *desc = NULL; 5195 struct spdk_io_channel *ioch; 5196 struct spdk_bdev_channel *bdev_ch; 5197 struct ut_expected_io *expected_io; 5198 struct spdk_bdev_opts bdev_opts = {}; 5199 uint32_t i, num_outstanding; 5200 uint64_t offset, num_blocks, max_unmap_blocks, num_children; 5201 int rc; 5202 5203 spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts)); 5204 bdev_opts.bdev_io_pool_size = 512; 5205 bdev_opts.bdev_io_cache_size = 64; 5206 ut_init_bdev(&bdev_opts); 5207 5208 bdev = allocate_bdev("bdev"); 5209 5210 rc = spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc); 5211 CU_ASSERT_EQUAL(rc, 0); 5212 SPDK_CU_ASSERT_FATAL(desc != NULL); 5213 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 5214 ioch = spdk_bdev_get_io_channel(desc); 5215 SPDK_CU_ASSERT_FATAL(ioch != NULL); 5216 bdev_ch = spdk_io_channel_get_ctx(ioch); 5217 CU_ASSERT(TAILQ_EMPTY(&bdev_ch->io_submitted)); 5218 5219 fn_table.submit_request = stub_submit_request; 5220 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 5221 5222 /* Case 1: First test the request won't be split */ 5223 num_blocks = 32; 5224 5225 g_io_done = false; 5226 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_UNMAP, 0, num_blocks, 0); 5227 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 5228 rc = spdk_bdev_unmap_blocks(desc, ioch, 0, num_blocks, io_done, NULL); 5229 CU_ASSERT_EQUAL(rc, 0); 5230 CU_ASSERT(g_io_done == false); 5231 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 5232 stub_complete_io(1); 5233 CU_ASSERT(g_io_done == true); 5234 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 5235 5236 /* Case 2: Test the split with 2 children requests */ 5237 bdev->max_unmap = 8; 5238 bdev->max_unmap_segments = 2; 5239 max_unmap_blocks = bdev->max_unmap * bdev->max_unmap_segments; 5240 num_blocks = max_unmap_blocks * 2; 5241 offset = 0; 5242 5243 g_io_done = false; 5244 for (i = 0; i < 2; i++) { 5245 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_UNMAP, offset, max_unmap_blocks, 0); 5246 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 5247 offset += max_unmap_blocks; 5248 } 5249 5250 rc = spdk_bdev_unmap_blocks(desc, ioch, 0, num_blocks, io_done, NULL); 5251 CU_ASSERT_EQUAL(rc, 0); 5252 CU_ASSERT(g_io_done == false); 5253 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 5254 stub_complete_io(2); 5255 CU_ASSERT(g_io_done == true); 5256 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 5257 5258 /* Case 3: Test the split with 15 children requests, will finish 8 requests first */ 5259 num_children = 15; 5260 num_blocks = max_unmap_blocks * num_children; 5261 g_io_done = false; 5262 offset = 0; 5263 for (i = 0; i < num_children; i++) { 5264 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_UNMAP, offset, max_unmap_blocks, 0); 5265 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 5266 offset += max_unmap_blocks; 5267 } 5268 5269 rc = spdk_bdev_unmap_blocks(desc, ioch, 0, num_blocks, io_done, NULL); 5270 CU_ASSERT_EQUAL(rc, 0); 5271 CU_ASSERT(g_io_done == false); 5272 5273 while (num_children > 0) { 5274 num_outstanding = spdk_min(num_children, SPDK_BDEV_MAX_CHILDREN_UNMAP_WRITE_ZEROES_REQS); 5275 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == num_outstanding); 5276 stub_complete_io(num_outstanding); 5277 num_children -= num_outstanding; 5278 } 5279 CU_ASSERT(g_io_done == true); 5280 5281 spdk_put_io_channel(ioch); 5282 spdk_bdev_close(desc); 5283 free_bdev(bdev); 5284 ut_fini_bdev(); 5285 } 5286 5287 static void 5288 bdev_write_zeroes_split_test(void) 5289 { 5290 struct spdk_bdev *bdev; 5291 struct spdk_bdev_desc *desc = NULL; 5292 struct spdk_io_channel *ioch; 5293 struct spdk_bdev_channel *bdev_ch; 5294 struct ut_expected_io *expected_io; 5295 struct spdk_bdev_opts bdev_opts = {}; 5296 uint32_t i, num_outstanding; 5297 uint64_t offset, num_blocks, max_write_zeroes_blocks, num_children; 5298 int rc; 5299 5300 spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts)); 5301 bdev_opts.bdev_io_pool_size = 512; 5302 bdev_opts.bdev_io_cache_size = 64; 5303 ut_init_bdev(&bdev_opts); 5304 5305 bdev = allocate_bdev("bdev"); 5306 5307 rc = spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc); 5308 CU_ASSERT_EQUAL(rc, 0); 5309 SPDK_CU_ASSERT_FATAL(desc != NULL); 5310 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 5311 ioch = spdk_bdev_get_io_channel(desc); 5312 SPDK_CU_ASSERT_FATAL(ioch != NULL); 5313 bdev_ch = spdk_io_channel_get_ctx(ioch); 5314 CU_ASSERT(TAILQ_EMPTY(&bdev_ch->io_submitted)); 5315 5316 fn_table.submit_request = stub_submit_request; 5317 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 5318 5319 /* Case 1: First test the request won't be split */ 5320 num_blocks = 32; 5321 5322 g_io_done = false; 5323 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, 0, num_blocks, 0); 5324 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 5325 rc = spdk_bdev_write_zeroes_blocks(desc, ioch, 0, num_blocks, io_done, NULL); 5326 CU_ASSERT_EQUAL(rc, 0); 5327 CU_ASSERT(g_io_done == false); 5328 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 5329 stub_complete_io(1); 5330 CU_ASSERT(g_io_done == true); 5331 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 5332 5333 /* Case 2: Test the split with 2 children requests */ 5334 max_write_zeroes_blocks = 8; 5335 bdev->max_write_zeroes = max_write_zeroes_blocks; 5336 num_blocks = max_write_zeroes_blocks * 2; 5337 offset = 0; 5338 5339 g_io_done = false; 5340 for (i = 0; i < 2; i++) { 5341 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, offset, max_write_zeroes_blocks, 5342 0); 5343 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 5344 offset += max_write_zeroes_blocks; 5345 } 5346 5347 rc = spdk_bdev_write_zeroes_blocks(desc, ioch, 0, num_blocks, io_done, NULL); 5348 CU_ASSERT_EQUAL(rc, 0); 5349 CU_ASSERT(g_io_done == false); 5350 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 5351 stub_complete_io(2); 5352 CU_ASSERT(g_io_done == true); 5353 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 5354 5355 /* Case 3: Test the split with 15 children requests, will finish 8 requests first */ 5356 num_children = 15; 5357 num_blocks = max_write_zeroes_blocks * num_children; 5358 g_io_done = false; 5359 offset = 0; 5360 for (i = 0; i < num_children; i++) { 5361 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, offset, max_write_zeroes_blocks, 5362 0); 5363 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 5364 offset += max_write_zeroes_blocks; 5365 } 5366 5367 rc = spdk_bdev_write_zeroes_blocks(desc, ioch, 0, num_blocks, io_done, NULL); 5368 CU_ASSERT_EQUAL(rc, 0); 5369 CU_ASSERT(g_io_done == false); 5370 5371 while (num_children > 0) { 5372 num_outstanding = spdk_min(num_children, SPDK_BDEV_MAX_CHILDREN_UNMAP_WRITE_ZEROES_REQS); 5373 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == num_outstanding); 5374 stub_complete_io(num_outstanding); 5375 num_children -= num_outstanding; 5376 } 5377 CU_ASSERT(g_io_done == true); 5378 5379 spdk_put_io_channel(ioch); 5380 spdk_bdev_close(desc); 5381 free_bdev(bdev); 5382 ut_fini_bdev(); 5383 } 5384 5385 static void 5386 bdev_set_options_test(void) 5387 { 5388 struct spdk_bdev_opts bdev_opts = {}; 5389 int rc; 5390 5391 /* Case1: Do not set opts_size */ 5392 rc = spdk_bdev_set_opts(&bdev_opts); 5393 CU_ASSERT(rc == -1); 5394 5395 spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts)); 5396 bdev_opts.bdev_io_pool_size = 4; 5397 bdev_opts.bdev_io_cache_size = 2; 5398 bdev_opts.small_buf_pool_size = 4; 5399 5400 /* Case 2: Do not set valid small_buf_pool_size and large_buf_pool_size */ 5401 rc = spdk_bdev_set_opts(&bdev_opts); 5402 CU_ASSERT(rc == -1); 5403 5404 /* Case 3: Do not set valid large_buf_pool_size */ 5405 bdev_opts.small_buf_pool_size = BUF_SMALL_POOL_SIZE; 5406 bdev_opts.large_buf_pool_size = BUF_LARGE_POOL_SIZE - 1; 5407 rc = spdk_bdev_set_opts(&bdev_opts); 5408 CU_ASSERT(rc == -1); 5409 5410 /* Case4: set valid large buf_pool_size */ 5411 bdev_opts.large_buf_pool_size = BUF_LARGE_POOL_SIZE; 5412 rc = spdk_bdev_set_opts(&bdev_opts); 5413 CU_ASSERT(rc == 0); 5414 5415 /* Case5: Set different valid value for small and large buf pool */ 5416 bdev_opts.large_buf_pool_size = BUF_SMALL_POOL_SIZE + 3; 5417 bdev_opts.large_buf_pool_size = BUF_LARGE_POOL_SIZE + 3; 5418 rc = spdk_bdev_set_opts(&bdev_opts); 5419 CU_ASSERT(rc == 0); 5420 } 5421 5422 static uint64_t 5423 get_ns_time(void) 5424 { 5425 int rc; 5426 struct timespec ts; 5427 5428 rc = clock_gettime(CLOCK_MONOTONIC, &ts); 5429 CU_ASSERT(rc == 0); 5430 return ts.tv_sec * 1000 * 1000 * 1000 + ts.tv_nsec; 5431 } 5432 5433 static int 5434 rb_tree_get_height(struct spdk_bdev_name *bdev_name) 5435 { 5436 int h1, h2; 5437 5438 if (bdev_name == NULL) { 5439 return -1; 5440 } else { 5441 h1 = rb_tree_get_height(RB_LEFT(bdev_name, node)); 5442 h2 = rb_tree_get_height(RB_RIGHT(bdev_name, node)); 5443 5444 return spdk_max(h1, h2) + 1; 5445 } 5446 } 5447 5448 static void 5449 bdev_multi_allocation(void) 5450 { 5451 const int max_bdev_num = 1024 * 16; 5452 char name[max_bdev_num][16]; 5453 char noexist_name[] = "invalid_bdev"; 5454 struct spdk_bdev *bdev[max_bdev_num]; 5455 int i, j; 5456 uint64_t last_time; 5457 int bdev_num; 5458 int height; 5459 5460 for (j = 0; j < max_bdev_num; j++) { 5461 snprintf(name[j], sizeof(name[j]), "bdev%d", j); 5462 } 5463 5464 for (i = 0; i < 16; i++) { 5465 last_time = get_ns_time(); 5466 bdev_num = 1024 * (i + 1); 5467 for (j = 0; j < bdev_num; j++) { 5468 bdev[j] = allocate_bdev(name[j]); 5469 height = rb_tree_get_height(&bdev[j]->internal.bdev_name); 5470 CU_ASSERT(height <= (int)(spdk_u32log2(2 * j + 2))); 5471 } 5472 SPDK_NOTICELOG("alloc bdev num %d takes %" PRIu64 " ms\n", bdev_num, 5473 (get_ns_time() - last_time) / 1000 / 1000); 5474 for (j = 0; j < bdev_num; j++) { 5475 CU_ASSERT(spdk_bdev_get_by_name(name[j]) != NULL); 5476 } 5477 CU_ASSERT(spdk_bdev_get_by_name(noexist_name) == NULL); 5478 5479 for (j = 0; j < bdev_num; j++) { 5480 free_bdev(bdev[j]); 5481 } 5482 for (j = 0; j < bdev_num; j++) { 5483 CU_ASSERT(spdk_bdev_get_by_name(name[j]) == NULL); 5484 } 5485 } 5486 } 5487 5488 static struct spdk_memory_domain *g_bdev_memory_domain = (struct spdk_memory_domain *) 0xf00df00d; 5489 5490 static int 5491 test_bdev_get_supported_dma_device_types_op(void *ctx, struct spdk_memory_domain **domains, 5492 int array_size) 5493 { 5494 if (array_size > 0 && domains) { 5495 domains[0] = g_bdev_memory_domain; 5496 } 5497 5498 return 1; 5499 } 5500 5501 static void 5502 bdev_get_memory_domains(void) 5503 { 5504 struct spdk_bdev_fn_table fn_table = { 5505 .get_memory_domains = test_bdev_get_supported_dma_device_types_op 5506 }; 5507 struct spdk_bdev bdev = { .fn_table = &fn_table }; 5508 struct spdk_memory_domain *domains[2] = {}; 5509 int rc; 5510 5511 /* bdev is NULL */ 5512 rc = spdk_bdev_get_memory_domains(NULL, domains, 2); 5513 CU_ASSERT(rc == -EINVAL); 5514 5515 /* domains is NULL */ 5516 rc = spdk_bdev_get_memory_domains(&bdev, NULL, 2); 5517 CU_ASSERT(rc == 1); 5518 5519 /* array size is 0 */ 5520 rc = spdk_bdev_get_memory_domains(&bdev, domains, 0); 5521 CU_ASSERT(rc == 1); 5522 5523 /* get_supported_dma_device_types op is set */ 5524 rc = spdk_bdev_get_memory_domains(&bdev, domains, 2); 5525 CU_ASSERT(rc == 1); 5526 CU_ASSERT(domains[0] == g_bdev_memory_domain); 5527 5528 /* get_supported_dma_device_types op is not set */ 5529 fn_table.get_memory_domains = NULL; 5530 rc = spdk_bdev_get_memory_domains(&bdev, domains, 2); 5531 CU_ASSERT(rc == 0); 5532 } 5533 5534 static void 5535 _bdev_io_ext(struct spdk_bdev_ext_io_opts *ext_io_opts) 5536 { 5537 struct spdk_bdev *bdev; 5538 struct spdk_bdev_desc *desc = NULL; 5539 struct spdk_io_channel *io_ch; 5540 char io_buf[512]; 5541 struct iovec iov = { .iov_base = io_buf, .iov_len = 512 }; 5542 struct ut_expected_io *expected_io; 5543 int rc; 5544 5545 ut_init_bdev(NULL); 5546 5547 bdev = allocate_bdev("bdev0"); 5548 bdev->md_interleave = false; 5549 bdev->md_len = 8; 5550 5551 rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc); 5552 CU_ASSERT(rc == 0); 5553 SPDK_CU_ASSERT_FATAL(desc != NULL); 5554 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 5555 io_ch = spdk_bdev_get_io_channel(desc); 5556 CU_ASSERT(io_ch != NULL); 5557 5558 /* read */ 5559 g_io_done = false; 5560 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 32, 14, 1); 5561 if (ext_io_opts) { 5562 expected_io->md_buf = ext_io_opts->metadata; 5563 } 5564 ut_expected_io_set_iov(expected_io, 0, iov.iov_base, iov.iov_len); 5565 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 5566 5567 rc = spdk_bdev_readv_blocks_ext(desc, io_ch, &iov, 1, 32, 14, io_done, NULL, ext_io_opts); 5568 5569 CU_ASSERT(rc == 0); 5570 CU_ASSERT(g_io_done == false); 5571 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 5572 stub_complete_io(1); 5573 CU_ASSERT(g_io_done == true); 5574 5575 /* write */ 5576 g_io_done = false; 5577 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 32, 14, 1); 5578 if (ext_io_opts) { 5579 expected_io->md_buf = ext_io_opts->metadata; 5580 } 5581 ut_expected_io_set_iov(expected_io, 0, iov.iov_base, iov.iov_len); 5582 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 5583 5584 rc = spdk_bdev_writev_blocks_ext(desc, io_ch, &iov, 1, 32, 14, io_done, NULL, ext_io_opts); 5585 5586 CU_ASSERT(rc == 0); 5587 CU_ASSERT(g_io_done == false); 5588 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 5589 stub_complete_io(1); 5590 CU_ASSERT(g_io_done == true); 5591 5592 spdk_put_io_channel(io_ch); 5593 spdk_bdev_close(desc); 5594 free_bdev(bdev); 5595 ut_fini_bdev(); 5596 5597 } 5598 5599 static void 5600 bdev_io_ext(void) 5601 { 5602 struct spdk_bdev_ext_io_opts ext_io_opts = { 5603 .metadata = (void *)0xFF000000, 5604 .size = sizeof(ext_io_opts) 5605 }; 5606 5607 _bdev_io_ext(&ext_io_opts); 5608 } 5609 5610 static void 5611 bdev_io_ext_no_opts(void) 5612 { 5613 _bdev_io_ext(NULL); 5614 } 5615 5616 static void 5617 bdev_io_ext_invalid_opts(void) 5618 { 5619 struct spdk_bdev *bdev; 5620 struct spdk_bdev_desc *desc = NULL; 5621 struct spdk_io_channel *io_ch; 5622 char io_buf[512]; 5623 struct iovec iov = { .iov_base = io_buf, .iov_len = 512 }; 5624 struct spdk_bdev_ext_io_opts ext_io_opts = { 5625 .metadata = (void *)0xFF000000, 5626 .size = sizeof(ext_io_opts) 5627 }; 5628 int rc; 5629 5630 ut_init_bdev(NULL); 5631 5632 bdev = allocate_bdev("bdev0"); 5633 bdev->md_interleave = false; 5634 bdev->md_len = 8; 5635 5636 rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc); 5637 CU_ASSERT(rc == 0); 5638 SPDK_CU_ASSERT_FATAL(desc != NULL); 5639 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 5640 io_ch = spdk_bdev_get_io_channel(desc); 5641 CU_ASSERT(io_ch != NULL); 5642 5643 /* Test invalid ext_opts size */ 5644 ext_io_opts.size = 0; 5645 rc = spdk_bdev_readv_blocks_ext(desc, io_ch, &iov, 1, 32, 14, io_done, NULL, &ext_io_opts); 5646 CU_ASSERT(rc == -EINVAL); 5647 rc = spdk_bdev_writev_blocks_ext(desc, io_ch, &iov, 1, 32, 14, io_done, NULL, &ext_io_opts); 5648 CU_ASSERT(rc == -EINVAL); 5649 5650 ext_io_opts.size = sizeof(ext_io_opts) * 2; 5651 rc = spdk_bdev_readv_blocks_ext(desc, io_ch, &iov, 1, 32, 14, io_done, NULL, &ext_io_opts); 5652 CU_ASSERT(rc == -EINVAL); 5653 rc = spdk_bdev_writev_blocks_ext(desc, io_ch, &iov, 1, 32, 14, io_done, NULL, &ext_io_opts); 5654 CU_ASSERT(rc == -EINVAL); 5655 5656 ext_io_opts.size = offsetof(struct spdk_bdev_ext_io_opts, metadata) + 5657 sizeof(ext_io_opts.metadata) - 1; 5658 rc = spdk_bdev_readv_blocks_ext(desc, io_ch, &iov, 1, 32, 14, io_done, NULL, &ext_io_opts); 5659 CU_ASSERT(rc == -EINVAL); 5660 rc = spdk_bdev_writev_blocks_ext(desc, io_ch, &iov, 1, 32, 14, io_done, NULL, &ext_io_opts); 5661 CU_ASSERT(rc == -EINVAL); 5662 5663 spdk_put_io_channel(io_ch); 5664 spdk_bdev_close(desc); 5665 free_bdev(bdev); 5666 ut_fini_bdev(); 5667 } 5668 5669 static void 5670 bdev_io_ext_split(void) 5671 { 5672 struct spdk_bdev *bdev; 5673 struct spdk_bdev_desc *desc = NULL; 5674 struct spdk_io_channel *io_ch; 5675 char io_buf[512]; 5676 struct iovec iov = { .iov_base = io_buf, .iov_len = 512 }; 5677 struct ut_expected_io *expected_io; 5678 struct spdk_bdev_ext_io_opts ext_io_opts = { 5679 .metadata = (void *)0xFF000000, 5680 .size = sizeof(ext_io_opts) 5681 }; 5682 int rc; 5683 5684 ut_init_bdev(NULL); 5685 5686 bdev = allocate_bdev("bdev0"); 5687 bdev->md_interleave = false; 5688 bdev->md_len = 8; 5689 5690 rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc); 5691 CU_ASSERT(rc == 0); 5692 SPDK_CU_ASSERT_FATAL(desc != NULL); 5693 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 5694 io_ch = spdk_bdev_get_io_channel(desc); 5695 CU_ASSERT(io_ch != NULL); 5696 5697 /* Check that IO request with ext_opts and metadata is split correctly 5698 * Offset 14, length 8, payload 0xF000 5699 * Child - Offset 14, length 2, payload 0xF000 5700 * Child - Offset 16, length 6, payload 0xF000 + 2 * 512 5701 */ 5702 bdev->optimal_io_boundary = 16; 5703 bdev->split_on_optimal_io_boundary = true; 5704 bdev->md_interleave = false; 5705 bdev->md_len = 8; 5706 5707 iov.iov_base = (void *)0xF000; 5708 iov.iov_len = 4096; 5709 memset(&ext_io_opts, 0, sizeof(ext_io_opts)); 5710 ext_io_opts.metadata = (void *)0xFF000000; 5711 ext_io_opts.size = sizeof(ext_io_opts); 5712 g_io_done = false; 5713 5714 /* read */ 5715 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 14, 2, 1); 5716 expected_io->md_buf = ext_io_opts.metadata; 5717 ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 2 * 512); 5718 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 5719 5720 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 16, 6, 1); 5721 expected_io->md_buf = ext_io_opts.metadata + 2 * 8; 5722 ut_expected_io_set_iov(expected_io, 0, (void *)(0xF000 + 2 * 512), 6 * 512); 5723 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 5724 5725 rc = spdk_bdev_readv_blocks_ext(desc, io_ch, &iov, 1, 14, 8, io_done, NULL, &ext_io_opts); 5726 CU_ASSERT(rc == 0); 5727 CU_ASSERT(g_io_done == false); 5728 5729 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 5730 stub_complete_io(2); 5731 CU_ASSERT(g_io_done == true); 5732 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 5733 5734 /* write */ 5735 g_io_done = false; 5736 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 14, 2, 1); 5737 expected_io->md_buf = ext_io_opts.metadata; 5738 ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 2 * 512); 5739 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 5740 5741 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 16, 6, 1); 5742 expected_io->md_buf = ext_io_opts.metadata + 2 * 8; 5743 ut_expected_io_set_iov(expected_io, 0, (void *)(0xF000 + 2 * 512), 6 * 512); 5744 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 5745 5746 rc = spdk_bdev_writev_blocks_ext(desc, io_ch, &iov, 1, 14, 8, io_done, NULL, &ext_io_opts); 5747 CU_ASSERT(rc == 0); 5748 CU_ASSERT(g_io_done == false); 5749 5750 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 5751 stub_complete_io(2); 5752 CU_ASSERT(g_io_done == true); 5753 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 5754 5755 spdk_put_io_channel(io_ch); 5756 spdk_bdev_close(desc); 5757 free_bdev(bdev); 5758 ut_fini_bdev(); 5759 } 5760 5761 static void 5762 bdev_io_ext_bounce_buffer(void) 5763 { 5764 struct spdk_bdev *bdev; 5765 struct spdk_bdev_desc *desc = NULL; 5766 struct spdk_io_channel *io_ch; 5767 char io_buf[512]; 5768 struct iovec iov = { .iov_base = io_buf, .iov_len = 512 }; 5769 struct ut_expected_io *expected_io; 5770 struct spdk_bdev_ext_io_opts ext_io_opts = { 5771 .metadata = (void *)0xFF000000, 5772 .size = sizeof(ext_io_opts) 5773 }; 5774 int rc; 5775 5776 ut_init_bdev(NULL); 5777 5778 bdev = allocate_bdev("bdev0"); 5779 bdev->md_interleave = false; 5780 bdev->md_len = 8; 5781 5782 rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc); 5783 CU_ASSERT(rc == 0); 5784 SPDK_CU_ASSERT_FATAL(desc != NULL); 5785 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 5786 io_ch = spdk_bdev_get_io_channel(desc); 5787 CU_ASSERT(io_ch != NULL); 5788 5789 /* Verify data pull/push 5790 * bdev doesn't support memory domains, so buffers from bdev memory pool will be used */ 5791 ext_io_opts.memory_domain = (struct spdk_memory_domain *)0xdeadbeef; 5792 5793 /* read */ 5794 g_io_done = false; 5795 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 32, 14, 1); 5796 ut_expected_io_set_iov(expected_io, 0, iov.iov_base, iov.iov_len); 5797 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 5798 5799 rc = spdk_bdev_readv_blocks_ext(desc, io_ch, &iov, 1, 32, 14, io_done, NULL, &ext_io_opts); 5800 5801 CU_ASSERT(rc == 0); 5802 CU_ASSERT(g_io_done == false); 5803 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 5804 stub_complete_io(1); 5805 CU_ASSERT(g_memory_domain_push_data_called == true); 5806 CU_ASSERT(g_io_done == true); 5807 5808 /* write */ 5809 g_io_done = false; 5810 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 32, 14, 1); 5811 ut_expected_io_set_iov(expected_io, 0, iov.iov_base, iov.iov_len); 5812 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 5813 5814 rc = spdk_bdev_writev_blocks_ext(desc, io_ch, &iov, 1, 32, 14, io_done, NULL, &ext_io_opts); 5815 5816 CU_ASSERT(rc == 0); 5817 CU_ASSERT(g_memory_domain_pull_data_called == true); 5818 CU_ASSERT(g_io_done == false); 5819 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 5820 stub_complete_io(1); 5821 CU_ASSERT(g_io_done == true); 5822 5823 spdk_put_io_channel(io_ch); 5824 spdk_bdev_close(desc); 5825 free_bdev(bdev); 5826 ut_fini_bdev(); 5827 } 5828 5829 static void 5830 bdev_register_uuid_alias(void) 5831 { 5832 struct spdk_bdev *bdev, *second; 5833 char uuid[SPDK_UUID_STRING_LEN]; 5834 int rc; 5835 5836 ut_init_bdev(NULL); 5837 bdev = allocate_bdev("bdev0"); 5838 5839 /* Make sure an UUID was generated */ 5840 CU_ASSERT_FALSE(spdk_mem_all_zero(&bdev->uuid, sizeof(bdev->uuid))); 5841 5842 /* Check that an UUID alias was registered */ 5843 spdk_uuid_fmt_lower(uuid, sizeof(uuid), &bdev->uuid); 5844 CU_ASSERT_EQUAL(spdk_bdev_get_by_name(uuid), bdev); 5845 5846 /* Unregister the bdev */ 5847 spdk_bdev_unregister(bdev, NULL, NULL); 5848 poll_threads(); 5849 CU_ASSERT_PTR_NULL(spdk_bdev_get_by_name(uuid)); 5850 5851 /* Check the same, but this time register the bdev with non-zero UUID */ 5852 rc = spdk_bdev_register(bdev); 5853 CU_ASSERT_EQUAL(rc, 0); 5854 CU_ASSERT_EQUAL(spdk_bdev_get_by_name(uuid), bdev); 5855 5856 /* Unregister the bdev */ 5857 spdk_bdev_unregister(bdev, NULL, NULL); 5858 poll_threads(); 5859 CU_ASSERT_PTR_NULL(spdk_bdev_get_by_name(uuid)); 5860 5861 /* Regiser the bdev using UUID as the name */ 5862 bdev->name = uuid; 5863 rc = spdk_bdev_register(bdev); 5864 CU_ASSERT_EQUAL(rc, 0); 5865 CU_ASSERT_EQUAL(spdk_bdev_get_by_name(uuid), bdev); 5866 5867 /* Unregister the bdev */ 5868 spdk_bdev_unregister(bdev, NULL, NULL); 5869 poll_threads(); 5870 CU_ASSERT_PTR_NULL(spdk_bdev_get_by_name(uuid)); 5871 5872 /* Check that it's not possible to register two bdevs with the same UUIDs */ 5873 bdev->name = "bdev0"; 5874 second = allocate_bdev("bdev1"); 5875 spdk_uuid_copy(&bdev->uuid, &second->uuid); 5876 rc = spdk_bdev_register(bdev); 5877 CU_ASSERT_EQUAL(rc, -EEXIST); 5878 5879 /* Regenerate the UUID and re-check */ 5880 spdk_uuid_generate(&bdev->uuid); 5881 rc = spdk_bdev_register(bdev); 5882 CU_ASSERT_EQUAL(rc, 0); 5883 5884 /* And check that both bdevs can be retrieved through their UUIDs */ 5885 spdk_uuid_fmt_lower(uuid, sizeof(uuid), &bdev->uuid); 5886 CU_ASSERT_EQUAL(spdk_bdev_get_by_name(uuid), bdev); 5887 spdk_uuid_fmt_lower(uuid, sizeof(uuid), &second->uuid); 5888 CU_ASSERT_EQUAL(spdk_bdev_get_by_name(uuid), second); 5889 5890 free_bdev(second); 5891 free_bdev(bdev); 5892 ut_fini_bdev(); 5893 } 5894 5895 static void 5896 bdev_unregister_by_name(void) 5897 { 5898 struct spdk_bdev *bdev; 5899 int rc; 5900 5901 bdev = allocate_bdev("bdev"); 5902 5903 g_event_type1 = 0xFF; 5904 g_unregister_arg = NULL; 5905 g_unregister_rc = -1; 5906 5907 rc = spdk_bdev_unregister_by_name("bdev1", &bdev_ut_if, bdev_unregister_cb, (void *)0x12345678); 5908 CU_ASSERT(rc == -ENODEV); 5909 5910 rc = spdk_bdev_unregister_by_name("bdev", &vbdev_ut_if, bdev_unregister_cb, (void *)0x12345678); 5911 CU_ASSERT(rc == -ENODEV); 5912 5913 rc = spdk_bdev_unregister_by_name("bdev", &bdev_ut_if, bdev_unregister_cb, (void *)0x12345678); 5914 CU_ASSERT(rc == 0); 5915 5916 /* Check that unregister callback is delayed */ 5917 CU_ASSERT(g_unregister_arg == NULL); 5918 CU_ASSERT(g_unregister_rc == -1); 5919 5920 poll_threads(); 5921 5922 /* Event callback shall not be issued because device was closed */ 5923 CU_ASSERT(g_event_type1 == 0xFF); 5924 /* Unregister callback is issued */ 5925 CU_ASSERT(g_unregister_arg == (void *)0x12345678); 5926 CU_ASSERT(g_unregister_rc == 0); 5927 5928 free_bdev(bdev); 5929 } 5930 5931 static int 5932 count_bdevs(void *ctx, struct spdk_bdev *bdev) 5933 { 5934 int *count = ctx; 5935 5936 (*count)++; 5937 5938 return 0; 5939 } 5940 5941 static void 5942 for_each_bdev_test(void) 5943 { 5944 struct spdk_bdev *bdev[8]; 5945 int rc, count; 5946 5947 bdev[0] = allocate_bdev("bdev0"); 5948 bdev[0]->internal.status = SPDK_BDEV_STATUS_REMOVING; 5949 5950 bdev[1] = allocate_bdev("bdev1"); 5951 rc = spdk_bdev_module_claim_bdev(bdev[1], NULL, &bdev_ut_if); 5952 CU_ASSERT(rc == 0); 5953 5954 bdev[2] = allocate_bdev("bdev2"); 5955 5956 bdev[3] = allocate_bdev("bdev3"); 5957 rc = spdk_bdev_module_claim_bdev(bdev[3], NULL, &bdev_ut_if); 5958 CU_ASSERT(rc == 0); 5959 5960 bdev[4] = allocate_bdev("bdev4"); 5961 5962 bdev[5] = allocate_bdev("bdev5"); 5963 rc = spdk_bdev_module_claim_bdev(bdev[5], NULL, &bdev_ut_if); 5964 CU_ASSERT(rc == 0); 5965 5966 bdev[6] = allocate_bdev("bdev6"); 5967 5968 bdev[7] = allocate_bdev("bdev7"); 5969 5970 count = 0; 5971 rc = spdk_for_each_bdev(&count, count_bdevs); 5972 CU_ASSERT(rc == 0); 5973 CU_ASSERT(count == 7); 5974 5975 count = 0; 5976 rc = spdk_for_each_bdev_leaf(&count, count_bdevs); 5977 CU_ASSERT(rc == 0); 5978 CU_ASSERT(count == 4); 5979 5980 bdev[0]->internal.status = SPDK_BDEV_STATUS_READY; 5981 free_bdev(bdev[0]); 5982 free_bdev(bdev[1]); 5983 free_bdev(bdev[2]); 5984 free_bdev(bdev[3]); 5985 free_bdev(bdev[4]); 5986 free_bdev(bdev[5]); 5987 free_bdev(bdev[6]); 5988 free_bdev(bdev[7]); 5989 } 5990 5991 static void 5992 bdev_seek_test(void) 5993 { 5994 struct spdk_bdev *bdev; 5995 struct spdk_bdev_desc *desc = NULL; 5996 struct spdk_io_channel *io_ch; 5997 int rc; 5998 5999 ut_init_bdev(NULL); 6000 poll_threads(); 6001 6002 bdev = allocate_bdev("bdev0"); 6003 6004 rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc); 6005 CU_ASSERT(rc == 0); 6006 poll_threads(); 6007 SPDK_CU_ASSERT_FATAL(desc != NULL); 6008 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 6009 io_ch = spdk_bdev_get_io_channel(desc); 6010 CU_ASSERT(io_ch != NULL); 6011 6012 /* Seek data not supported */ 6013 ut_enable_io_type(SPDK_BDEV_IO_TYPE_SEEK_DATA, false); 6014 rc = spdk_bdev_seek_data(desc, io_ch, 0, bdev_seek_cb, NULL); 6015 CU_ASSERT(rc == 0); 6016 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 6017 poll_threads(); 6018 CU_ASSERT(g_seek_offset == 0); 6019 6020 /* Seek hole not supported */ 6021 ut_enable_io_type(SPDK_BDEV_IO_TYPE_SEEK_HOLE, false); 6022 rc = spdk_bdev_seek_hole(desc, io_ch, 0, bdev_seek_cb, NULL); 6023 CU_ASSERT(rc == 0); 6024 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 6025 poll_threads(); 6026 CU_ASSERT(g_seek_offset == UINT64_MAX); 6027 6028 /* Seek data supported */ 6029 g_seek_data_offset = 12345; 6030 ut_enable_io_type(SPDK_BDEV_IO_TYPE_SEEK_DATA, true); 6031 rc = spdk_bdev_seek_data(desc, io_ch, 0, bdev_seek_cb, NULL); 6032 CU_ASSERT(rc == 0); 6033 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 6034 stub_complete_io(1); 6035 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 6036 CU_ASSERT(g_seek_offset == 12345); 6037 6038 /* Seek hole supported */ 6039 g_seek_hole_offset = 67890; 6040 ut_enable_io_type(SPDK_BDEV_IO_TYPE_SEEK_HOLE, true); 6041 rc = spdk_bdev_seek_hole(desc, io_ch, 0, bdev_seek_cb, NULL); 6042 CU_ASSERT(rc == 0); 6043 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 6044 stub_complete_io(1); 6045 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 6046 CU_ASSERT(g_seek_offset == 67890); 6047 6048 spdk_put_io_channel(io_ch); 6049 spdk_bdev_close(desc); 6050 free_bdev(bdev); 6051 ut_fini_bdev(); 6052 } 6053 6054 static void 6055 bdev_copy(void) 6056 { 6057 struct spdk_bdev *bdev; 6058 struct spdk_bdev_desc *desc = NULL; 6059 struct spdk_io_channel *ioch; 6060 struct ut_expected_io *expected_io; 6061 uint64_t src_offset, num_blocks; 6062 uint32_t num_completed; 6063 int rc; 6064 6065 ut_init_bdev(NULL); 6066 bdev = allocate_bdev("bdev"); 6067 6068 rc = spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc); 6069 CU_ASSERT_EQUAL(rc, 0); 6070 SPDK_CU_ASSERT_FATAL(desc != NULL); 6071 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 6072 ioch = spdk_bdev_get_io_channel(desc); 6073 SPDK_CU_ASSERT_FATAL(ioch != NULL); 6074 6075 fn_table.submit_request = stub_submit_request; 6076 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 6077 6078 /* First test that if the bdev supports copy, the request won't be split */ 6079 bdev->md_len = 0; 6080 bdev->blocklen = 512; 6081 num_blocks = 128; 6082 src_offset = bdev->blockcnt - num_blocks; 6083 6084 expected_io = ut_alloc_expected_copy_io(SPDK_BDEV_IO_TYPE_COPY, 0, src_offset, num_blocks); 6085 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 6086 6087 rc = spdk_bdev_copy_blocks(desc, ioch, 0, src_offset, num_blocks, io_done, NULL); 6088 CU_ASSERT_EQUAL(rc, 0); 6089 num_completed = stub_complete_io(1); 6090 CU_ASSERT_EQUAL(num_completed, 1); 6091 6092 /* Check that if copy is not supported it'll still work */ 6093 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, src_offset, num_blocks, 0); 6094 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 6095 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 0, num_blocks, 0); 6096 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 6097 6098 ut_enable_io_type(SPDK_BDEV_IO_TYPE_COPY, false); 6099 6100 rc = spdk_bdev_copy_blocks(desc, ioch, 0, src_offset, num_blocks, io_done, NULL); 6101 CU_ASSERT_EQUAL(rc, 0); 6102 num_completed = stub_complete_io(1); 6103 CU_ASSERT_EQUAL(num_completed, 1); 6104 num_completed = stub_complete_io(1); 6105 CU_ASSERT_EQUAL(num_completed, 1); 6106 6107 ut_enable_io_type(SPDK_BDEV_IO_TYPE_COPY, true); 6108 spdk_put_io_channel(ioch); 6109 spdk_bdev_close(desc); 6110 free_bdev(bdev); 6111 ut_fini_bdev(); 6112 } 6113 6114 static void 6115 bdev_copy_split_test(void) 6116 { 6117 struct spdk_bdev *bdev; 6118 struct spdk_bdev_desc *desc = NULL; 6119 struct spdk_io_channel *ioch; 6120 struct spdk_bdev_channel *bdev_ch; 6121 struct ut_expected_io *expected_io; 6122 struct spdk_bdev_opts bdev_opts = {}; 6123 uint32_t i, num_outstanding; 6124 uint64_t offset, src_offset, num_blocks, max_copy_blocks, num_children; 6125 int rc; 6126 6127 spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts)); 6128 bdev_opts.bdev_io_pool_size = 512; 6129 bdev_opts.bdev_io_cache_size = 64; 6130 rc = spdk_bdev_set_opts(&bdev_opts); 6131 CU_ASSERT(rc == 0); 6132 6133 ut_init_bdev(NULL); 6134 bdev = allocate_bdev("bdev"); 6135 6136 rc = spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc); 6137 CU_ASSERT_EQUAL(rc, 0); 6138 SPDK_CU_ASSERT_FATAL(desc != NULL); 6139 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 6140 ioch = spdk_bdev_get_io_channel(desc); 6141 SPDK_CU_ASSERT_FATAL(ioch != NULL); 6142 bdev_ch = spdk_io_channel_get_ctx(ioch); 6143 CU_ASSERT(TAILQ_EMPTY(&bdev_ch->io_submitted)); 6144 6145 fn_table.submit_request = stub_submit_request; 6146 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 6147 6148 /* Case 1: First test the request won't be split */ 6149 num_blocks = 32; 6150 src_offset = bdev->blockcnt - num_blocks; 6151 6152 g_io_done = false; 6153 expected_io = ut_alloc_expected_copy_io(SPDK_BDEV_IO_TYPE_COPY, 0, src_offset, num_blocks); 6154 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 6155 rc = spdk_bdev_copy_blocks(desc, ioch, 0, src_offset, num_blocks, io_done, NULL); 6156 CU_ASSERT_EQUAL(rc, 0); 6157 CU_ASSERT(g_io_done == false); 6158 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 6159 stub_complete_io(1); 6160 CU_ASSERT(g_io_done == true); 6161 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 6162 6163 /* Case 2: Test the split with 2 children requests */ 6164 max_copy_blocks = 8; 6165 bdev->max_copy = max_copy_blocks; 6166 num_children = 2; 6167 num_blocks = max_copy_blocks * num_children; 6168 offset = 0; 6169 src_offset = bdev->blockcnt - num_blocks; 6170 6171 g_io_done = false; 6172 for (i = 0; i < num_children; i++) { 6173 expected_io = ut_alloc_expected_copy_io(SPDK_BDEV_IO_TYPE_COPY, offset, 6174 src_offset + offset, max_copy_blocks); 6175 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 6176 offset += max_copy_blocks; 6177 } 6178 6179 rc = spdk_bdev_copy_blocks(desc, ioch, 0, src_offset, num_blocks, io_done, NULL); 6180 CU_ASSERT_EQUAL(rc, 0); 6181 CU_ASSERT(g_io_done == false); 6182 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == num_children); 6183 stub_complete_io(num_children); 6184 CU_ASSERT(g_io_done == true); 6185 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 6186 6187 /* Case 3: Test the split with 15 children requests, will finish 8 requests first */ 6188 num_children = 15; 6189 num_blocks = max_copy_blocks * num_children; 6190 offset = 0; 6191 src_offset = bdev->blockcnt - num_blocks; 6192 6193 g_io_done = false; 6194 for (i = 0; i < num_children; i++) { 6195 expected_io = ut_alloc_expected_copy_io(SPDK_BDEV_IO_TYPE_COPY, offset, 6196 src_offset + offset, max_copy_blocks); 6197 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 6198 offset += max_copy_blocks; 6199 } 6200 6201 rc = spdk_bdev_copy_blocks(desc, ioch, 0, src_offset, num_blocks, io_done, NULL); 6202 CU_ASSERT_EQUAL(rc, 0); 6203 CU_ASSERT(g_io_done == false); 6204 6205 while (num_children > 0) { 6206 num_outstanding = spdk_min(num_children, SPDK_BDEV_MAX_CHILDREN_COPY_REQS); 6207 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == num_outstanding); 6208 stub_complete_io(num_outstanding); 6209 num_children -= num_outstanding; 6210 } 6211 CU_ASSERT(g_io_done == true); 6212 6213 spdk_put_io_channel(ioch); 6214 spdk_bdev_close(desc); 6215 free_bdev(bdev); 6216 ut_fini_bdev(); 6217 } 6218 6219 static void 6220 examine_claim_v1(struct spdk_bdev *bdev) 6221 { 6222 int rc; 6223 6224 rc = spdk_bdev_module_claim_bdev(bdev, NULL, &vbdev_ut_if); 6225 CU_ASSERT(rc == 0); 6226 } 6227 6228 static void 6229 examine_no_lock_held(struct spdk_bdev *bdev) 6230 { 6231 CU_ASSERT(!spdk_spin_held(&g_bdev_mgr.spinlock)); 6232 CU_ASSERT(!spdk_spin_held(&bdev->internal.spinlock)); 6233 } 6234 6235 struct examine_claim_v2_ctx { 6236 struct ut_examine_ctx examine_ctx; 6237 enum spdk_bdev_claim_type claim_type; 6238 struct spdk_bdev_desc *desc; 6239 }; 6240 6241 static void 6242 examine_claim_v2(struct spdk_bdev *bdev) 6243 { 6244 struct examine_claim_v2_ctx *ctx = bdev->ctxt; 6245 int rc; 6246 6247 rc = spdk_bdev_open_ext(bdev->name, false, bdev_ut_event_cb, NULL, &ctx->desc); 6248 CU_ASSERT(rc == 0); 6249 6250 rc = spdk_bdev_module_claim_bdev_desc(ctx->desc, ctx->claim_type, NULL, &vbdev_ut_if); 6251 CU_ASSERT(rc == 0); 6252 } 6253 6254 static void 6255 examine_locks(void) 6256 { 6257 struct spdk_bdev *bdev; 6258 struct ut_examine_ctx ctx = { 0 }; 6259 struct examine_claim_v2_ctx v2_ctx; 6260 6261 /* Without any claims, one code path is taken */ 6262 ctx.examine_config = examine_no_lock_held; 6263 ctx.examine_disk = examine_no_lock_held; 6264 bdev = allocate_bdev_ctx("bdev0", &ctx); 6265 CU_ASSERT(ctx.examine_config_count == 1); 6266 CU_ASSERT(ctx.examine_disk_count == 1); 6267 CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_NONE); 6268 CU_ASSERT(bdev->internal.claim.v1.module == NULL); 6269 free_bdev(bdev); 6270 6271 /* Exercise another path that is taken when examine_config() takes a v1 claim. */ 6272 memset(&ctx, 0, sizeof(ctx)); 6273 ctx.examine_config = examine_claim_v1; 6274 ctx.examine_disk = examine_no_lock_held; 6275 bdev = allocate_bdev_ctx("bdev0", &ctx); 6276 CU_ASSERT(ctx.examine_config_count == 1); 6277 CU_ASSERT(ctx.examine_disk_count == 1); 6278 CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_EXCL_WRITE); 6279 CU_ASSERT(bdev->internal.claim.v1.module == &vbdev_ut_if); 6280 spdk_bdev_module_release_bdev(bdev); 6281 CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_NONE); 6282 CU_ASSERT(bdev->internal.claim.v1.module == NULL); 6283 free_bdev(bdev); 6284 6285 /* Exercise the final path that comes with v2 claims. */ 6286 memset(&v2_ctx, 0, sizeof(v2_ctx)); 6287 v2_ctx.examine_ctx.examine_config = examine_claim_v2; 6288 v2_ctx.examine_ctx.examine_disk = examine_no_lock_held; 6289 v2_ctx.claim_type = SPDK_BDEV_CLAIM_READ_MANY_WRITE_NONE; 6290 bdev = allocate_bdev_ctx("bdev0", &v2_ctx); 6291 CU_ASSERT(v2_ctx.examine_ctx.examine_config_count == 1); 6292 CU_ASSERT(v2_ctx.examine_ctx.examine_disk_count == 1); 6293 CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_READ_MANY_WRITE_NONE); 6294 spdk_bdev_close(v2_ctx.desc); 6295 CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_NONE); 6296 free_bdev(bdev); 6297 } 6298 6299 #define UT_ASSERT_CLAIM_V2_COUNT(bdev, expect) \ 6300 do { \ 6301 uint32_t len = 0; \ 6302 struct spdk_bdev_module_claim *claim; \ 6303 TAILQ_FOREACH(claim, &bdev->internal.claim.v2.claims, link) { \ 6304 len++; \ 6305 } \ 6306 CU_ASSERT(len == expect); \ 6307 } while (0) 6308 6309 static void 6310 claim_v2_rwo(void) 6311 { 6312 struct spdk_bdev *bdev; 6313 struct spdk_bdev_desc *desc; 6314 struct spdk_bdev_desc *desc2; 6315 struct spdk_bdev_claim_opts opts; 6316 int rc; 6317 6318 bdev = allocate_bdev("bdev0"); 6319 6320 /* Claim without options */ 6321 desc = NULL; 6322 rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc); 6323 CU_ASSERT(rc == 0); 6324 SPDK_CU_ASSERT_FATAL(desc != NULL); 6325 rc = spdk_bdev_module_claim_bdev_desc(desc, SPDK_BDEV_CLAIM_READ_MANY_WRITE_ONE, NULL, 6326 &bdev_ut_if); 6327 CU_ASSERT(rc == 0); 6328 CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_READ_MANY_WRITE_ONE); 6329 CU_ASSERT(desc->claim != NULL); 6330 CU_ASSERT(desc->claim->module == &bdev_ut_if); 6331 CU_ASSERT(strcmp(desc->claim->name, "") == 0); 6332 CU_ASSERT(TAILQ_FIRST(&bdev->internal.claim.v2.claims) == desc->claim); 6333 UT_ASSERT_CLAIM_V2_COUNT(bdev, 1); 6334 6335 /* Release the claim by closing the descriptor */ 6336 spdk_bdev_close(desc); 6337 CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_NONE); 6338 CU_ASSERT(TAILQ_EMPTY(&bdev->internal.open_descs)); 6339 UT_ASSERT_CLAIM_V2_COUNT(bdev, 0); 6340 6341 /* Claim with options */ 6342 spdk_bdev_claim_opts_init(&opts, sizeof(opts)); 6343 snprintf(opts.name, sizeof(opts.name), "%s", "claim with options"); 6344 desc = NULL; 6345 rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc); 6346 CU_ASSERT(rc == 0); 6347 SPDK_CU_ASSERT_FATAL(desc != NULL); 6348 rc = spdk_bdev_module_claim_bdev_desc(desc, SPDK_BDEV_CLAIM_READ_MANY_WRITE_ONE, &opts, 6349 &bdev_ut_if); 6350 CU_ASSERT(rc == 0); 6351 CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_READ_MANY_WRITE_ONE); 6352 CU_ASSERT(desc->claim != NULL); 6353 CU_ASSERT(desc->claim->module == &bdev_ut_if); 6354 CU_ASSERT(strcmp(desc->claim->name, "claim with options") == 0); 6355 memset(&opts, 0, sizeof(opts)); 6356 CU_ASSERT(strcmp(desc->claim->name, "claim with options") == 0); 6357 CU_ASSERT(TAILQ_FIRST(&bdev->internal.claim.v2.claims) == desc->claim); 6358 UT_ASSERT_CLAIM_V2_COUNT(bdev, 1); 6359 6360 /* The claim blocks new writers. */ 6361 desc2 = NULL; 6362 rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc2); 6363 CU_ASSERT(rc == -EPERM); 6364 CU_ASSERT(desc2 == NULL); 6365 6366 /* New readers are allowed */ 6367 desc2 = NULL; 6368 rc = spdk_bdev_open_ext("bdev0", false, bdev_ut_event_cb, NULL, &desc2); 6369 CU_ASSERT(rc == 0); 6370 CU_ASSERT(desc2 != NULL); 6371 CU_ASSERT(!desc2->write); 6372 6373 /* No new v2 RWO claims are allowed */ 6374 rc = spdk_bdev_module_claim_bdev_desc(desc2, SPDK_BDEV_CLAIM_READ_MANY_WRITE_ONE, NULL, 6375 &bdev_ut_if); 6376 CU_ASSERT(rc == -EPERM); 6377 6378 /* No new v2 ROM claims are allowed */ 6379 CU_ASSERT(!desc2->write); 6380 rc = spdk_bdev_module_claim_bdev_desc(desc2, SPDK_BDEV_CLAIM_READ_MANY_WRITE_NONE, NULL, 6381 &bdev_ut_if); 6382 CU_ASSERT(rc == -EPERM); 6383 CU_ASSERT(!desc2->write); 6384 6385 /* No new v2 RWM claims are allowed */ 6386 spdk_bdev_claim_opts_init(&opts, sizeof(opts)); 6387 opts.shared_claim_key = (uint64_t)&opts; 6388 rc = spdk_bdev_module_claim_bdev_desc(desc2, SPDK_BDEV_CLAIM_READ_MANY_WRITE_SHARED, &opts, 6389 &bdev_ut_if); 6390 CU_ASSERT(rc == -EPERM); 6391 CU_ASSERT(!desc2->write); 6392 6393 /* No new v1 claims are allowed */ 6394 rc = spdk_bdev_module_claim_bdev(bdev, NULL, &bdev_ut_if); 6395 CU_ASSERT(rc == -EPERM); 6396 6397 /* None of the above changed the existing claim */ 6398 CU_ASSERT(TAILQ_FIRST(&bdev->internal.claim.v2.claims) == desc->claim); 6399 UT_ASSERT_CLAIM_V2_COUNT(bdev, 1); 6400 6401 /* Closing the first descriptor now allows a new claim and it is promoted to rw. */ 6402 spdk_bdev_close(desc); 6403 CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_NONE); 6404 UT_ASSERT_CLAIM_V2_COUNT(bdev, 0); 6405 CU_ASSERT(!desc2->write); 6406 rc = spdk_bdev_module_claim_bdev_desc(desc2, SPDK_BDEV_CLAIM_READ_MANY_WRITE_ONE, NULL, 6407 &bdev_ut_if); 6408 CU_ASSERT(rc == 0); 6409 CU_ASSERT(desc2->claim != NULL); 6410 CU_ASSERT(desc2->write); 6411 CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_READ_MANY_WRITE_ONE); 6412 CU_ASSERT(TAILQ_FIRST(&bdev->internal.claim.v2.claims) == desc2->claim); 6413 UT_ASSERT_CLAIM_V2_COUNT(bdev, 1); 6414 spdk_bdev_close(desc2); 6415 CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_NONE); 6416 UT_ASSERT_CLAIM_V2_COUNT(bdev, 0); 6417 6418 /* Cannot claim with a key */ 6419 desc = NULL; 6420 rc = spdk_bdev_open_ext("bdev0", false, bdev_ut_event_cb, NULL, &desc); 6421 CU_ASSERT(rc == 0); 6422 SPDK_CU_ASSERT_FATAL(desc != NULL); 6423 spdk_bdev_claim_opts_init(&opts, sizeof(opts)); 6424 opts.shared_claim_key = (uint64_t)&opts; 6425 rc = spdk_bdev_module_claim_bdev_desc(desc, SPDK_BDEV_CLAIM_READ_MANY_WRITE_ONE, &opts, 6426 &bdev_ut_if); 6427 CU_ASSERT(rc == -EINVAL); 6428 CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_NONE); 6429 UT_ASSERT_CLAIM_V2_COUNT(bdev, 0); 6430 spdk_bdev_close(desc); 6431 6432 /* Clean up */ 6433 free_bdev(bdev); 6434 } 6435 6436 static void 6437 claim_v2_rom(void) 6438 { 6439 struct spdk_bdev *bdev; 6440 struct spdk_bdev_desc *desc; 6441 struct spdk_bdev_desc *desc2; 6442 struct spdk_bdev_claim_opts opts; 6443 int rc; 6444 6445 bdev = allocate_bdev("bdev0"); 6446 6447 /* Claim without options */ 6448 desc = NULL; 6449 rc = spdk_bdev_open_ext("bdev0", false, bdev_ut_event_cb, NULL, &desc); 6450 CU_ASSERT(rc == 0); 6451 SPDK_CU_ASSERT_FATAL(desc != NULL); 6452 rc = spdk_bdev_module_claim_bdev_desc(desc, SPDK_BDEV_CLAIM_READ_MANY_WRITE_NONE, NULL, 6453 &bdev_ut_if); 6454 CU_ASSERT(rc == 0); 6455 CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_READ_MANY_WRITE_NONE); 6456 CU_ASSERT(desc->claim != NULL); 6457 CU_ASSERT(desc->claim->module == &bdev_ut_if); 6458 CU_ASSERT(strcmp(desc->claim->name, "") == 0); 6459 CU_ASSERT(TAILQ_FIRST(&bdev->internal.claim.v2.claims) == desc->claim); 6460 UT_ASSERT_CLAIM_V2_COUNT(bdev, 1); 6461 6462 /* Release the claim by closing the descriptor */ 6463 spdk_bdev_close(desc); 6464 CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_NONE); 6465 CU_ASSERT(TAILQ_EMPTY(&bdev->internal.open_descs)); 6466 CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_NONE); 6467 UT_ASSERT_CLAIM_V2_COUNT(bdev, 0); 6468 6469 /* Claim with options */ 6470 spdk_bdev_claim_opts_init(&opts, sizeof(opts)); 6471 snprintf(opts.name, sizeof(opts.name), "%s", "claim with options"); 6472 desc = NULL; 6473 rc = spdk_bdev_open_ext("bdev0", false, bdev_ut_event_cb, NULL, &desc); 6474 CU_ASSERT(rc == 0); 6475 SPDK_CU_ASSERT_FATAL(desc != NULL); 6476 rc = spdk_bdev_module_claim_bdev_desc(desc, SPDK_BDEV_CLAIM_READ_MANY_WRITE_NONE, &opts, 6477 &bdev_ut_if); 6478 CU_ASSERT(rc == 0); 6479 CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_READ_MANY_WRITE_NONE); 6480 SPDK_CU_ASSERT_FATAL(desc->claim != NULL); 6481 CU_ASSERT(desc->claim->module == &bdev_ut_if); 6482 CU_ASSERT(strcmp(desc->claim->name, "claim with options") == 0); 6483 memset(&opts, 0, sizeof(opts)); 6484 CU_ASSERT(strcmp(desc->claim->name, "claim with options") == 0); 6485 CU_ASSERT(TAILQ_FIRST(&bdev->internal.claim.v2.claims) == desc->claim); 6486 UT_ASSERT_CLAIM_V2_COUNT(bdev, 1); 6487 6488 /* The claim blocks new writers. */ 6489 desc2 = NULL; 6490 rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc2); 6491 CU_ASSERT(rc == -EPERM); 6492 CU_ASSERT(desc2 == NULL); 6493 6494 /* New readers are allowed */ 6495 desc2 = NULL; 6496 rc = spdk_bdev_open_ext("bdev0", false, bdev_ut_event_cb, NULL, &desc2); 6497 CU_ASSERT(rc == 0); 6498 CU_ASSERT(desc2 != NULL); 6499 CU_ASSERT(!desc2->write); 6500 6501 /* No new v2 RWO claims are allowed */ 6502 rc = spdk_bdev_module_claim_bdev_desc(desc2, SPDK_BDEV_CLAIM_READ_MANY_WRITE_ONE, NULL, 6503 &bdev_ut_if); 6504 CU_ASSERT(rc == -EPERM); 6505 6506 /* No new v2 RWM claims are allowed */ 6507 spdk_bdev_claim_opts_init(&opts, sizeof(opts)); 6508 opts.shared_claim_key = (uint64_t)&opts; 6509 rc = spdk_bdev_module_claim_bdev_desc(desc2, SPDK_BDEV_CLAIM_READ_MANY_WRITE_SHARED, &opts, 6510 &bdev_ut_if); 6511 CU_ASSERT(rc == -EPERM); 6512 CU_ASSERT(!desc2->write); 6513 6514 /* No new v1 claims are allowed */ 6515 rc = spdk_bdev_module_claim_bdev(bdev, NULL, &bdev_ut_if); 6516 CU_ASSERT(rc == -EPERM); 6517 6518 /* None of the above messed up the existing claim */ 6519 CU_ASSERT(TAILQ_FIRST(&bdev->internal.claim.v2.claims) == desc->claim); 6520 UT_ASSERT_CLAIM_V2_COUNT(bdev, 1); 6521 6522 /* New v2 ROM claims are allowed and the descriptor stays read-only. */ 6523 CU_ASSERT(!desc2->write); 6524 rc = spdk_bdev_module_claim_bdev_desc(desc2, SPDK_BDEV_CLAIM_READ_MANY_WRITE_NONE, NULL, 6525 &bdev_ut_if); 6526 CU_ASSERT(rc == 0); 6527 CU_ASSERT(!desc2->write); 6528 CU_ASSERT(TAILQ_FIRST(&bdev->internal.claim.v2.claims) == desc->claim); 6529 CU_ASSERT(TAILQ_NEXT(desc->claim, link) == desc2->claim); 6530 UT_ASSERT_CLAIM_V2_COUNT(bdev, 2); 6531 6532 /* Claim remains when closing the first descriptor */ 6533 spdk_bdev_close(desc); 6534 CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_READ_MANY_WRITE_NONE); 6535 CU_ASSERT(!TAILQ_EMPTY(&bdev->internal.open_descs)); 6536 CU_ASSERT(TAILQ_FIRST(&bdev->internal.claim.v2.claims) == desc2->claim); 6537 UT_ASSERT_CLAIM_V2_COUNT(bdev, 1); 6538 6539 /* Claim removed when closing the other descriptor */ 6540 spdk_bdev_close(desc2); 6541 CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_NONE); 6542 UT_ASSERT_CLAIM_V2_COUNT(bdev, 0); 6543 CU_ASSERT(TAILQ_EMPTY(&bdev->internal.open_descs)); 6544 6545 /* Cannot claim with a key */ 6546 desc = NULL; 6547 rc = spdk_bdev_open_ext("bdev0", false, bdev_ut_event_cb, NULL, &desc); 6548 CU_ASSERT(rc == 0); 6549 SPDK_CU_ASSERT_FATAL(desc != NULL); 6550 spdk_bdev_claim_opts_init(&opts, sizeof(opts)); 6551 opts.shared_claim_key = (uint64_t)&opts; 6552 rc = spdk_bdev_module_claim_bdev_desc(desc, SPDK_BDEV_CLAIM_READ_MANY_WRITE_NONE, &opts, 6553 &bdev_ut_if); 6554 CU_ASSERT(rc == -EINVAL); 6555 CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_NONE); 6556 UT_ASSERT_CLAIM_V2_COUNT(bdev, 0); 6557 spdk_bdev_close(desc); 6558 6559 /* Cannot claim with a read-write descriptor */ 6560 desc = NULL; 6561 rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc); 6562 CU_ASSERT(rc == 0); 6563 SPDK_CU_ASSERT_FATAL(desc != NULL); 6564 rc = spdk_bdev_module_claim_bdev_desc(desc, SPDK_BDEV_CLAIM_READ_MANY_WRITE_NONE, NULL, 6565 &bdev_ut_if); 6566 CU_ASSERT(rc == -EINVAL); 6567 CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_NONE); 6568 UT_ASSERT_CLAIM_V2_COUNT(bdev, 0); 6569 spdk_bdev_close(desc); 6570 CU_ASSERT(TAILQ_EMPTY(&bdev->internal.open_descs)); 6571 6572 /* Clean up */ 6573 free_bdev(bdev); 6574 } 6575 6576 static void 6577 claim_v2_rwm(void) 6578 { 6579 struct spdk_bdev *bdev; 6580 struct spdk_bdev_desc *desc; 6581 struct spdk_bdev_desc *desc2; 6582 struct spdk_bdev_claim_opts opts; 6583 char good_key, bad_key; 6584 int rc; 6585 6586 bdev = allocate_bdev("bdev0"); 6587 6588 /* Claim without options should fail */ 6589 desc = NULL; 6590 rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc); 6591 CU_ASSERT(rc == 0); 6592 SPDK_CU_ASSERT_FATAL(desc != NULL); 6593 rc = spdk_bdev_module_claim_bdev_desc(desc, SPDK_BDEV_CLAIM_READ_MANY_WRITE_SHARED, NULL, 6594 &bdev_ut_if); 6595 CU_ASSERT(rc == -EINVAL); 6596 CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_NONE); 6597 UT_ASSERT_CLAIM_V2_COUNT(bdev, 0); 6598 CU_ASSERT(desc->claim == NULL); 6599 6600 /* Claim with options */ 6601 spdk_bdev_claim_opts_init(&opts, sizeof(opts)); 6602 snprintf(opts.name, sizeof(opts.name), "%s", "claim with options"); 6603 opts.shared_claim_key = (uint64_t)&good_key; 6604 rc = spdk_bdev_module_claim_bdev_desc(desc, SPDK_BDEV_CLAIM_READ_MANY_WRITE_SHARED, &opts, 6605 &bdev_ut_if); 6606 CU_ASSERT(rc == 0); 6607 CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_READ_MANY_WRITE_SHARED); 6608 SPDK_CU_ASSERT_FATAL(desc->claim != NULL); 6609 CU_ASSERT(desc->claim->module == &bdev_ut_if); 6610 CU_ASSERT(strcmp(desc->claim->name, "claim with options") == 0); 6611 memset(&opts, 0, sizeof(opts)); 6612 CU_ASSERT(strcmp(desc->claim->name, "claim with options") == 0); 6613 CU_ASSERT(TAILQ_FIRST(&bdev->internal.claim.v2.claims) == desc->claim); 6614 UT_ASSERT_CLAIM_V2_COUNT(bdev, 1); 6615 6616 /* The claim blocks new writers. */ 6617 desc2 = NULL; 6618 rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc2); 6619 CU_ASSERT(rc == -EPERM); 6620 CU_ASSERT(desc2 == NULL); 6621 6622 /* New readers are allowed */ 6623 desc2 = NULL; 6624 rc = spdk_bdev_open_ext("bdev0", false, bdev_ut_event_cb, NULL, &desc2); 6625 CU_ASSERT(rc == 0); 6626 CU_ASSERT(desc2 != NULL); 6627 CU_ASSERT(!desc2->write); 6628 6629 /* No new v2 RWO claims are allowed */ 6630 rc = spdk_bdev_module_claim_bdev_desc(desc2, SPDK_BDEV_CLAIM_READ_MANY_WRITE_ONE, NULL, 6631 &bdev_ut_if); 6632 CU_ASSERT(rc == -EPERM); 6633 6634 /* No new v2 ROM claims are allowed and the descriptor stays read-only. */ 6635 CU_ASSERT(!desc2->write); 6636 rc = spdk_bdev_module_claim_bdev_desc(desc2, SPDK_BDEV_CLAIM_READ_MANY_WRITE_NONE, NULL, 6637 &bdev_ut_if); 6638 CU_ASSERT(rc == -EPERM); 6639 CU_ASSERT(!desc2->write); 6640 6641 /* No new v1 claims are allowed */ 6642 rc = spdk_bdev_module_claim_bdev(bdev, NULL, &bdev_ut_if); 6643 CU_ASSERT(rc == -EPERM); 6644 6645 /* No new v2 RWM claims are allowed if the key does not match */ 6646 spdk_bdev_claim_opts_init(&opts, sizeof(opts)); 6647 opts.shared_claim_key = (uint64_t)&bad_key; 6648 rc = spdk_bdev_module_claim_bdev_desc(desc2, SPDK_BDEV_CLAIM_READ_MANY_WRITE_SHARED, &opts, 6649 &bdev_ut_if); 6650 CU_ASSERT(rc == -EPERM); 6651 CU_ASSERT(!desc2->write); 6652 6653 /* None of the above messed up the existing claim */ 6654 CU_ASSERT(TAILQ_FIRST(&bdev->internal.claim.v2.claims) == desc->claim); 6655 UT_ASSERT_CLAIM_V2_COUNT(bdev, 1); 6656 6657 /* New v2 RWM claims are allowed and the descriptor is promoted if the key matches. */ 6658 spdk_bdev_claim_opts_init(&opts, sizeof(opts)); 6659 opts.shared_claim_key = (uint64_t)&good_key; 6660 CU_ASSERT(!desc2->write); 6661 rc = spdk_bdev_module_claim_bdev_desc(desc2, SPDK_BDEV_CLAIM_READ_MANY_WRITE_SHARED, &opts, 6662 &bdev_ut_if); 6663 CU_ASSERT(rc == 0); 6664 CU_ASSERT(desc2->write); 6665 CU_ASSERT(TAILQ_NEXT(desc->claim, link) == desc2->claim); 6666 UT_ASSERT_CLAIM_V2_COUNT(bdev, 2); 6667 6668 /* Claim remains when closing the first descriptor */ 6669 spdk_bdev_close(desc); 6670 CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_READ_MANY_WRITE_SHARED); 6671 CU_ASSERT(!TAILQ_EMPTY(&bdev->internal.open_descs)); 6672 CU_ASSERT(TAILQ_FIRST(&bdev->internal.claim.v2.claims) == desc2->claim); 6673 UT_ASSERT_CLAIM_V2_COUNT(bdev, 1); 6674 6675 /* Claim removed when closing the other descriptor */ 6676 spdk_bdev_close(desc2); 6677 CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_NONE); 6678 CU_ASSERT(TAILQ_EMPTY(&bdev->internal.open_descs)); 6679 6680 /* Cannot claim without a key */ 6681 desc = NULL; 6682 rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc); 6683 CU_ASSERT(rc == 0); 6684 SPDK_CU_ASSERT_FATAL(desc != NULL); 6685 spdk_bdev_claim_opts_init(&opts, sizeof(opts)); 6686 rc = spdk_bdev_module_claim_bdev_desc(desc, SPDK_BDEV_CLAIM_READ_MANY_WRITE_SHARED, &opts, 6687 &bdev_ut_if); 6688 CU_ASSERT(rc == -EINVAL); 6689 spdk_bdev_close(desc); 6690 CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_NONE); 6691 CU_ASSERT(TAILQ_EMPTY(&bdev->internal.open_descs)); 6692 6693 /* Clean up */ 6694 free_bdev(bdev); 6695 } 6696 6697 static void 6698 claim_v2_existing_writer(void) 6699 { 6700 struct spdk_bdev *bdev; 6701 struct spdk_bdev_desc *desc; 6702 struct spdk_bdev_desc *desc2; 6703 struct spdk_bdev_claim_opts opts; 6704 enum spdk_bdev_claim_type type; 6705 enum spdk_bdev_claim_type types[] = { 6706 SPDK_BDEV_CLAIM_READ_MANY_WRITE_ONE, 6707 SPDK_BDEV_CLAIM_READ_MANY_WRITE_SHARED, 6708 SPDK_BDEV_CLAIM_READ_MANY_WRITE_NONE 6709 }; 6710 size_t i; 6711 int rc; 6712 6713 bdev = allocate_bdev("bdev0"); 6714 6715 desc = NULL; 6716 rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc); 6717 CU_ASSERT(rc == 0); 6718 SPDK_CU_ASSERT_FATAL(desc != NULL); 6719 desc2 = NULL; 6720 rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc2); 6721 CU_ASSERT(rc == 0); 6722 SPDK_CU_ASSERT_FATAL(desc2 != NULL); 6723 6724 for (i = 0; i < SPDK_COUNTOF(types); i++) { 6725 type = types[i]; 6726 spdk_bdev_claim_opts_init(&opts, sizeof(opts)); 6727 if (type == SPDK_BDEV_CLAIM_READ_MANY_WRITE_SHARED) { 6728 opts.shared_claim_key = (uint64_t)&opts; 6729 } 6730 rc = spdk_bdev_module_claim_bdev_desc(desc, type, &opts, &bdev_ut_if); 6731 if (type == SPDK_BDEV_CLAIM_READ_MANY_WRITE_NONE) { 6732 CU_ASSERT(rc == -EINVAL); 6733 } else { 6734 CU_ASSERT(rc == -EPERM); 6735 } 6736 CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_NONE); 6737 rc = spdk_bdev_module_claim_bdev_desc(desc2, type, &opts, &bdev_ut_if); 6738 if (type == SPDK_BDEV_CLAIM_READ_MANY_WRITE_NONE) { 6739 CU_ASSERT(rc == -EINVAL); 6740 } else { 6741 CU_ASSERT(rc == -EPERM); 6742 } 6743 CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_NONE); 6744 } 6745 6746 spdk_bdev_close(desc); 6747 spdk_bdev_close(desc2); 6748 6749 /* Clean up */ 6750 free_bdev(bdev); 6751 } 6752 6753 static void 6754 claim_v2_existing_v1(void) 6755 { 6756 struct spdk_bdev *bdev; 6757 struct spdk_bdev_desc *desc; 6758 struct spdk_bdev_claim_opts opts; 6759 enum spdk_bdev_claim_type type; 6760 enum spdk_bdev_claim_type types[] = { 6761 SPDK_BDEV_CLAIM_READ_MANY_WRITE_ONE, 6762 SPDK_BDEV_CLAIM_READ_MANY_WRITE_SHARED, 6763 SPDK_BDEV_CLAIM_READ_MANY_WRITE_NONE 6764 }; 6765 size_t i; 6766 int rc; 6767 6768 bdev = allocate_bdev("bdev0"); 6769 6770 rc = spdk_bdev_module_claim_bdev(bdev, NULL, &bdev_ut_if); 6771 CU_ASSERT(rc == 0); 6772 CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_EXCL_WRITE); 6773 6774 desc = NULL; 6775 rc = spdk_bdev_open_ext("bdev0", false, bdev_ut_event_cb, NULL, &desc); 6776 CU_ASSERT(rc == 0); 6777 SPDK_CU_ASSERT_FATAL(desc != NULL); 6778 6779 for (i = 0; i < SPDK_COUNTOF(types); i++) { 6780 type = types[i]; 6781 spdk_bdev_claim_opts_init(&opts, sizeof(opts)); 6782 if (type == SPDK_BDEV_CLAIM_READ_MANY_WRITE_SHARED) { 6783 opts.shared_claim_key = (uint64_t)&opts; 6784 } 6785 rc = spdk_bdev_module_claim_bdev_desc(desc, type, &opts, &bdev_ut_if); 6786 CU_ASSERT(rc == -EPERM); 6787 CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_EXCL_WRITE); 6788 } 6789 6790 spdk_bdev_module_release_bdev(bdev); 6791 spdk_bdev_close(desc); 6792 6793 /* Clean up */ 6794 free_bdev(bdev); 6795 } 6796 6797 static void 6798 claim_v1_existing_v2(void) 6799 { 6800 struct spdk_bdev *bdev; 6801 struct spdk_bdev_desc *desc; 6802 struct spdk_bdev_claim_opts opts; 6803 enum spdk_bdev_claim_type type; 6804 enum spdk_bdev_claim_type types[] = { 6805 SPDK_BDEV_CLAIM_READ_MANY_WRITE_ONE, 6806 SPDK_BDEV_CLAIM_READ_MANY_WRITE_SHARED, 6807 SPDK_BDEV_CLAIM_READ_MANY_WRITE_NONE 6808 }; 6809 size_t i; 6810 int rc; 6811 6812 bdev = allocate_bdev("bdev0"); 6813 6814 for (i = 0; i < SPDK_COUNTOF(types); i++) { 6815 type = types[i]; 6816 6817 desc = NULL; 6818 rc = spdk_bdev_open_ext("bdev0", false, bdev_ut_event_cb, NULL, &desc); 6819 CU_ASSERT(rc == 0); 6820 SPDK_CU_ASSERT_FATAL(desc != NULL); 6821 6822 /* Get a v2 claim */ 6823 spdk_bdev_claim_opts_init(&opts, sizeof(opts)); 6824 if (type == SPDK_BDEV_CLAIM_READ_MANY_WRITE_SHARED) { 6825 opts.shared_claim_key = (uint64_t)&opts; 6826 } 6827 rc = spdk_bdev_module_claim_bdev_desc(desc, type, &opts, &bdev_ut_if); 6828 CU_ASSERT(rc == 0); 6829 6830 /* Fail to get a v1 claim */ 6831 rc = spdk_bdev_module_claim_bdev(bdev, NULL, &bdev_ut_if); 6832 CU_ASSERT(rc == -EPERM); 6833 6834 spdk_bdev_close(desc); 6835 6836 /* Now v1 succeeds */ 6837 rc = spdk_bdev_module_claim_bdev(bdev, NULL, &bdev_ut_if); 6838 CU_ASSERT(rc == 0) 6839 spdk_bdev_module_release_bdev(bdev); 6840 } 6841 6842 /* Clean up */ 6843 free_bdev(bdev); 6844 } 6845 6846 static void ut_examine_claimed_config0(struct spdk_bdev *bdev); 6847 static void ut_examine_claimed_disk0(struct spdk_bdev *bdev); 6848 static void ut_examine_claimed_config1(struct spdk_bdev *bdev); 6849 static void ut_examine_claimed_disk1(struct spdk_bdev *bdev); 6850 6851 #define UT_MAX_EXAMINE_MODS 2 6852 struct spdk_bdev_module examine_claimed_mods[UT_MAX_EXAMINE_MODS] = { 6853 { 6854 .name = "vbdev_ut_examine0", 6855 .module_init = vbdev_ut_module_init, 6856 .module_fini = vbdev_ut_module_fini, 6857 .examine_config = ut_examine_claimed_config0, 6858 .examine_disk = ut_examine_claimed_disk0, 6859 }, 6860 { 6861 .name = "vbdev_ut_examine1", 6862 .module_init = vbdev_ut_module_init, 6863 .module_fini = vbdev_ut_module_fini, 6864 .examine_config = ut_examine_claimed_config1, 6865 .examine_disk = ut_examine_claimed_disk1, 6866 } 6867 }; 6868 6869 SPDK_BDEV_MODULE_REGISTER(bdev_ut_claimed0, &examine_claimed_mods[0]) 6870 SPDK_BDEV_MODULE_REGISTER(bdev_ut_claimed1, &examine_claimed_mods[1]) 6871 6872 struct ut_examine_claimed_ctx { 6873 uint32_t examine_config_count; 6874 uint32_t examine_disk_count; 6875 6876 /* Claim type to take, with these options */ 6877 enum spdk_bdev_claim_type claim_type; 6878 struct spdk_bdev_claim_opts claim_opts; 6879 6880 /* Expected return value from spdk_bdev_module_claim_bdev_desc() */ 6881 int expect_claim_err; 6882 6883 /* Descriptor used for a claim */ 6884 struct spdk_bdev_desc *desc; 6885 } examine_claimed_ctx[UT_MAX_EXAMINE_MODS]; 6886 6887 bool ut_testing_examine_claimed; 6888 6889 static void 6890 reset_examine_claimed_ctx(void) 6891 { 6892 struct ut_examine_claimed_ctx *ctx; 6893 uint32_t i; 6894 6895 for (i = 0; i < SPDK_COUNTOF(examine_claimed_ctx); i++) { 6896 ctx = &examine_claimed_ctx[i]; 6897 if (ctx->desc != NULL) { 6898 spdk_bdev_close(ctx->desc); 6899 } 6900 memset(ctx, 0, sizeof(*ctx)); 6901 spdk_bdev_claim_opts_init(&ctx->claim_opts, sizeof(ctx->claim_opts)); 6902 } 6903 } 6904 6905 static void 6906 examine_claimed_config(struct spdk_bdev *bdev, uint32_t modnum) 6907 { 6908 SPDK_CU_ASSERT_FATAL(modnum < UT_MAX_EXAMINE_MODS); 6909 struct spdk_bdev_module *module = &examine_claimed_mods[modnum]; 6910 struct ut_examine_claimed_ctx *ctx = &examine_claimed_ctx[modnum]; 6911 int rc; 6912 6913 if (!ut_testing_examine_claimed) { 6914 spdk_bdev_module_examine_done(module); 6915 return; 6916 } 6917 6918 ctx->examine_config_count++; 6919 6920 if (ctx->claim_type != SPDK_BDEV_CLAIM_NONE) { 6921 rc = spdk_bdev_open_ext(bdev->name, false, bdev_ut_event_cb, &ctx->claim_opts, 6922 &ctx->desc); 6923 CU_ASSERT(rc == 0); 6924 6925 rc = spdk_bdev_module_claim_bdev_desc(ctx->desc, ctx->claim_type, NULL, module); 6926 CU_ASSERT(rc == ctx->expect_claim_err); 6927 } 6928 spdk_bdev_module_examine_done(module); 6929 } 6930 6931 static void 6932 ut_examine_claimed_config0(struct spdk_bdev *bdev) 6933 { 6934 examine_claimed_config(bdev, 0); 6935 } 6936 6937 static void 6938 ut_examine_claimed_config1(struct spdk_bdev *bdev) 6939 { 6940 examine_claimed_config(bdev, 1); 6941 } 6942 6943 static void 6944 examine_claimed_disk(struct spdk_bdev *bdev, uint32_t modnum) 6945 { 6946 SPDK_CU_ASSERT_FATAL(modnum < UT_MAX_EXAMINE_MODS); 6947 struct spdk_bdev_module *module = &examine_claimed_mods[modnum]; 6948 struct ut_examine_claimed_ctx *ctx = &examine_claimed_ctx[modnum]; 6949 6950 if (!ut_testing_examine_claimed) { 6951 spdk_bdev_module_examine_done(module); 6952 return; 6953 } 6954 6955 ctx->examine_disk_count++; 6956 6957 spdk_bdev_module_examine_done(module); 6958 } 6959 6960 static void 6961 ut_examine_claimed_disk0(struct spdk_bdev *bdev) 6962 { 6963 examine_claimed_disk(bdev, 0); 6964 } 6965 6966 static void 6967 ut_examine_claimed_disk1(struct spdk_bdev *bdev) 6968 { 6969 examine_claimed_disk(bdev, 1); 6970 } 6971 6972 static void 6973 examine_claimed(void) 6974 { 6975 struct spdk_bdev *bdev; 6976 struct spdk_bdev_module *mod = examine_claimed_mods; 6977 struct ut_examine_claimed_ctx *ctx = examine_claimed_ctx; 6978 6979 ut_testing_examine_claimed = true; 6980 reset_examine_claimed_ctx(); 6981 6982 /* 6983 * With one module claiming, both modules' examine_config should be called, but only the 6984 * claiming module's examine_disk should be called. 6985 */ 6986 ctx[0].claim_type = SPDK_BDEV_CLAIM_READ_MANY_WRITE_NONE; 6987 bdev = allocate_bdev("bdev0"); 6988 CU_ASSERT(ctx[0].examine_config_count == 1); 6989 CU_ASSERT(ctx[0].examine_disk_count == 1); 6990 SPDK_CU_ASSERT_FATAL(ctx[0].desc != NULL); 6991 CU_ASSERT(ctx[0].desc->claim->module == &mod[0]); 6992 CU_ASSERT(ctx[1].examine_config_count == 1); 6993 CU_ASSERT(ctx[1].examine_disk_count == 0); 6994 CU_ASSERT(ctx[1].desc == NULL); 6995 reset_examine_claimed_ctx(); 6996 free_bdev(bdev); 6997 6998 /* 6999 * With two modules claiming, both modules' examine_config and examine_disk should be 7000 * called. 7001 */ 7002 ctx[0].claim_type = SPDK_BDEV_CLAIM_READ_MANY_WRITE_NONE; 7003 ctx[1].claim_type = SPDK_BDEV_CLAIM_READ_MANY_WRITE_NONE; 7004 bdev = allocate_bdev("bdev0"); 7005 CU_ASSERT(ctx[0].examine_config_count == 1); 7006 CU_ASSERT(ctx[0].examine_disk_count == 1); 7007 SPDK_CU_ASSERT_FATAL(ctx[0].desc != NULL); 7008 CU_ASSERT(ctx[0].desc->claim->module == &mod[0]); 7009 CU_ASSERT(ctx[1].examine_config_count == 1); 7010 CU_ASSERT(ctx[1].examine_disk_count == 1); 7011 SPDK_CU_ASSERT_FATAL(ctx[1].desc != NULL); 7012 CU_ASSERT(ctx[1].desc->claim->module == &mod[1]); 7013 reset_examine_claimed_ctx(); 7014 free_bdev(bdev); 7015 7016 /* 7017 * If two vbdev modules try to claim with conflicting claim types, the module that was added 7018 * last wins. The winner gets the claim and is the only one that has its examine_disk 7019 * callback invoked. 7020 */ 7021 ctx[0].claim_type = SPDK_BDEV_CLAIM_READ_MANY_WRITE_NONE; 7022 ctx[0].expect_claim_err = -EPERM; 7023 ctx[1].claim_type = SPDK_BDEV_CLAIM_READ_MANY_WRITE_ONE; 7024 bdev = allocate_bdev("bdev0"); 7025 CU_ASSERT(ctx[0].examine_config_count == 1); 7026 CU_ASSERT(ctx[0].examine_disk_count == 0); 7027 CU_ASSERT(ctx[1].examine_config_count == 1); 7028 CU_ASSERT(ctx[1].examine_disk_count == 1); 7029 SPDK_CU_ASSERT_FATAL(ctx[1].desc != NULL); 7030 CU_ASSERT(ctx[1].desc->claim->module == &mod[1]); 7031 CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_READ_MANY_WRITE_ONE); 7032 reset_examine_claimed_ctx(); 7033 free_bdev(bdev); 7034 7035 ut_testing_examine_claimed = false; 7036 } 7037 7038 int 7039 main(int argc, char **argv) 7040 { 7041 CU_pSuite suite = NULL; 7042 unsigned int num_failures; 7043 7044 CU_set_error_action(CUEA_ABORT); 7045 CU_initialize_registry(); 7046 7047 suite = CU_add_suite("bdev", ut_bdev_setup, ut_bdev_teardown); 7048 7049 CU_ADD_TEST(suite, bytes_to_blocks_test); 7050 CU_ADD_TEST(suite, num_blocks_test); 7051 CU_ADD_TEST(suite, io_valid_test); 7052 CU_ADD_TEST(suite, open_write_test); 7053 CU_ADD_TEST(suite, claim_test); 7054 CU_ADD_TEST(suite, alias_add_del_test); 7055 CU_ADD_TEST(suite, get_device_stat_test); 7056 CU_ADD_TEST(suite, bdev_io_types_test); 7057 CU_ADD_TEST(suite, bdev_io_wait_test); 7058 CU_ADD_TEST(suite, bdev_io_spans_split_test); 7059 CU_ADD_TEST(suite, bdev_io_boundary_split_test); 7060 CU_ADD_TEST(suite, bdev_io_max_size_and_segment_split_test); 7061 CU_ADD_TEST(suite, bdev_io_mix_split_test); 7062 CU_ADD_TEST(suite, bdev_io_split_with_io_wait); 7063 CU_ADD_TEST(suite, bdev_io_write_unit_split_test); 7064 CU_ADD_TEST(suite, bdev_io_alignment_with_boundary); 7065 CU_ADD_TEST(suite, bdev_io_alignment); 7066 CU_ADD_TEST(suite, bdev_histograms); 7067 CU_ADD_TEST(suite, bdev_write_zeroes); 7068 CU_ADD_TEST(suite, bdev_compare_and_write); 7069 CU_ADD_TEST(suite, bdev_compare); 7070 CU_ADD_TEST(suite, bdev_compare_emulated); 7071 CU_ADD_TEST(suite, bdev_zcopy_write); 7072 CU_ADD_TEST(suite, bdev_zcopy_read); 7073 CU_ADD_TEST(suite, bdev_open_while_hotremove); 7074 CU_ADD_TEST(suite, bdev_close_while_hotremove); 7075 CU_ADD_TEST(suite, bdev_open_ext); 7076 CU_ADD_TEST(suite, bdev_open_ext_unregister); 7077 CU_ADD_TEST(suite, bdev_set_io_timeout); 7078 CU_ADD_TEST(suite, bdev_set_qd_sampling); 7079 CU_ADD_TEST(suite, lba_range_overlap); 7080 CU_ADD_TEST(suite, lock_lba_range_check_ranges); 7081 CU_ADD_TEST(suite, lock_lba_range_with_io_outstanding); 7082 CU_ADD_TEST(suite, lock_lba_range_overlapped); 7083 CU_ADD_TEST(suite, bdev_io_abort); 7084 CU_ADD_TEST(suite, bdev_unmap); 7085 CU_ADD_TEST(suite, bdev_write_zeroes_split_test); 7086 CU_ADD_TEST(suite, bdev_set_options_test); 7087 CU_ADD_TEST(suite, bdev_multi_allocation); 7088 CU_ADD_TEST(suite, bdev_get_memory_domains); 7089 CU_ADD_TEST(suite, bdev_io_ext); 7090 CU_ADD_TEST(suite, bdev_io_ext_no_opts); 7091 CU_ADD_TEST(suite, bdev_io_ext_invalid_opts); 7092 CU_ADD_TEST(suite, bdev_io_ext_split); 7093 CU_ADD_TEST(suite, bdev_io_ext_bounce_buffer); 7094 CU_ADD_TEST(suite, bdev_register_uuid_alias); 7095 CU_ADD_TEST(suite, bdev_unregister_by_name); 7096 CU_ADD_TEST(suite, for_each_bdev_test); 7097 CU_ADD_TEST(suite, bdev_seek_test); 7098 CU_ADD_TEST(suite, bdev_copy); 7099 CU_ADD_TEST(suite, bdev_copy_split_test); 7100 CU_ADD_TEST(suite, examine_locks); 7101 CU_ADD_TEST(suite, claim_v2_rwo); 7102 CU_ADD_TEST(suite, claim_v2_rom); 7103 CU_ADD_TEST(suite, claim_v2_rwm); 7104 CU_ADD_TEST(suite, claim_v2_existing_writer); 7105 CU_ADD_TEST(suite, claim_v2_existing_v1); 7106 CU_ADD_TEST(suite, claim_v1_existing_v2); 7107 CU_ADD_TEST(suite, examine_claimed); 7108 7109 allocate_cores(1); 7110 allocate_threads(1); 7111 set_thread(0); 7112 7113 CU_basic_set_mode(CU_BRM_VERBOSE); 7114 CU_basic_run_tests(); 7115 num_failures = CU_get_number_of_failures(); 7116 CU_cleanup_registry(); 7117 7118 free_threads(); 7119 free_cores(); 7120 7121 return num_failures; 7122 } 7123