/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (C) 2017 Intel Corporation. All rights reserved.
 * Copyright (c) 2019 Mellanox Technologies LTD. All rights reserved.
 * Copyright (c) 2021-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 */

#include "spdk_cunit.h"

#include "common/lib/ut_multithread.c"
#include "unit/lib/json_mock.c"

#include "spdk/config.h"
/* HACK: disable VTune integration so the unit test doesn't need VTune headers and libs to build */
#undef SPDK_CONFIG_VTUNE

#include "bdev/bdev.c"

DEFINE_STUB(spdk_notify_send, uint64_t, (const char *type, const char *ctx), 0);
DEFINE_STUB(spdk_notify_type_register, struct spdk_notify_type *, (const char *type), NULL);
DEFINE_STUB(spdk_memory_domain_get_dma_device_id, const char *, (struct spdk_memory_domain *domain),
	    "test_domain");
DEFINE_STUB(spdk_memory_domain_get_dma_device_type, enum spdk_dma_device_type,
	    (struct spdk_memory_domain *domain), 0);
DEFINE_STUB(spdk_accel_sequence_finish, int,
	    (struct spdk_accel_sequence *seq, spdk_accel_completion_cb cb_fn, void *cb_arg), 0);
DEFINE_STUB_V(spdk_accel_sequence_abort, (struct spdk_accel_sequence *seq));
DEFINE_STUB(spdk_accel_append_copy, int,
	    (struct spdk_accel_sequence **seq, struct spdk_io_channel *ch, struct iovec *dst_iovs,
	     uint32_t dst_iovcnt, struct spdk_memory_domain *dst_domain, void *dst_domain_ctx,
	     struct iovec *src_iovs, uint32_t src_iovcnt, struct spdk_memory_domain *src_domain,
	     void *src_domain_ctx, int flags, spdk_accel_step_cb cb_fn, void *cb_arg), 0);
DEFINE_STUB(spdk_accel_get_memory_domain, struct spdk_memory_domain *, (void), NULL);

static bool g_memory_domain_pull_data_called;
static bool g_memory_domain_push_data_called;
static int g_accel_io_device;

DEFINE_RETURN_MOCK(spdk_memory_domain_pull_data, int);
int
spdk_memory_domain_pull_data(struct spdk_memory_domain *src_domain, void *src_domain_ctx,
			     struct iovec *src_iov, uint32_t src_iov_cnt, struct iovec *dst_iov,
			     uint32_t dst_iov_cnt, spdk_memory_domain_data_cpl_cb cpl_cb, void *cpl_cb_arg)
{
	g_memory_domain_pull_data_called = true;
	HANDLE_RETURN_MOCK(spdk_memory_domain_pull_data);
	cpl_cb(cpl_cb_arg, 0);
	return 0;
}

DEFINE_RETURN_MOCK(spdk_memory_domain_push_data, int);
int
spdk_memory_domain_push_data(struct spdk_memory_domain *dst_domain, void *dst_domain_ctx,
			     struct iovec *dst_iov, uint32_t dst_iovcnt, struct iovec *src_iov,
			     uint32_t src_iovcnt, spdk_memory_domain_data_cpl_cb cpl_cb, void *cpl_cb_arg)
{
	g_memory_domain_push_data_called = true;
	HANDLE_RETURN_MOCK(spdk_memory_domain_push_data);
	cpl_cb(cpl_cb_arg, 0);
	return 0;
}

struct spdk_io_channel *
spdk_accel_get_io_channel(void)
{
	return spdk_get_io_channel(&g_accel_io_device);
}

int g_status;
int g_count;
enum spdk_bdev_event_type g_event_type1;
enum spdk_bdev_event_type g_event_type2;
enum spdk_bdev_event_type g_event_type3;
enum spdk_bdev_event_type g_event_type4;
struct spdk_histogram_data *g_histogram;
void *g_unregister_arg;
int g_unregister_rc;

void
spdk_scsi_nvme_translate(const struct spdk_bdev_io *bdev_io,
			 int *sc, int *sk, int *asc, int *ascq)
{
}

static int
ut_accel_ch_create_cb(void *io_device, void *ctx)
{
	return 0;
}

static void
ut_accel_ch_destroy_cb(void *io_device, void *ctx)
{
}

static int
ut_bdev_setup(void)
{
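	/* Register the accel framework's io_device so that the
	 * spdk_accel_get_io_channel() stub above can hand out a real channel.
	 */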
	spdk_io_device_register(&g_accel_io_device, ut_accel_ch_create_cb,
				ut_accel_ch_destroy_cb, 0, NULL);
	return 0;
}

static int
ut_bdev_teardown(void)
{
	spdk_io_device_unregister(&g_accel_io_device, NULL);

	return 0;
}

static int
stub_destruct(void *ctx)
{
	return 0;
}

struct ut_expected_io {
	uint8_t type;
	uint64_t offset;
	uint64_t src_offset;
	uint64_t length;
	int iovcnt;
	struct iovec iov[SPDK_BDEV_IO_NUM_CHILD_IOV];
	void *md_buf;
	TAILQ_ENTRY(ut_expected_io) link;
};

struct bdev_ut_channel {
	TAILQ_HEAD(, spdk_bdev_io) outstanding_io;
	uint32_t outstanding_io_count;
	TAILQ_HEAD(, ut_expected_io) expected_io;
};

static bool g_io_done;
static struct spdk_bdev_io *g_bdev_io;
static enum spdk_bdev_io_status g_io_status;
static enum spdk_bdev_io_status g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS;
static uint32_t g_bdev_ut_io_device;
static struct bdev_ut_channel *g_bdev_ut_channel;
static void *g_compare_read_buf;
static uint32_t g_compare_read_buf_len;
static void *g_compare_write_buf;
static uint32_t g_compare_write_buf_len;
static void *g_compare_md_buf;
static bool g_abort_done;
static enum spdk_bdev_io_status g_abort_status;
static void *g_zcopy_read_buf;
static uint32_t g_zcopy_read_buf_len;
static void *g_zcopy_write_buf;
static uint32_t g_zcopy_write_buf_len;
static struct spdk_bdev_io *g_zcopy_bdev_io;
static uint64_t g_seek_data_offset;
static uint64_t g_seek_hole_offset;
static uint64_t g_seek_offset;

static struct ut_expected_io *
ut_alloc_expected_io(uint8_t type, uint64_t offset, uint64_t length, int iovcnt)
{
	struct ut_expected_io *expected_io;

	expected_io = calloc(1, sizeof(*expected_io));
	SPDK_CU_ASSERT_FATAL(expected_io != NULL);

	expected_io->type = type;
	expected_io->offset = offset;
	expected_io->length = length;
	expected_io->iovcnt = iovcnt;

	return expected_io;
}

static struct ut_expected_io *
ut_alloc_expected_copy_io(uint8_t type, uint64_t offset, uint64_t src_offset, uint64_t length)
{
	struct ut_expected_io *expected_io;

	expected_io = calloc(1, sizeof(*expected_io));
	SPDK_CU_ASSERT_FATAL(expected_io != NULL);

	expected_io->type = type;
	expected_io->offset = offset;
	expected_io->src_offset = src_offset;
	expected_io->length = length;

	return expected_io;
}

static void
ut_expected_io_set_iov(struct ut_expected_io *expected_io, int pos, void *base, size_t len)
{
	expected_io->iov[pos].iov_base = base;
	expected_io->iov[pos].iov_len = len;
}

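/* submit_request handler for the stub bdev. It queues the I/O as outstanding,
 * emulates the data paths the tests depend on (compare buffers, abort, zcopy
 * and seek), and then checks the I/O against the next ut_expected_io entry
 * queued on the channel, if any.
 */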
static void
stub_submit_request(struct spdk_io_channel *_ch, struct spdk_bdev_io *bdev_io)
{
	struct bdev_ut_channel *ch = spdk_io_channel_get_ctx(_ch);
	struct ut_expected_io *expected_io;
	struct iovec *iov, *expected_iov;
	struct spdk_bdev_io *bio_to_abort;
	int i;

	g_bdev_io = bdev_io;

	if (g_compare_read_buf && bdev_io->type == SPDK_BDEV_IO_TYPE_READ) {
		uint32_t len = bdev_io->u.bdev.iovs[0].iov_len;

		CU_ASSERT(bdev_io->u.bdev.iovcnt == 1);
		CU_ASSERT(g_compare_read_buf_len == len);
		memcpy(bdev_io->u.bdev.iovs[0].iov_base, g_compare_read_buf, len);
		if (bdev_io->bdev->md_len && bdev_io->u.bdev.md_buf && g_compare_md_buf) {
			memcpy(bdev_io->u.bdev.md_buf, g_compare_md_buf,
			       bdev_io->bdev->md_len * bdev_io->u.bdev.num_blocks);
		}
	}

	if (g_compare_write_buf && bdev_io->type == SPDK_BDEV_IO_TYPE_WRITE) {
		uint32_t len = bdev_io->u.bdev.iovs[0].iov_len;

		CU_ASSERT(bdev_io->u.bdev.iovcnt == 1);
		CU_ASSERT(g_compare_write_buf_len == len);
		memcpy(g_compare_write_buf, bdev_io->u.bdev.iovs[0].iov_base, len);
	}

	if (g_compare_read_buf && bdev_io->type == SPDK_BDEV_IO_TYPE_COMPARE) {
		uint32_t len = bdev_io->u.bdev.iovs[0].iov_len;

		CU_ASSERT(bdev_io->u.bdev.iovcnt == 1);
		CU_ASSERT(g_compare_read_buf_len == len);
		if (memcmp(bdev_io->u.bdev.iovs[0].iov_base, g_compare_read_buf, len)) {
			g_io_exp_status = SPDK_BDEV_IO_STATUS_MISCOMPARE;
		}
		if (bdev_io->u.bdev.md_buf &&
		    memcmp(bdev_io->u.bdev.md_buf, g_compare_md_buf,
			   bdev_io->bdev->md_len * bdev_io->u.bdev.num_blocks)) {
			g_io_exp_status = SPDK_BDEV_IO_STATUS_MISCOMPARE;
		}
	}

	if (bdev_io->type == SPDK_BDEV_IO_TYPE_ABORT) {
		if (g_io_exp_status == SPDK_BDEV_IO_STATUS_SUCCESS) {
			TAILQ_FOREACH(bio_to_abort, &ch->outstanding_io, module_link) {
				if (bio_to_abort == bdev_io->u.abort.bio_to_abort) {
					TAILQ_REMOVE(&ch->outstanding_io, bio_to_abort, module_link);
					ch->outstanding_io_count--;
					spdk_bdev_io_complete(bio_to_abort, SPDK_BDEV_IO_STATUS_FAILED);
					break;
				}
			}
		}
	}

	if (bdev_io->type == SPDK_BDEV_IO_TYPE_ZCOPY) {
		if (bdev_io->u.bdev.zcopy.start) {
			g_zcopy_bdev_io = bdev_io;
			if (bdev_io->u.bdev.zcopy.populate) {
				/* Start of a read */
				CU_ASSERT(g_zcopy_read_buf != NULL);
				CU_ASSERT(g_zcopy_read_buf_len > 0);
				bdev_io->u.bdev.iovs[0].iov_base = g_zcopy_read_buf;
				bdev_io->u.bdev.iovs[0].iov_len = g_zcopy_read_buf_len;
				bdev_io->u.bdev.iovcnt = 1;
			} else {
				/* Start of a write */
				CU_ASSERT(g_zcopy_write_buf != NULL);
				CU_ASSERT(g_zcopy_write_buf_len > 0);
				bdev_io->u.bdev.iovs[0].iov_base = g_zcopy_write_buf;
				bdev_io->u.bdev.iovs[0].iov_len = g_zcopy_write_buf_len;
				bdev_io->u.bdev.iovcnt = 1;
			}
		} else {
			if (bdev_io->u.bdev.zcopy.commit) {
				/* End of write */
				CU_ASSERT(bdev_io->u.bdev.iovs[0].iov_base == g_zcopy_write_buf);
				CU_ASSERT(bdev_io->u.bdev.iovs[0].iov_len == g_zcopy_write_buf_len);
				CU_ASSERT(bdev_io->u.bdev.iovcnt == 1);
				g_zcopy_write_buf = NULL;
				g_zcopy_write_buf_len = 0;
			} else {
				/* End of read */
				CU_ASSERT(bdev_io->u.bdev.iovs[0].iov_base == g_zcopy_read_buf);
				CU_ASSERT(bdev_io->u.bdev.iovs[0].iov_len == g_zcopy_read_buf_len);
				CU_ASSERT(bdev_io->u.bdev.iovcnt == 1);
				g_zcopy_read_buf = NULL;
				g_zcopy_read_buf_len = 0;
			}
		}
	}

	if (bdev_io->type == SPDK_BDEV_IO_TYPE_SEEK_DATA) {
		bdev_io->u.bdev.seek.offset = g_seek_data_offset;
	}

	if (bdev_io->type == SPDK_BDEV_IO_TYPE_SEEK_HOLE) {
		bdev_io->u.bdev.seek.offset = g_seek_hole_offset;
	}

	TAILQ_INSERT_TAIL(&ch->outstanding_io, bdev_io, module_link);
	ch->outstanding_io_count++;

	expected_io = TAILQ_FIRST(&ch->expected_io);
	if (expected_io == NULL) {
		return;
	}
	TAILQ_REMOVE(&ch->expected_io, expected_io, link);

	if (expected_io->type != SPDK_BDEV_IO_TYPE_INVALID) {
		CU_ASSERT(bdev_io->type == expected_io->type);
	}

	if (expected_io->md_buf != NULL) {
		CU_ASSERT(expected_io->md_buf == bdev_io->u.bdev.md_buf);
	}

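	/* A zero expected length is used as a sentinel: the test only cares about
	 * the I/O type (and optionally the md_buf), not the offset, length or iovs.
	 */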
	if (expected_io->length == 0) {
		free(expected_io);
		return;
	}

	CU_ASSERT(expected_io->offset == bdev_io->u.bdev.offset_blocks);
	CU_ASSERT(expected_io->length == bdev_io->u.bdev.num_blocks);
	if (expected_io->type == SPDK_BDEV_IO_TYPE_COPY) {
		CU_ASSERT(expected_io->src_offset == bdev_io->u.bdev.copy.src_offset_blocks);
	}

	if (expected_io->iovcnt == 0) {
		free(expected_io);
		/* UNMAP, WRITE_ZEROES, FLUSH and COPY don't have iovs, so we can just return now. */
		return;
	}

	CU_ASSERT(expected_io->iovcnt == bdev_io->u.bdev.iovcnt);
	for (i = 0; i < expected_io->iovcnt; i++) {
		expected_iov = &expected_io->iov[i];
		if (bdev_io->internal.orig_iovcnt == 0) {
			iov = &bdev_io->u.bdev.iovs[i];
		} else {
			iov = &bdev_io->internal.orig_iovs[i];
		}
		CU_ASSERT(iov->iov_len == expected_iov->iov_len);
		CU_ASSERT(iov->iov_base == expected_iov->iov_base);
	}

	free(expected_io);
}

static void
stub_submit_request_get_buf_cb(struct spdk_io_channel *_ch,
			       struct spdk_bdev_io *bdev_io, bool success)
{
	CU_ASSERT(success == true);

	stub_submit_request(_ch, bdev_io);
}

static void
stub_submit_request_get_buf(struct spdk_io_channel *_ch, struct spdk_bdev_io *bdev_io)
{
	spdk_bdev_io_get_buf(bdev_io, stub_submit_request_get_buf_cb,
			     bdev_io->u.bdev.num_blocks * bdev_io->bdev->blocklen);
}

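/* Complete up to num_to_complete outstanding I/Os on the stub channel with
 * g_io_exp_status as the completion status. Returns the number completed.
 */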
static uint32_t
stub_complete_io(uint32_t num_to_complete)
{
	struct bdev_ut_channel *ch = g_bdev_ut_channel;
	struct spdk_bdev_io *bdev_io;
	static enum spdk_bdev_io_status io_status;
	uint32_t num_completed = 0;

	while (num_completed < num_to_complete) {
		if (TAILQ_EMPTY(&ch->outstanding_io)) {
			break;
		}
		bdev_io = TAILQ_FIRST(&ch->outstanding_io);
		TAILQ_REMOVE(&ch->outstanding_io, bdev_io, module_link);
		ch->outstanding_io_count--;
		io_status = g_io_exp_status;
		spdk_bdev_io_complete(bdev_io, io_status);
		num_completed++;
	}

	return num_completed;
}

static struct spdk_io_channel *
bdev_ut_get_io_channel(void *ctx)
{
	return spdk_get_io_channel(&g_bdev_ut_io_device);
}

static bool g_io_types_supported[SPDK_BDEV_NUM_IO_TYPES] = {
	[SPDK_BDEV_IO_TYPE_READ] = true,
	[SPDK_BDEV_IO_TYPE_WRITE] = true,
	[SPDK_BDEV_IO_TYPE_COMPARE] = true,
	[SPDK_BDEV_IO_TYPE_UNMAP] = true,
	[SPDK_BDEV_IO_TYPE_FLUSH] = true,
	[SPDK_BDEV_IO_TYPE_RESET] = true,
	[SPDK_BDEV_IO_TYPE_NVME_ADMIN] = true,
	[SPDK_BDEV_IO_TYPE_NVME_IO] = true,
	[SPDK_BDEV_IO_TYPE_NVME_IO_MD] = true,
	[SPDK_BDEV_IO_TYPE_WRITE_ZEROES] = true,
	[SPDK_BDEV_IO_TYPE_ZCOPY] = true,
	[SPDK_BDEV_IO_TYPE_ABORT] = true,
	[SPDK_BDEV_IO_TYPE_SEEK_HOLE] = true,
	[SPDK_BDEV_IO_TYPE_SEEK_DATA] = true,
	[SPDK_BDEV_IO_TYPE_COPY] = true,
};

static void
ut_enable_io_type(enum spdk_bdev_io_type io_type, bool enable)
{
	g_io_types_supported[io_type] = enable;
}

static bool
stub_io_type_supported(void *_bdev, enum spdk_bdev_io_type io_type)
{
	return g_io_types_supported[io_type];
}

static struct spdk_bdev_fn_table fn_table = {
	.destruct = stub_destruct,
	.submit_request = stub_submit_request,
	.get_io_channel = bdev_ut_get_io_channel,
	.io_type_supported = stub_io_type_supported,
};

static int
bdev_ut_create_ch(void *io_device, void *ctx_buf)
{
	struct bdev_ut_channel *ch = ctx_buf;

	CU_ASSERT(g_bdev_ut_channel == NULL);
	g_bdev_ut_channel = ch;

	TAILQ_INIT(&ch->outstanding_io);
	ch->outstanding_io_count = 0;
	TAILQ_INIT(&ch->expected_io);
	return 0;
}

static void
bdev_ut_destroy_ch(void *io_device, void *ctx_buf)
{
	CU_ASSERT(g_bdev_ut_channel != NULL);
	g_bdev_ut_channel = NULL;
}

struct spdk_bdev_module bdev_ut_if;

static int
bdev_ut_module_init(void)
{
	spdk_io_device_register(&g_bdev_ut_io_device, bdev_ut_create_ch, bdev_ut_destroy_ch,
				sizeof(struct bdev_ut_channel), NULL);
	spdk_bdev_module_init_done(&bdev_ut_if);
	return 0;
}

static void
bdev_ut_module_fini(void)
{
	spdk_io_device_unregister(&g_bdev_ut_io_device, NULL);
}

struct spdk_bdev_module bdev_ut_if = {
	.name = "bdev_ut",
	.module_init = bdev_ut_module_init,
	.module_fini = bdev_ut_module_fini,
	.async_init = true,
};

static void vbdev_ut_examine_config(struct spdk_bdev *bdev);
static void vbdev_ut_examine_disk(struct spdk_bdev *bdev);

static int
vbdev_ut_module_init(void)
{
	return 0;
}

static void
vbdev_ut_module_fini(void)
{
}

struct spdk_bdev_module vbdev_ut_if = {
	.name = "vbdev_ut",
	.module_init = vbdev_ut_module_init,
	.module_fini = vbdev_ut_module_fini,
	.examine_config = vbdev_ut_examine_config,
	.examine_disk = vbdev_ut_examine_disk,
};

SPDK_BDEV_MODULE_REGISTER(bdev_ut, &bdev_ut_if)
SPDK_BDEV_MODULE_REGISTER(vbdev_ut, &vbdev_ut_if)

struct ut_examine_ctx {
	void (*examine_config)(struct spdk_bdev *bdev);
	void (*examine_disk)(struct spdk_bdev *bdev);
	uint32_t examine_config_count;
	uint32_t examine_disk_count;
};

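/* Examine callbacks for the vbdev module. Each invocation is counted and
 * optionally forwarded to per-bdev hooks in ut_examine_ctx; in all cases
 * spdk_bdev_module_examine_done() is called so that bdev registration can
 * make progress.
 */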
static void
vbdev_ut_examine_config(struct spdk_bdev *bdev)
{
	struct ut_examine_ctx *ctx = bdev->ctxt;

	if (ctx != NULL) {
		ctx->examine_config_count++;
		if (ctx->examine_config != NULL) {
			ctx->examine_config(bdev);
		}
	}

	spdk_bdev_module_examine_done(&vbdev_ut_if);
}

static void
vbdev_ut_examine_disk(struct spdk_bdev *bdev)
{
	struct ut_examine_ctx *ctx = bdev->ctxt;

	if (ctx != NULL) {
		ctx->examine_disk_count++;
		if (ctx->examine_disk != NULL) {
			ctx->examine_disk(bdev);
		}
	}

	spdk_bdev_module_examine_done(&vbdev_ut_if);
}

static struct spdk_bdev *
allocate_bdev_ctx(char *name, void *ctx)
{
	struct spdk_bdev *bdev;
	int rc;

	bdev = calloc(1, sizeof(*bdev));
	SPDK_CU_ASSERT_FATAL(bdev != NULL);

	bdev->ctxt = ctx;
	bdev->name = name;
	bdev->fn_table = &fn_table;
	bdev->module = &bdev_ut_if;
	/* Default to a small disk: 1024 blocks of 512 bytes. */
	bdev->blockcnt = 1024;
	bdev->blocklen = 512;

	spdk_uuid_generate(&bdev->uuid);

	rc = spdk_bdev_register(bdev);
	poll_threads();
	CU_ASSERT(rc == 0);

	return bdev;
}

static struct spdk_bdev *
allocate_bdev(char *name)
{
	return allocate_bdev_ctx(name, NULL);
}

static struct spdk_bdev *
allocate_vbdev(char *name)
{
	struct spdk_bdev *bdev;
	int rc;

	bdev = calloc(1, sizeof(*bdev));
	SPDK_CU_ASSERT_FATAL(bdev != NULL);

	bdev->name = name;
	bdev->fn_table = &fn_table;
	bdev->module = &vbdev_ut_if;

	rc = spdk_bdev_register(bdev);
	poll_threads();
	CU_ASSERT(rc == 0);

	return bdev;
}

static void
free_bdev(struct spdk_bdev *bdev)
{
	spdk_bdev_unregister(bdev, NULL, NULL);
	poll_threads();
	memset(bdev, 0xFF, sizeof(*bdev));
	free(bdev);
}

static void
free_vbdev(struct spdk_bdev *bdev)
{
	spdk_bdev_unregister(bdev, NULL, NULL);
	poll_threads();
	memset(bdev, 0xFF, sizeof(*bdev));
	free(bdev);
}

static void
get_device_stat_cb(struct spdk_bdev *bdev, struct spdk_bdev_io_stat *stat, void *cb_arg, int rc)
{
	const char *bdev_name;

	CU_ASSERT(bdev != NULL);
	CU_ASSERT(rc == 0);
	bdev_name = spdk_bdev_get_name(bdev);
	CU_ASSERT_STRING_EQUAL(bdev_name, "bdev0");

	free(stat);

	*(bool *)cb_arg = true;
}

static void
bdev_unregister_cb(void *cb_arg, int rc)
{
	g_unregister_arg = cb_arg;
	g_unregister_rc = rc;
}

static void
bdev_ut_event_cb(enum spdk_bdev_event_type type, struct spdk_bdev *bdev, void *event_ctx)
{
}

static void
bdev_open_cb1(enum spdk_bdev_event_type type, struct spdk_bdev *bdev, void *event_ctx)
{
	struct spdk_bdev_desc *desc = *(struct spdk_bdev_desc **)event_ctx;

	g_event_type1 = type;
	if (SPDK_BDEV_EVENT_REMOVE == type) {
		spdk_bdev_close(desc);
	}
}

static void
bdev_open_cb2(enum spdk_bdev_event_type type, struct spdk_bdev *bdev, void *event_ctx)
{
	struct spdk_bdev_desc *desc = *(struct spdk_bdev_desc **)event_ctx;

	g_event_type2 = type;
	if (SPDK_BDEV_EVENT_REMOVE == type) {
		spdk_bdev_close(desc);
	}
}

static void
bdev_open_cb3(enum spdk_bdev_event_type type, struct spdk_bdev *bdev, void *event_ctx)
{
	g_event_type3 = type;
}

static void
bdev_open_cb4(enum spdk_bdev_event_type type, struct spdk_bdev *bdev, void *event_ctx)
{
	g_event_type4 = type;
}

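/* Completion callback for spdk_bdev_seek_data()/spdk_bdev_seek_hole();
 * records the offset reported by the stub so tests can assert on it.
 */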
static void
bdev_seek_cb(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
{
	g_seek_offset = spdk_bdev_io_get_seek_offset(bdev_io);
	spdk_bdev_free_io(bdev_io);
}

static void
get_device_stat_test(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_io_stat *stat;
	bool done;

	bdev = allocate_bdev("bdev0");
	stat = calloc(1, sizeof(struct spdk_bdev_io_stat));
	if (stat == NULL) {
		free_bdev(bdev);
		return;
	}

	done = false;
	spdk_bdev_get_device_stat(bdev, stat, get_device_stat_cb, &done);
	while (!done) { poll_threads(); }

	free_bdev(bdev);
}

static void
open_write_test(void)
{
	struct spdk_bdev *bdev[9];
	struct spdk_bdev_desc *desc[9] = {};
	int rc;

	/*
	 * Create a tree of bdevs to test various open w/ write cases.
	 *
	 * bdev0 through bdev3 are physical block devices, such as NVMe
	 * namespaces or Ceph block devices.
	 *
	 * bdev4 is a virtual bdev with multiple base bdevs. This models
	 * caching or RAID use cases.
	 *
	 * bdev5 through bdev7 are all virtual bdevs with the same base
	 * bdev (except bdev7). This models partitioning or logical volume
	 * use cases.
	 *
	 * bdev7 is a virtual bdev with multiple base bdevs. One of base bdevs
	 * (bdev2) is shared with other virtual bdevs: bdev5 and bdev6. This
	 * models caching, RAID, partitioning or logical volumes use cases.
	 *
	 * bdev8 is a virtual bdev with multiple base bdevs, but these
	 * base bdevs are themselves virtual bdevs.
	 *
	 *                bdev8
	 *                  |
	 *            +----------+
	 *            |          |
	 *  bdev4   bdev5   bdev6   bdev7
	 *    |       |       |       |
	 *  +---+---+ +---+   +   +---+---+
	 *  |       |  \  |  /         \
	 * bdev0  bdev1  bdev2        bdev3
	 */

	bdev[0] = allocate_bdev("bdev0");
	rc = spdk_bdev_module_claim_bdev(bdev[0], NULL, &bdev_ut_if);
	CU_ASSERT(rc == 0);

	bdev[1] = allocate_bdev("bdev1");
	rc = spdk_bdev_module_claim_bdev(bdev[1], NULL, &bdev_ut_if);
	CU_ASSERT(rc == 0);

	bdev[2] = allocate_bdev("bdev2");
	rc = spdk_bdev_module_claim_bdev(bdev[2], NULL, &bdev_ut_if);
	CU_ASSERT(rc == 0);

	bdev[3] = allocate_bdev("bdev3");
	rc = spdk_bdev_module_claim_bdev(bdev[3], NULL, &bdev_ut_if);
	CU_ASSERT(rc == 0);

	bdev[4] = allocate_vbdev("bdev4");
	rc = spdk_bdev_module_claim_bdev(bdev[4], NULL, &bdev_ut_if);
	CU_ASSERT(rc == 0);

	bdev[5] = allocate_vbdev("bdev5");
	rc = spdk_bdev_module_claim_bdev(bdev[5], NULL, &bdev_ut_if);
	CU_ASSERT(rc == 0);

	bdev[6] = allocate_vbdev("bdev6");

	bdev[7] = allocate_vbdev("bdev7");

	bdev[8] = allocate_vbdev("bdev8");

	/* Open bdev0 read-only. This should succeed. */
	rc = spdk_bdev_open_ext("bdev0", false, bdev_ut_event_cb, NULL, &desc[0]);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(desc[0] != NULL);
	CU_ASSERT(bdev[0] == spdk_bdev_desc_get_bdev(desc[0]));
	spdk_bdev_close(desc[0]);

	/*
	 * Open bdev1 read/write. This should fail since bdev1 has been claimed
	 * by a vbdev module.
	 */
	rc = spdk_bdev_open_ext("bdev1", true, bdev_ut_event_cb, NULL, &desc[1]);
	CU_ASSERT(rc == -EPERM);

	/*
	 * Open bdev4 read/write. This should fail since bdev4 has been claimed
	 * by a vbdev module.
	 */
	rc = spdk_bdev_open_ext("bdev4", true, bdev_ut_event_cb, NULL, &desc[4]);
	CU_ASSERT(rc == -EPERM);

	/* Open bdev4 read-only. This should succeed. */
	rc = spdk_bdev_open_ext("bdev4", false, bdev_ut_event_cb, NULL, &desc[4]);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(desc[4] != NULL);
	CU_ASSERT(bdev[4] == spdk_bdev_desc_get_bdev(desc[4]));
	spdk_bdev_close(desc[4]);

	/*
	 * Open bdev8 read/write. This should succeed since it is a leaf
	 * bdev.
	 */
	rc = spdk_bdev_open_ext("bdev8", true, bdev_ut_event_cb, NULL, &desc[8]);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(desc[8] != NULL);
	CU_ASSERT(bdev[8] == spdk_bdev_desc_get_bdev(desc[8]));
	spdk_bdev_close(desc[8]);

	/*
	 * Open bdev5 read/write. This should fail since bdev5 has been claimed
	 * by a vbdev module.
	 */
	rc = spdk_bdev_open_ext("bdev5", true, bdev_ut_event_cb, NULL, &desc[5]);
	CU_ASSERT(rc == -EPERM);

	/* Open bdev5 read-only. This should succeed. */
	rc = spdk_bdev_open_ext("bdev5", false, bdev_ut_event_cb, NULL, &desc[5]);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(desc[5] != NULL);
	CU_ASSERT(bdev[5] == spdk_bdev_desc_get_bdev(desc[5]));
	spdk_bdev_close(desc[5]);

	free_vbdev(bdev[8]);

	free_vbdev(bdev[5]);
	free_vbdev(bdev[6]);
	free_vbdev(bdev[7]);

	free_vbdev(bdev[4]);

	free_bdev(bdev[0]);
	free_bdev(bdev[1]);
	free_bdev(bdev[2]);
	free_bdev(bdev[3]);
}

static void
claim_test(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc, *open_desc;
	int rc;
	uint32_t count;

	/*
	 * A vbdev that uses a read-only bdev may need it to remain read-only.
	 * To do so, it opens the bdev read-only, then claims it without
	 * passing a spdk_bdev_desc.
	 */
	bdev = allocate_bdev("bdev0");
	rc = spdk_bdev_open_ext("bdev0", false, bdev_ut_event_cb, NULL, &desc);
	CU_ASSERT(rc == 0);
	CU_ASSERT(desc->write == false);

	rc = spdk_bdev_module_claim_bdev(bdev, NULL, &bdev_ut_if);
	CU_ASSERT(rc == 0);
	CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_EXCL_WRITE);
	CU_ASSERT(bdev->internal.claim.v1.module == &bdev_ut_if);

	/* There should be only one open descriptor and it should still be ro */
	count = 0;
	TAILQ_FOREACH(open_desc, &bdev->internal.open_descs, link) {
		CU_ASSERT(open_desc == desc);
		CU_ASSERT(!open_desc->write);
		count++;
	}
	CU_ASSERT(count == 1);

	/* A read-only bdev is upgraded to read-write if desc is passed. */
	spdk_bdev_module_release_bdev(bdev);
	rc = spdk_bdev_module_claim_bdev(bdev, desc, &bdev_ut_if);
	CU_ASSERT(rc == 0);
	CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_EXCL_WRITE);
	CU_ASSERT(bdev->internal.claim.v1.module == &bdev_ut_if);

	/* There should be only one open descriptor and it should be rw */
	count = 0;
	TAILQ_FOREACH(open_desc, &bdev->internal.open_descs, link) {
		CU_ASSERT(open_desc == desc);
		CU_ASSERT(open_desc->write);
		count++;
	}
	CU_ASSERT(count == 1);

	spdk_bdev_close(desc);
	free_bdev(bdev);
}

static void
bytes_to_blocks_test(void)
{
	struct spdk_bdev bdev;
	uint64_t offset_blocks, num_blocks;

	memset(&bdev, 0, sizeof(bdev));

	bdev.blocklen = 512;

	/* All parameters valid */
	offset_blocks = 0;
	num_blocks = 0;
	CU_ASSERT(bdev_bytes_to_blocks(&bdev, 512, &offset_blocks, 1024, &num_blocks) == 0);
	CU_ASSERT(offset_blocks == 1);
	CU_ASSERT(num_blocks == 2);

	/* Offset not a block multiple */
	CU_ASSERT(bdev_bytes_to_blocks(&bdev, 3, &offset_blocks, 512, &num_blocks) != 0);

	/* Length not a block multiple */
	CU_ASSERT(bdev_bytes_to_blocks(&bdev, 512, &offset_blocks, 3, &num_blocks) != 0);

	/* Case where blocklen is not a power of two */
	bdev.blocklen = 100;
	CU_ASSERT(bdev_bytes_to_blocks(&bdev, 100, &offset_blocks, 200, &num_blocks) == 0);
	CU_ASSERT(offset_blocks == 1);
	CU_ASSERT(num_blocks == 2);

	/* Offset not a block multiple */
	CU_ASSERT(bdev_bytes_to_blocks(&bdev, 3, &offset_blocks, 100, &num_blocks) != 0);

	/* Length not a block multiple */
	CU_ASSERT(bdev_bytes_to_blocks(&bdev, 100, &offset_blocks, 3, &num_blocks) != 0);
}

static void
num_blocks_test(void)
{
	struct spdk_bdev bdev;
	struct spdk_bdev_desc *desc = NULL;
	int rc;

	memset(&bdev, 0, sizeof(bdev));
	bdev.name = "num_blocks";
	bdev.fn_table = &fn_table;
	bdev.module = &bdev_ut_if;
	spdk_bdev_register(&bdev);
	poll_threads();
	spdk_bdev_notify_blockcnt_change(&bdev, 50);

	/* Growing block number */
	CU_ASSERT(spdk_bdev_notify_blockcnt_change(&bdev, 70) == 0);
	/* Shrinking block number */
	CU_ASSERT(spdk_bdev_notify_blockcnt_change(&bdev, 30) == 0);

	rc = spdk_bdev_open_ext("num_blocks", false, bdev_open_cb1, &desc, &desc);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(desc != NULL);
	CU_ASSERT(&bdev == spdk_bdev_desc_get_bdev(desc));

	/* Growing block number */
	CU_ASSERT(spdk_bdev_notify_blockcnt_change(&bdev, 80) == 0);
	/* Shrinking block number is not allowed while the bdev is open */
	CU_ASSERT(spdk_bdev_notify_blockcnt_change(&bdev, 20) != 0);

	g_event_type1 = 0xFF;
	/* Growing block number */
	CU_ASSERT(spdk_bdev_notify_blockcnt_change(&bdev, 90) == 0);

	poll_threads();
	CU_ASSERT_EQUAL(g_event_type1, SPDK_BDEV_EVENT_RESIZE);

	g_event_type1 = 0xFF;
	/* Growing block number and closing */
	CU_ASSERT(spdk_bdev_notify_blockcnt_change(&bdev, 100) == 0);

	spdk_bdev_close(desc);
	spdk_bdev_unregister(&bdev, NULL, NULL);

	poll_threads();

	/* Callback is not called for closed device */
	CU_ASSERT_EQUAL(g_event_type1, 0xFF);
}

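/* bdev_io_valid_blocks() must reject ranges that extend past blockcnt,
 * including offsets near the end of the uint64_t range.
 */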
static void
io_valid_test(void)
{
	struct spdk_bdev bdev;

	memset(&bdev, 0, sizeof(bdev));

	bdev.blocklen = 512;
	spdk_spin_init(&bdev.internal.spinlock);

	spdk_bdev_notify_blockcnt_change(&bdev, 100);

	/* All parameters valid */
	CU_ASSERT(bdev_io_valid_blocks(&bdev, 1, 2) == true);

	/* Last valid block */
	CU_ASSERT(bdev_io_valid_blocks(&bdev, 99, 1) == true);

	/* Offset past end of bdev */
	CU_ASSERT(bdev_io_valid_blocks(&bdev, 100, 1) == false);

	/* Offset + length past end of bdev */
	CU_ASSERT(bdev_io_valid_blocks(&bdev, 99, 2) == false);

	/* Offset near end of uint64_t range (2^64 - 1) */
	CU_ASSERT(bdev_io_valid_blocks(&bdev, 18446744073709551615ULL, 1) == false);

	spdk_spin_destroy(&bdev.internal.spinlock);
}

static void
alias_add_del_test(void)
{
	struct spdk_bdev *bdev[3];
	int rc;

	/* Creating and registering bdevs */
	bdev[0] = allocate_bdev("bdev0");
	SPDK_CU_ASSERT_FATAL(bdev[0] != 0);

	bdev[1] = allocate_bdev("bdev1");
	SPDK_CU_ASSERT_FATAL(bdev[1] != 0);

	bdev[2] = allocate_bdev("bdev2");
	SPDK_CU_ASSERT_FATAL(bdev[2] != 0);

	poll_threads();

	/*
	 * Try adding an alias identical to the name.
	 * The alias is identical to the name, so it cannot be added to the aliases list.
	 */
	rc = spdk_bdev_alias_add(bdev[0], bdev[0]->name);
	CU_ASSERT(rc == -EEXIST);

	/*
	 * Try to add an empty alias;
	 * this one should fail.
	 */
	rc = spdk_bdev_alias_add(bdev[0], NULL);
	CU_ASSERT(rc == -EINVAL);

	/* Try adding the same alias to two different registered bdevs */

	/* Alias is used for the first time, so this one should pass */
	rc = spdk_bdev_alias_add(bdev[0], "proper alias 0");
	CU_ASSERT(rc == 0);

	/* Alias was added to another bdev, so this one should fail */
	rc = spdk_bdev_alias_add(bdev[1], "proper alias 0");
	CU_ASSERT(rc == -EEXIST);

	/* Alias is used for the first time, so this one should pass */
	rc = spdk_bdev_alias_add(bdev[1], "proper alias 1");
	CU_ASSERT(rc == 0);

	/* Try removing aliases from registered bdevs */

	/* Alias is not on the bdev's alias list, so this one should fail */
	rc = spdk_bdev_alias_del(bdev[0], "not existing");
	CU_ASSERT(rc == -ENOENT);

	/* Alias is present on the bdev's alias list, so this one should pass */
	rc = spdk_bdev_alias_del(bdev[0], "proper alias 0");
	CU_ASSERT(rc == 0);

	/* Alias is present on the bdev's alias list, so this one should pass */
	rc = spdk_bdev_alias_del(bdev[1], "proper alias 1");
	CU_ASSERT(rc == 0);

	/* Try to remove the name instead of an alias; this should fail since the
	 * name cannot be changed or removed */
	rc = spdk_bdev_alias_del(bdev[0], bdev[0]->name);
	CU_ASSERT(rc != 0);

	/* Try to delete all aliases from an empty alias list */
	spdk_bdev_alias_del_all(bdev[2]);
	SPDK_CU_ASSERT_FATAL(TAILQ_EMPTY(&bdev[2]->aliases));

	/* Try to delete all aliases from a non-empty alias list */
	rc = spdk_bdev_alias_add(bdev[2], "alias0");
	CU_ASSERT(rc == 0);
	rc = spdk_bdev_alias_add(bdev[2], "alias1");
	CU_ASSERT(rc == 0);
	spdk_bdev_alias_del_all(bdev[2]);
	CU_ASSERT(TAILQ_EMPTY(&bdev[2]->aliases));

	/* Unregister and free bdevs */
	spdk_bdev_unregister(bdev[0], NULL, NULL);
	spdk_bdev_unregister(bdev[1], NULL, NULL);
	spdk_bdev_unregister(bdev[2], NULL, NULL);

	poll_threads();

	free(bdev[0]);
	free(bdev[1]);
	free(bdev[2]);
}

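/* Generic I/O completion callback: latch the completion status and, for a
 * zcopy start, keep the bdev_io alive in g_zcopy_bdev_io for the later
 * commit/release phase.
 */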
static void
io_done(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
{
	g_io_done = true;
	g_io_status = bdev_io->internal.status;
	if ((bdev_io->type == SPDK_BDEV_IO_TYPE_ZCOPY) &&
	    (bdev_io->u.bdev.zcopy.start)) {
		g_zcopy_bdev_io = bdev_io;
	} else {
		spdk_bdev_free_io(bdev_io);
		g_zcopy_bdev_io = NULL;
	}
}

static void
bdev_init_cb(void *arg, int rc)
{
	CU_ASSERT(rc == 0);
}

static void
bdev_fini_cb(void *arg)
{
}

static void
ut_init_bdev(struct spdk_bdev_opts *opts)
{
	int rc;

	if (opts != NULL) {
		rc = spdk_bdev_set_opts(opts);
		CU_ASSERT(rc == 0);
	}
	rc = spdk_iobuf_initialize();
	CU_ASSERT(rc == 0);
	spdk_bdev_initialize(bdev_init_cb, NULL);
	poll_threads();
}

static void
ut_fini_bdev(void)
{
	spdk_bdev_finish(bdev_fini_cb, NULL);
	spdk_iobuf_finish(bdev_fini_cb, NULL);
	poll_threads();
}

struct bdev_ut_io_wait_entry {
	struct spdk_bdev_io_wait_entry entry;
	struct spdk_io_channel *io_ch;
	struct spdk_bdev_desc *desc;
	bool submitted;
};

static void
io_wait_cb(void *arg)
{
	struct bdev_ut_io_wait_entry *entry = arg;
	int rc;

	rc = spdk_bdev_read_blocks(entry->desc, entry->io_ch, NULL, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	entry->submitted = true;
}

static void
bdev_io_types_test(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc = NULL;
	struct spdk_io_channel *io_ch;
	struct spdk_bdev_opts bdev_opts = {};
	int rc;

	spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts));
	bdev_opts.bdev_io_pool_size = 4;
	bdev_opts.bdev_io_cache_size = 2;
	ut_init_bdev(&bdev_opts);

	bdev = allocate_bdev("bdev0");

	rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc);
	CU_ASSERT(rc == 0);
	poll_threads();
	SPDK_CU_ASSERT_FATAL(desc != NULL);
	CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc));
	io_ch = spdk_bdev_get_io_channel(desc);
	CU_ASSERT(io_ch != NULL);

	/* WRITE and WRITE ZEROES are not supported */
	ut_enable_io_type(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, false);
	ut_enable_io_type(SPDK_BDEV_IO_TYPE_WRITE, false);
	rc = spdk_bdev_write_zeroes_blocks(desc, io_ch, 0, 128, io_done, NULL);
	CU_ASSERT(rc == -ENOTSUP);
	ut_enable_io_type(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, true);
	ut_enable_io_type(SPDK_BDEV_IO_TYPE_WRITE, true);

	/* NVME_IO, NVME_IO_MD and NVME_ADMIN are not supported */
	ut_enable_io_type(SPDK_BDEV_IO_TYPE_NVME_IO, false);
	ut_enable_io_type(SPDK_BDEV_IO_TYPE_NVME_IO_MD, false);
	ut_enable_io_type(SPDK_BDEV_IO_TYPE_NVME_ADMIN, false);
	rc = spdk_bdev_nvme_io_passthru(desc, io_ch, NULL, NULL, 0, NULL, NULL);
	CU_ASSERT(rc == -ENOTSUP);
	rc = spdk_bdev_nvme_io_passthru_md(desc, io_ch, NULL, NULL, 0, NULL, 0, NULL, NULL);
	CU_ASSERT(rc == -ENOTSUP);
	rc = spdk_bdev_nvme_admin_passthru(desc, io_ch, NULL, NULL, 0, NULL, NULL);
	CU_ASSERT(rc == -ENOTSUP);
	ut_enable_io_type(SPDK_BDEV_IO_TYPE_NVME_IO, true);
	ut_enable_io_type(SPDK_BDEV_IO_TYPE_NVME_IO_MD, true);
	ut_enable_io_type(SPDK_BDEV_IO_TYPE_NVME_ADMIN, true);

	spdk_put_io_channel(io_ch);
	spdk_bdev_close(desc);
	free_bdev(bdev);
	ut_fini_bdev();
}

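/* With bdev_io_pool_size = 4, a fifth submission fails with -ENOMEM.
 * spdk_bdev_queue_io_wait() queues a callback that is invoked once a
 * bdev_io is returned to the pool, allowing the caller to resubmit.
 */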
static void
bdev_io_wait_test(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc = NULL;
	struct spdk_io_channel *io_ch;
	struct spdk_bdev_opts bdev_opts = {};
	struct bdev_ut_io_wait_entry io_wait_entry;
	struct bdev_ut_io_wait_entry io_wait_entry2;
	int rc;

	spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts));
	bdev_opts.bdev_io_pool_size = 4;
	bdev_opts.bdev_io_cache_size = 2;
	ut_init_bdev(&bdev_opts);

	bdev = allocate_bdev("bdev0");

	rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc);
	CU_ASSERT(rc == 0);
	poll_threads();
	SPDK_CU_ASSERT_FATAL(desc != NULL);
	CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc));
	io_ch = spdk_bdev_get_io_channel(desc);
	CU_ASSERT(io_ch != NULL);

	rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 4);

	rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, NULL);
	CU_ASSERT(rc == -ENOMEM);

	io_wait_entry.entry.bdev = bdev;
	io_wait_entry.entry.cb_fn = io_wait_cb;
	io_wait_entry.entry.cb_arg = &io_wait_entry;
	io_wait_entry.io_ch = io_ch;
	io_wait_entry.desc = desc;
	io_wait_entry.submitted = false;
	/* Cannot use the same io_wait_entry for two different calls. */
	memcpy(&io_wait_entry2, &io_wait_entry, sizeof(io_wait_entry));
	io_wait_entry2.entry.cb_arg = &io_wait_entry2;

	/* Queue two I/O waits. */
	rc = spdk_bdev_queue_io_wait(bdev, io_ch, &io_wait_entry.entry);
	CU_ASSERT(rc == 0);
	CU_ASSERT(io_wait_entry.submitted == false);
	rc = spdk_bdev_queue_io_wait(bdev, io_ch, &io_wait_entry2.entry);
	CU_ASSERT(rc == 0);
	CU_ASSERT(io_wait_entry2.submitted == false);

	stub_complete_io(1);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 4);
	CU_ASSERT(io_wait_entry.submitted == true);
	CU_ASSERT(io_wait_entry2.submitted == false);

	stub_complete_io(1);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 4);
	CU_ASSERT(io_wait_entry2.submitted == true);

	stub_complete_io(4);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);

	spdk_put_io_channel(io_ch);
	spdk_bdev_close(desc);
	free_bdev(bdev);
	ut_fini_bdev();
}

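/* Exercise bdev_io_should_split() directly against the optimal I/O boundary,
 * segment size/count limits and write unit constraints.
 */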
static void
bdev_io_spans_split_test(void)
{
	struct spdk_bdev bdev;
	struct spdk_bdev_io bdev_io;
	struct iovec iov[SPDK_BDEV_IO_NUM_CHILD_IOV];

	memset(&bdev, 0, sizeof(bdev));
	bdev_io.u.bdev.iovs = iov;

	bdev_io.type = SPDK_BDEV_IO_TYPE_READ;
	bdev.optimal_io_boundary = 0;
	bdev.max_segment_size = 0;
	bdev.max_num_segments = 0;
	bdev_io.bdev = &bdev;

	/* bdev has no optimal_io_boundary and no max segment size/count set, so this should return false. */
	CU_ASSERT(bdev_io_should_split(&bdev_io) == false);

	bdev.split_on_optimal_io_boundary = true;
	bdev.optimal_io_boundary = 32;
	bdev_io.type = SPDK_BDEV_IO_TYPE_RESET;

	/* RESETs are not based on LBAs, so this should return false. */
	CU_ASSERT(bdev_io_should_split(&bdev_io) == false);

	bdev_io.type = SPDK_BDEV_IO_TYPE_READ;
	bdev_io.u.bdev.offset_blocks = 0;
	bdev_io.u.bdev.num_blocks = 32;

	/* This I/O runs right up to, but does not cross, the boundary, so this should return false. */
	CU_ASSERT(bdev_io_should_split(&bdev_io) == false);

	bdev_io.u.bdev.num_blocks = 33;

	/* This I/O spans a boundary. */
	CU_ASSERT(bdev_io_should_split(&bdev_io) == true);

	bdev_io.u.bdev.num_blocks = 32;
	bdev.max_segment_size = 512 * 32;
	bdev.max_num_segments = 1;
	bdev_io.u.bdev.iovcnt = 1;
	iov[0].iov_len = 512;

	/* Does not cross the boundary and does not exceed max_segment_size or max_num_segments */
	CU_ASSERT(bdev_io_should_split(&bdev_io) == false);

	bdev.split_on_optimal_io_boundary = false;
	bdev.max_segment_size = 512;
	bdev.max_num_segments = 1;
	bdev_io.u.bdev.iovcnt = 2;

	/* Exceeds max_num_segments */
	CU_ASSERT(bdev_io_should_split(&bdev_io) == true);

	bdev.max_num_segments = 2;
	iov[0].iov_len = 513;
	iov[1].iov_len = 512;

	/* Exceeds max_segment_size */
	CU_ASSERT(bdev_io_should_split(&bdev_io) == true);

	bdev.max_segment_size = 0;
	bdev.write_unit_size = 32;
	bdev.split_on_write_unit = true;
	bdev_io.type = SPDK_BDEV_IO_TYPE_WRITE;

	/* This I/O is one write unit */
	CU_ASSERT(bdev_io_should_split(&bdev_io) == false);

	bdev_io.u.bdev.num_blocks = 32 * 2;

	/* This I/O is more than one write unit */
	CU_ASSERT(bdev_io_should_split(&bdev_io) == true);

	bdev_io.u.bdev.offset_blocks = 1;
	bdev_io.u.bdev.num_blocks = 32;

	/* This I/O is not aligned to the write unit size */
	CU_ASSERT(bdev_io_should_split(&bdev_io) == true);
}

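/* Verify how a parent I/O is split into child I/Os when
 * split_on_optimal_io_boundary is set, including iovec splitting, metadata
 * buffer offsets and the SPDK_BDEV_IO_NUM_CHILD_IOV capacity limit.
 */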
static void
bdev_io_boundary_split_test(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc = NULL;
	struct spdk_io_channel *io_ch;
	struct spdk_bdev_opts bdev_opts = {};
	struct iovec iov[SPDK_BDEV_IO_NUM_CHILD_IOV * 2];
	struct ut_expected_io *expected_io;
	void *md_buf = (void *)0xFF000000;
	uint64_t i;
	int rc;

	spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts));
	bdev_opts.bdev_io_pool_size = 512;
	bdev_opts.bdev_io_cache_size = 64;
	ut_init_bdev(&bdev_opts);

	bdev = allocate_bdev("bdev0");

	rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(desc != NULL);
	CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc));
	io_ch = spdk_bdev_get_io_channel(desc);
	CU_ASSERT(io_ch != NULL);

	bdev->optimal_io_boundary = 16;
	bdev->split_on_optimal_io_boundary = false;

	g_io_done = false;

	/* First test that the I/O does not get split if split_on_optimal_io_boundary == false. */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 14, 8, 1);
	ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 8 * 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 8, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);

	bdev->split_on_optimal_io_boundary = true;
	bdev->md_interleave = false;
	bdev->md_len = 8;

	/* Now test that a single-vector command is split correctly.
	 * Offset 14, length 8, payload 0xF000
	 *  Child - Offset 14, length 2, payload 0xF000
	 *  Child - Offset 16, length 6, payload 0xF000 + 2 * 512
	 *
	 * Set up the expected values before calling spdk_bdev_read_blocks_with_md().
	 */
	g_io_done = false;
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 14, 2, 1);
	expected_io->md_buf = md_buf;
	ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 2 * 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 16, 6, 1);
	expected_io->md_buf = md_buf + 2 * 8;
	ut_expected_io_set_iov(expected_io, 0, (void *)(0xF000 + 2 * 512), 6 * 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	/* spdk_bdev_read_blocks_with_md() will submit the first child immediately. */
	rc = spdk_bdev_read_blocks_with_md(desc, io_ch, (void *)0xF000, md_buf,
					   14, 8, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2);
	stub_complete_io(2);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);

	/* Now set up a more complex, multi-vector command that needs to be split,
	 * including splitting iovecs.
	 */
	iov[0].iov_base = (void *)0x10000;
	iov[0].iov_len = 512;
	iov[1].iov_base = (void *)0x20000;
	iov[1].iov_len = 20 * 512;
	iov[2].iov_base = (void *)0x30000;
	iov[2].iov_len = 11 * 512;

	g_io_done = false;
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 14, 2, 2);
	expected_io->md_buf = md_buf;
	ut_expected_io_set_iov(expected_io, 0, (void *)0x10000, 512);
	ut_expected_io_set_iov(expected_io, 1, (void *)0x20000, 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 16, 16, 1);
	expected_io->md_buf = md_buf + 2 * 8;
	ut_expected_io_set_iov(expected_io, 0, (void *)(0x20000 + 512), 16 * 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 32, 14, 2);
	expected_io->md_buf = md_buf + 18 * 8;
	ut_expected_io_set_iov(expected_io, 0, (void *)(0x20000 + 17 * 512), 3 * 512);
	ut_expected_io_set_iov(expected_io, 1, (void *)0x30000, 11 * 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	rc = spdk_bdev_writev_blocks_with_md(desc, io_ch, iov, 3, md_buf,
					     14, 32, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 3);
	stub_complete_io(3);
	CU_ASSERT(g_io_done == true);

	/* Test a multi-vector command that needs to be split on the optimal I/O
	 * boundary and then split further due to the capacity of child iovs.
	 */
	for (i = 0; i < SPDK_BDEV_IO_NUM_CHILD_IOV * 2; i++) {
		iov[i].iov_base = (void *)((i + 1) * 0x10000);
		iov[i].iov_len = 512;
	}

	bdev->optimal_io_boundary = SPDK_BDEV_IO_NUM_CHILD_IOV;
	g_io_done = false;
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0, SPDK_BDEV_IO_NUM_CHILD_IOV,
					   SPDK_BDEV_IO_NUM_CHILD_IOV);
	expected_io->md_buf = md_buf;
	for (i = 0; i < SPDK_BDEV_IO_NUM_CHILD_IOV; i++) {
		ut_expected_io_set_iov(expected_io, i, (void *)((i + 1) * 0x10000), 512);
	}
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, SPDK_BDEV_IO_NUM_CHILD_IOV,
					   SPDK_BDEV_IO_NUM_CHILD_IOV, SPDK_BDEV_IO_NUM_CHILD_IOV);
	expected_io->md_buf = md_buf + SPDK_BDEV_IO_NUM_CHILD_IOV * 8;
	for (i = 0; i < SPDK_BDEV_IO_NUM_CHILD_IOV; i++) {
		ut_expected_io_set_iov(expected_io, i,
				       (void *)((i + 1 + SPDK_BDEV_IO_NUM_CHILD_IOV) * 0x10000), 512);
	}
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	rc = spdk_bdev_readv_blocks_with_md(desc, io_ch, iov, SPDK_BDEV_IO_NUM_CHILD_IOV * 2, md_buf,
					    0, SPDK_BDEV_IO_NUM_CHILD_IOV * 2, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);

	/* Test a multi-vector command that needs to be split on the optimal I/O
	 * boundary and then split further due to the capacity of child iovs. In
	 * this case, the length of the rest of the iovec array within an I/O
	 * boundary is a multiple of the block size.
	 */

	/* Fill the iovec array for exactly one boundary. The iovec count for this
	 * boundary is SPDK_BDEV_IO_NUM_CHILD_IOV + 1, which exceeds the capacity
	 * of child iovs.
	 */
	for (i = 0; i < SPDK_BDEV_IO_NUM_CHILD_IOV - 2; i++) {
		iov[i].iov_base = (void *)((i + 1) * 0x10000);
		iov[i].iov_len = 512;
	}
	for (i = SPDK_BDEV_IO_NUM_CHILD_IOV - 2; i < SPDK_BDEV_IO_NUM_CHILD_IOV; i++) {
		iov[i].iov_base = (void *)((i + 1) * 0x10000);
		iov[i].iov_len = 256;
	}
	iov[SPDK_BDEV_IO_NUM_CHILD_IOV].iov_base = (void *)((SPDK_BDEV_IO_NUM_CHILD_IOV + 1) * 0x10000);
	iov[SPDK_BDEV_IO_NUM_CHILD_IOV].iov_len = 512;

	/* Add an extra iovec to trigger split */
	iov[SPDK_BDEV_IO_NUM_CHILD_IOV + 1].iov_base = (void *)((SPDK_BDEV_IO_NUM_CHILD_IOV + 2) * 0x10000);
	iov[SPDK_BDEV_IO_NUM_CHILD_IOV + 1].iov_len = 512;

	bdev->optimal_io_boundary = SPDK_BDEV_IO_NUM_CHILD_IOV;
	g_io_done = false;
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0,
					   SPDK_BDEV_IO_NUM_CHILD_IOV - 1, SPDK_BDEV_IO_NUM_CHILD_IOV);
	expected_io->md_buf = md_buf;
	for (i = 0; i < SPDK_BDEV_IO_NUM_CHILD_IOV - 2; i++) {
		ut_expected_io_set_iov(expected_io, i,
				       (void *)((i + 1) * 0x10000), 512);
	}
	for (i = SPDK_BDEV_IO_NUM_CHILD_IOV - 2; i < SPDK_BDEV_IO_NUM_CHILD_IOV; i++) {
		ut_expected_io_set_iov(expected_io, i,
				       (void *)((i + 1) * 0x10000), 256);
	}
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, SPDK_BDEV_IO_NUM_CHILD_IOV - 1,
					   1, 1);
	expected_io->md_buf = md_buf + (SPDK_BDEV_IO_NUM_CHILD_IOV - 1) * 8;
	ut_expected_io_set_iov(expected_io, 0,
			       (void *)((SPDK_BDEV_IO_NUM_CHILD_IOV + 1) * 0x10000), 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, SPDK_BDEV_IO_NUM_CHILD_IOV,
					   1, 1);
	expected_io->md_buf = md_buf + SPDK_BDEV_IO_NUM_CHILD_IOV * 8;
	ut_expected_io_set_iov(expected_io, 0,
			       (void *)((SPDK_BDEV_IO_NUM_CHILD_IOV + 2) * 0x10000), 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	rc = spdk_bdev_readv_blocks_with_md(desc, io_ch, iov, SPDK_BDEV_IO_NUM_CHILD_IOV + 2, md_buf,
					    0, SPDK_BDEV_IO_NUM_CHILD_IOV + 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2);
	stub_complete_io(2);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
1587 */ 1588 for (i = 0; i < SPDK_BDEV_IO_NUM_CHILD_IOV - 1; i++) { 1589 iov[i].iov_base = (void *)((i + 1) * 0x10000); 1590 iov[i].iov_len = 512; 1591 } 1592 iov[SPDK_BDEV_IO_NUM_CHILD_IOV - 1].iov_base = (void *)(SPDK_BDEV_IO_NUM_CHILD_IOV * 0x10000); 1593 iov[SPDK_BDEV_IO_NUM_CHILD_IOV - 1].iov_len = 256; 1594 1595 iov[SPDK_BDEV_IO_NUM_CHILD_IOV].iov_base = (void *)((SPDK_BDEV_IO_NUM_CHILD_IOV + 1) * 0x10000); 1596 iov[SPDK_BDEV_IO_NUM_CHILD_IOV].iov_len = 256; 1597 1598 iov[SPDK_BDEV_IO_NUM_CHILD_IOV + 1].iov_base = (void *)((SPDK_BDEV_IO_NUM_CHILD_IOV + 2) * 0x10000); 1599 iov[SPDK_BDEV_IO_NUM_CHILD_IOV + 1].iov_len = 512; 1600 1601 bdev->optimal_io_boundary = SPDK_BDEV_IO_NUM_CHILD_IOV; 1602 g_io_done = false; 1603 g_io_status = 0; 1604 /* The first expected io should be start from offset 0 to SPDK_BDEV_IO_NUM_CHILD_IOV - 1 */ 1605 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0, 1606 SPDK_BDEV_IO_NUM_CHILD_IOV - 1, SPDK_BDEV_IO_NUM_CHILD_IOV - 1); 1607 expected_io->md_buf = md_buf; 1608 for (i = 0; i < SPDK_BDEV_IO_NUM_CHILD_IOV - 1; i++) { 1609 ut_expected_io_set_iov(expected_io, i, 1610 (void *)((i + 1) * 0x10000), 512); 1611 } 1612 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1613 /* The second expected io should be start from offset SPDK_BDEV_IO_NUM_CHILD_IOV - 1 to SPDK_BDEV_IO_NUM_CHILD_IOV */ 1614 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, SPDK_BDEV_IO_NUM_CHILD_IOV - 1, 1615 1, 2); 1616 expected_io->md_buf = md_buf + (SPDK_BDEV_IO_NUM_CHILD_IOV - 1) * 8; 1617 ut_expected_io_set_iov(expected_io, 0, 1618 (void *)(SPDK_BDEV_IO_NUM_CHILD_IOV * 0x10000), 256); 1619 ut_expected_io_set_iov(expected_io, 1, 1620 (void *)((SPDK_BDEV_IO_NUM_CHILD_IOV + 1) * 0x10000), 256); 1621 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1622 /* The third expected io should be start from offset SPDK_BDEV_IO_NUM_CHILD_IOV to SPDK_BDEV_IO_NUM_CHILD_IOV + 1 */ 1623 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, SPDK_BDEV_IO_NUM_CHILD_IOV, 1624 1, 1); 1625 expected_io->md_buf = md_buf + SPDK_BDEV_IO_NUM_CHILD_IOV * 8; 1626 ut_expected_io_set_iov(expected_io, 0, 1627 (void *)((SPDK_BDEV_IO_NUM_CHILD_IOV + 2) * 0x10000), 512); 1628 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1629 1630 rc = spdk_bdev_readv_blocks_with_md(desc, io_ch, iov, SPDK_BDEV_IO_NUM_CHILD_IOV * 2, md_buf, 1631 0, SPDK_BDEV_IO_NUM_CHILD_IOV + 1, io_done, NULL); 1632 CU_ASSERT(rc == 0); 1633 CU_ASSERT(g_io_done == false); 1634 1635 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 1636 stub_complete_io(1); 1637 CU_ASSERT(g_io_done == false); 1638 1639 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 1640 stub_complete_io(2); 1641 CU_ASSERT(g_io_done == true); 1642 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 1643 1644 /* Test multi vector command that needs to be split due to the IO boundary and 1645 * the capacity of child iovs. Especially test the case when the command is 1646 * split due to the capacity of child iovs, the tail address is not aligned with 1647 * block size and is rewinded to the aligned address. 1648 * 1649 * The iovecs used in read request is complex but is based on the data 1650 * collected in the real issue. We change the base addresses but keep the lengths 1651 * not to loose the credibility of the test. 
1652 */ 1653 bdev->optimal_io_boundary = 128; 1654 g_io_done = false; 1655 g_io_status = 0; 1656 1657 for (i = 0; i < 31; i++) { 1658 iov[i].iov_base = (void *)(0xFEED0000000 + (i << 20)); 1659 iov[i].iov_len = 1024; 1660 } 1661 iov[31].iov_base = (void *)0xFEED1F00000; 1662 iov[31].iov_len = 32768; 1663 iov[32].iov_base = (void *)0xFEED2000000; 1664 iov[32].iov_len = 160; 1665 iov[33].iov_base = (void *)0xFEED2100000; 1666 iov[33].iov_len = 4096; 1667 iov[34].iov_base = (void *)0xFEED2200000; 1668 iov[34].iov_len = 4096; 1669 iov[35].iov_base = (void *)0xFEED2300000; 1670 iov[35].iov_len = 4096; 1671 iov[36].iov_base = (void *)0xFEED2400000; 1672 iov[36].iov_len = 4096; 1673 iov[37].iov_base = (void *)0xFEED2500000; 1674 iov[37].iov_len = 4096; 1675 iov[38].iov_base = (void *)0xFEED2600000; 1676 iov[38].iov_len = 4096; 1677 iov[39].iov_base = (void *)0xFEED2700000; 1678 iov[39].iov_len = 4096; 1679 iov[40].iov_base = (void *)0xFEED2800000; 1680 iov[40].iov_len = 4096; 1681 iov[41].iov_base = (void *)0xFEED2900000; 1682 iov[41].iov_len = 4096; 1683 iov[42].iov_base = (void *)0xFEED2A00000; 1684 iov[42].iov_len = 4096; 1685 iov[43].iov_base = (void *)0xFEED2B00000; 1686 iov[43].iov_len = 12288; 1687 iov[44].iov_base = (void *)0xFEED2C00000; 1688 iov[44].iov_len = 8192; 1689 iov[45].iov_base = (void *)0xFEED2F00000; 1690 iov[45].iov_len = 4096; 1691 iov[46].iov_base = (void *)0xFEED3000000; 1692 iov[46].iov_len = 4096; 1693 iov[47].iov_base = (void *)0xFEED3100000; 1694 iov[47].iov_len = 4096; 1695 iov[48].iov_base = (void *)0xFEED3200000; 1696 iov[48].iov_len = 24576; 1697 iov[49].iov_base = (void *)0xFEED3300000; 1698 iov[49].iov_len = 16384; 1699 iov[50].iov_base = (void *)0xFEED3400000; 1700 iov[50].iov_len = 12288; 1701 iov[51].iov_base = (void *)0xFEED3500000; 1702 iov[51].iov_len = 4096; 1703 iov[52].iov_base = (void *)0xFEED3600000; 1704 iov[52].iov_len = 4096; 1705 iov[53].iov_base = (void *)0xFEED3700000; 1706 iov[53].iov_len = 4096; 1707 iov[54].iov_base = (void *)0xFEED3800000; 1708 iov[54].iov_len = 28672; 1709 iov[55].iov_base = (void *)0xFEED3900000; 1710 iov[55].iov_len = 20480; 1711 iov[56].iov_base = (void *)0xFEED3A00000; 1712 iov[56].iov_len = 4096; 1713 iov[57].iov_base = (void *)0xFEED3B00000; 1714 iov[57].iov_len = 12288; 1715 iov[58].iov_base = (void *)0xFEED3C00000; 1716 iov[58].iov_len = 4096; 1717 iov[59].iov_base = (void *)0xFEED3D00000; 1718 iov[59].iov_len = 4096; 1719 iov[60].iov_base = (void *)0xFEED3E00000; 1720 iov[60].iov_len = 352; 1721 1722 /* The 1st child IO must be from iov[0] to iov[31] split by the capacity 1723 * of child iovs, 1724 */ 1725 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0, 126, 32); 1726 expected_io->md_buf = md_buf; 1727 for (i = 0; i < 32; i++) { 1728 ut_expected_io_set_iov(expected_io, i, iov[i].iov_base, iov[i].iov_len); 1729 } 1730 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1731 1732 /* The 2nd child IO must be from iov[32] to the first 864 bytes of iov[33] 1733 * split by the IO boundary requirement. 
1734 */
1735 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 126, 2, 2);
1736 expected_io->md_buf = md_buf + 126 * 8;
1737 ut_expected_io_set_iov(expected_io, 0, iov[32].iov_base, iov[32].iov_len);
1738 ut_expected_io_set_iov(expected_io, 1, iov[33].iov_base, 864);
1739 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
1740
1741 /* The 3rd child IO must be from the remaining 3232 bytes of iov[33] to
1742 * the first 864 bytes of iov[46] split by the IO boundary requirement.
1743 */
1744 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 128, 128, 14);
1745 expected_io->md_buf = md_buf + 128 * 8;
1746 ut_expected_io_set_iov(expected_io, 0, (void *)((uintptr_t)iov[33].iov_base + 864),
1747 iov[33].iov_len - 864);
1748 ut_expected_io_set_iov(expected_io, 1, iov[34].iov_base, iov[34].iov_len);
1749 ut_expected_io_set_iov(expected_io, 2, iov[35].iov_base, iov[35].iov_len);
1750 ut_expected_io_set_iov(expected_io, 3, iov[36].iov_base, iov[36].iov_len);
1751 ut_expected_io_set_iov(expected_io, 4, iov[37].iov_base, iov[37].iov_len);
1752 ut_expected_io_set_iov(expected_io, 5, iov[38].iov_base, iov[38].iov_len);
1753 ut_expected_io_set_iov(expected_io, 6, iov[39].iov_base, iov[39].iov_len);
1754 ut_expected_io_set_iov(expected_io, 7, iov[40].iov_base, iov[40].iov_len);
1755 ut_expected_io_set_iov(expected_io, 8, iov[41].iov_base, iov[41].iov_len);
1756 ut_expected_io_set_iov(expected_io, 9, iov[42].iov_base, iov[42].iov_len);
1757 ut_expected_io_set_iov(expected_io, 10, iov[43].iov_base, iov[43].iov_len);
1758 ut_expected_io_set_iov(expected_io, 11, iov[44].iov_base, iov[44].iov_len);
1759 ut_expected_io_set_iov(expected_io, 12, iov[45].iov_base, iov[45].iov_len);
1760 ut_expected_io_set_iov(expected_io, 13, iov[46].iov_base, 864);
1761 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
1762
1763 /* The 4th child IO must be from the remaining 3232 bytes of iov[46] to the
1764 * first 864 bytes of iov[52] split by the IO boundary requirement.
1765 */
1766 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 256, 128, 7);
1767 expected_io->md_buf = md_buf + 256 * 8;
1768 ut_expected_io_set_iov(expected_io, 0, (void *)((uintptr_t)iov[46].iov_base + 864),
1769 iov[46].iov_len - 864);
1770 ut_expected_io_set_iov(expected_io, 1, iov[47].iov_base, iov[47].iov_len);
1771 ut_expected_io_set_iov(expected_io, 2, iov[48].iov_base, iov[48].iov_len);
1772 ut_expected_io_set_iov(expected_io, 3, iov[49].iov_base, iov[49].iov_len);
1773 ut_expected_io_set_iov(expected_io, 4, iov[50].iov_base, iov[50].iov_len);
1774 ut_expected_io_set_iov(expected_io, 5, iov[51].iov_base, iov[51].iov_len);
1775 ut_expected_io_set_iov(expected_io, 6, iov[52].iov_base, 864);
1776 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
1777
1778 /* The 5th child IO must be from the remaining 3232 bytes of iov[52] to
1779 * the first 4960 bytes of iov[57] split by the IO boundary requirement.
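 *
 * As a check on the 4960: a 128-block child is 65536 bytes, and the
 * remainders 3232 + 4096 + 28672 + 20480 + 4096 from iov[52..56] cover
 * 60576 of them, leaving 65536 - 60576 = 4960 bytes to take from iov[57].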
1780 */
1781 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 384, 128, 6);
1782 expected_io->md_buf = md_buf + 384 * 8;
1783 ut_expected_io_set_iov(expected_io, 0, (void *)((uintptr_t)iov[52].iov_base + 864),
1784 iov[52].iov_len - 864);
1785 ut_expected_io_set_iov(expected_io, 1, iov[53].iov_base, iov[53].iov_len);
1786 ut_expected_io_set_iov(expected_io, 2, iov[54].iov_base, iov[54].iov_len);
1787 ut_expected_io_set_iov(expected_io, 3, iov[55].iov_base, iov[55].iov_len);
1788 ut_expected_io_set_iov(expected_io, 4, iov[56].iov_base, iov[56].iov_len);
1789 ut_expected_io_set_iov(expected_io, 5, iov[57].iov_base, 4960);
1790 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
1791
1792 /* The 6th child IO must be from the remaining 7328 bytes of iov[57]
1793 * to the first 3936 bytes of iov[59] split by the capacity of child iovs.
1794 */
1795 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 512, 30, 3);
1796 expected_io->md_buf = md_buf + 512 * 8;
1797 ut_expected_io_set_iov(expected_io, 0, (void *)((uintptr_t)iov[57].iov_base + 4960),
1798 iov[57].iov_len - 4960);
1799 ut_expected_io_set_iov(expected_io, 1, iov[58].iov_base, iov[58].iov_len);
1800 ut_expected_io_set_iov(expected_io, 2, iov[59].iov_base, 3936);
1801 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
1802
1803 /* The 7th child IO is from the remaining 160 bytes of iov[59] and iov[60]. */
1804 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 542, 1, 2);
1805 expected_io->md_buf = md_buf + 542 * 8;
1806 ut_expected_io_set_iov(expected_io, 0, (void *)((uintptr_t)iov[59].iov_base + 3936),
1807 iov[59].iov_len - 3936);
1808 ut_expected_io_set_iov(expected_io, 1, iov[60].iov_base, iov[60].iov_len);
1809 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
1810
1811 rc = spdk_bdev_readv_blocks_with_md(desc, io_ch, iov, 61, md_buf,
1812 0, 543, io_done, NULL);
1813 CU_ASSERT(rc == 0);
1814 CU_ASSERT(g_io_done == false);
1815
1816 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
1817 stub_complete_io(1);
1818 CU_ASSERT(g_io_done == false);
1819
1820 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 5);
1821 stub_complete_io(5);
1822 CU_ASSERT(g_io_done == false);
1823
1824 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
1825 stub_complete_io(1);
1826 CU_ASSERT(g_io_done == true);
1827 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
1828 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS);
1829
1830 /* Test a WRITE_ZEROES that would span an I/O boundary. WRITE_ZEROES should not be
1831 * split, so test that.
1832 */
1833 bdev->optimal_io_boundary = 15;
1834 g_io_done = false;
1835 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, 9, 36, 0);
1836 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
1837
1838 rc = spdk_bdev_write_zeroes_blocks(desc, io_ch, 9, 36, io_done, NULL);
1839 CU_ASSERT(rc == 0);
1840 CU_ASSERT(g_io_done == false);
1841 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
1842 stub_complete_io(1);
1843 CU_ASSERT(g_io_done == true);
1844
1845 /* Test an UNMAP. This should also not be split.
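 * (A note on why no split is expected, stated as an assumption about the bdev
 * layer rather than anything this test asserts directly: UNMAP, like FLUSH
 * below, is only split when the bdev advertises its own max-unmap-style
 * limits, which this stub bdev leaves unset, while optimal_io_boundary
 * governs the data-transfer splitting exercised above. The expected_io below
 * therefore describes the request reaching the backend intact.)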
*/
1846 bdev->optimal_io_boundary = 16;
1847 g_io_done = false;
1848 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_UNMAP, 15, 2, 0);
1849 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
1850
1851 rc = spdk_bdev_unmap_blocks(desc, io_ch, 15, 2, io_done, NULL);
1852 CU_ASSERT(rc == 0);
1853 CU_ASSERT(g_io_done == false);
1854 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
1855 stub_complete_io(1);
1856 CU_ASSERT(g_io_done == true);
1857
1858 /* Test a FLUSH. This should also not be split. */
1859 bdev->optimal_io_boundary = 16;
1860 g_io_done = false;
1861 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_FLUSH, 15, 2, 0);
1862 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
1863
1864 rc = spdk_bdev_flush_blocks(desc, io_ch, 15, 2, io_done, NULL);
1865 CU_ASSERT(rc == 0);
1866 CU_ASSERT(g_io_done == false);
1867 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
1868 stub_complete_io(1);
1869 CU_ASSERT(g_io_done == true);
1870
1871 /* Test a COPY. This should also not be split. */
1872 bdev->optimal_io_boundary = 15;
1873 g_io_done = false;
1874 expected_io = ut_alloc_expected_copy_io(SPDK_BDEV_IO_TYPE_COPY, 9, 45, 36);
1875 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
1876
1877 rc = spdk_bdev_copy_blocks(desc, io_ch, 9, 45, 36, io_done, NULL);
1878 CU_ASSERT(rc == 0);
1879 CU_ASSERT(g_io_done == false);
1880 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
1881 stub_complete_io(1);
1882 CU_ASSERT(g_io_done == true);
1883
1884 CU_ASSERT(TAILQ_EMPTY(&g_bdev_ut_channel->expected_io));
1885
1886 /* Child requests return an error status */
1887 bdev->optimal_io_boundary = 16;
1888 iov[0].iov_base = (void *)0x10000;
1889 iov[0].iov_len = 512 * 64;
1890 g_io_exp_status = SPDK_BDEV_IO_STATUS_FAILED;
1891 g_io_done = false;
1892 g_io_status = SPDK_BDEV_IO_STATUS_SUCCESS;
1893
1894 rc = spdk_bdev_readv_blocks(desc, io_ch, iov, 1, 1, 64, io_done, NULL);
1895 CU_ASSERT(rc == 0);
1896 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 5);
1897 stub_complete_io(4);
1898 CU_ASSERT(g_io_done == false);
1899 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS);
1900 stub_complete_io(1);
1901 CU_ASSERT(g_io_done == true);
1902 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED);
1903
1904 /* Test that a multi-vector command is terminated with failure, without
1905 * continuing the splitting process, when one of its child I/Os fails.
1906 * The multi-vector command is the same as the one above that needs to be split
1907 * by strip and then split further due to the capacity of child iovs.
1908 */
1909 for (i = 0; i < SPDK_BDEV_IO_NUM_CHILD_IOV - 1; i++) {
1910 iov[i].iov_base = (void *)((i + 1) * 0x10000);
1911 iov[i].iov_len = 512;
1912 }
1913 iov[SPDK_BDEV_IO_NUM_CHILD_IOV - 1].iov_base = (void *)(SPDK_BDEV_IO_NUM_CHILD_IOV * 0x10000);
1914 iov[SPDK_BDEV_IO_NUM_CHILD_IOV - 1].iov_len = 256;
1915
1916 iov[SPDK_BDEV_IO_NUM_CHILD_IOV].iov_base = (void *)((SPDK_BDEV_IO_NUM_CHILD_IOV + 1) * 0x10000);
1917 iov[SPDK_BDEV_IO_NUM_CHILD_IOV].iov_len = 256;
1918
1919 iov[SPDK_BDEV_IO_NUM_CHILD_IOV + 1].iov_base = (void *)((SPDK_BDEV_IO_NUM_CHILD_IOV + 2) * 0x10000);
1920 iov[SPDK_BDEV_IO_NUM_CHILD_IOV + 1].iov_len = 512;
1921
1922 bdev->optimal_io_boundary = SPDK_BDEV_IO_NUM_CHILD_IOV;
1923
1924 g_io_exp_status = SPDK_BDEV_IO_STATUS_FAILED;
1925 g_io_done = false;
1926 g_io_status = SPDK_BDEV_IO_STATUS_SUCCESS;
1927
1928 rc = spdk_bdev_readv_blocks(desc, io_ch, iov, SPDK_BDEV_IO_NUM_CHILD_IOV * 2, 0,
1929 SPDK_BDEV_IO_NUM_CHILD_IOV + 1, io_done, NULL);
1930 CU_ASSERT(rc == 0);
1931 CU_ASSERT(g_io_done == false);
1932
1933 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
1934 stub_complete_io(1);
1935 CU_ASSERT(g_io_done == true);
1936 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED);
1937
1938 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS;
1939
1940 /* For this test we will create the following conditions to hit the code path where
1941 * we are trying to send an IO following a split that has no iovs because we had to
1942 * trim them for alignment reasons.
1943 *
1944 * - 16K boundary, our IO will start at offset 0 with a length of 0x4200
1945 * - Our IOVs are 0x212 in size so that we run into the 16K boundary at child IOV
1946 * position 30 and overshoot by 0x2e.
1947 * - That means we'll send the IO and loop back to pick up the remaining bytes at
1948 * child IOV index 31. When we do, we find that we have to shorten index 31 by 0x2e
1949 * which eliminates that vector, so we just send the first split IO with 30 vectors
1950 * and let the completion pick up the last 2 vectors.
1951 */
1952 bdev->optimal_io_boundary = 32;
1953 bdev->split_on_optimal_io_boundary = true;
1954 g_io_done = false;
1955
1956 /* Init all parent IOVs to 0x212 */
1957 for (i = 0; i < SPDK_BDEV_IO_NUM_CHILD_IOV + 2; i++) {
1958 iov[i].iov_base = (void *)((i + 1) * 0x10000);
1959 iov[i].iov_len = 0x212;
1960 }
1961
1962 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0, SPDK_BDEV_IO_NUM_CHILD_IOV,
1963 SPDK_BDEV_IO_NUM_CHILD_IOV - 1);
1964 /* expect 0-29 to be 1:1 with the parent iov */
1965 for (i = 0; i < SPDK_BDEV_IO_NUM_CHILD_IOV - 2; i++) {
1966 ut_expected_io_set_iov(expected_io, i, iov[i].iov_base, iov[i].iov_len);
1967 }
1968
1969 /* expect index 30 to be shortened to 0x1e4 (0x212 - 0x2e) because of the alignment,
1970 * where 0x2e is the amount we overshot the 16K boundary
1971 */
1972 ut_expected_io_set_iov(expected_io, SPDK_BDEV_IO_NUM_CHILD_IOV - 2,
1973 (void *)(iov[SPDK_BDEV_IO_NUM_CHILD_IOV - 2].iov_base), 0x1e4);
1974 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
1975
1976 /* The 2nd child IO will have 2 remaining vectors: one that picks up where the
1977 * shortened vector left off, taking us to the next boundary, and then a final
1978 * one to get us to 0x4200 bytes for the IO.
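 *
 * Worked out in hex: 31 entries of 0x212 reach 31 * 0x212 = 0x402e, which is
 * 0x2e past the 0x4000 (16K) boundary, so entry 30 is trimmed to
 * 0x212 - 0x2e = 0x1e4. The remainder of the 0x4200 transfer is
 * 0x4200 - 0x4000 = 0x200 bytes, carried by the 2nd child as 0x2e + 0x1d2.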
1979 */
1980 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, SPDK_BDEV_IO_NUM_CHILD_IOV,
1981 1, 2);
1982 /* position 30 picked up the remaining bytes to the next boundary */
1983 ut_expected_io_set_iov(expected_io, 0,
1984 (void *)(iov[SPDK_BDEV_IO_NUM_CHILD_IOV - 2].iov_base + 0x1e4), 0x2e);
1985
1986 /* position 31 picks up the rest of the transfer to get us to 0x4200 */
1987 ut_expected_io_set_iov(expected_io, 1,
1988 (void *)(iov[SPDK_BDEV_IO_NUM_CHILD_IOV - 1].iov_base), 0x1d2);
1989 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
1990
1991 rc = spdk_bdev_readv_blocks(desc, io_ch, iov, SPDK_BDEV_IO_NUM_CHILD_IOV + 1, 0,
1992 SPDK_BDEV_IO_NUM_CHILD_IOV + 1, io_done, NULL);
1993 CU_ASSERT(rc == 0);
1994 CU_ASSERT(g_io_done == false);
1995
1996 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
1997 stub_complete_io(1);
1998 CU_ASSERT(g_io_done == false);
1999
2000 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
2001 stub_complete_io(1);
2002 CU_ASSERT(g_io_done == true);
2003 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
2004
2005 spdk_put_io_channel(io_ch);
2006 spdk_bdev_close(desc);
2007 free_bdev(bdev);
2008 ut_fini_bdev();
2009 }
2010
2011 static void
2012 bdev_io_max_size_and_segment_split_test(void)
2013 {
2014 struct spdk_bdev *bdev;
2015 struct spdk_bdev_desc *desc = NULL;
2016 struct spdk_io_channel *io_ch;
2017 struct spdk_bdev_opts bdev_opts = {};
2018 struct iovec iov[SPDK_BDEV_IO_NUM_CHILD_IOV * 2];
2019 struct ut_expected_io *expected_io;
2020 uint64_t i;
2021 int rc;
2022
2023 spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts));
2024 bdev_opts.bdev_io_pool_size = 512;
2025 bdev_opts.bdev_io_cache_size = 64;
2026 bdev_opts.opts_size = sizeof(bdev_opts);
2027 ut_init_bdev(&bdev_opts);
2028
2029 bdev = allocate_bdev("bdev0");
2030
2031 rc = spdk_bdev_open_ext(bdev->name, true, bdev_ut_event_cb, NULL, &desc);
2032 CU_ASSERT(rc == 0);
2033 SPDK_CU_ASSERT_FATAL(desc != NULL);
2034 io_ch = spdk_bdev_get_io_channel(desc);
2035 CU_ASSERT(io_ch != NULL);
2036
2037 bdev->split_on_optimal_io_boundary = false;
2038 bdev->optimal_io_boundary = 0;
2039
2040 /* Case 0: max_num_segments == 0,
2041 * but the segment size 2 * 512 > 512.
2042 */
2043 bdev->max_segment_size = 512;
2044 bdev->max_num_segments = 0;
2045 g_io_done = false;
2046
2047 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 14, 2, 2);
2048 ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 512);
2049 ut_expected_io_set_iov(expected_io, 1, (void *)(0xF000 + 512), 512);
2050 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2051
2052 rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 2, io_done, NULL);
2053 CU_ASSERT(rc == 0);
2054 CU_ASSERT(g_io_done == false);
2055
2056 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
2057 stub_complete_io(1);
2058 CU_ASSERT(g_io_done == true);
2059 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
2060
2061 /* Case 1: max_segment_size == 0,
2062 * but the iov count 2 > 1.
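 * With max_num_segments = 1 each parent iov must become its own child IO even
 * though no segment-size limit applies: a 1-block child for iov[0] and an
 * 8-block child for iov[1], as set up below.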
2063 */ 2064 bdev->max_segment_size = 0; 2065 bdev->max_num_segments = 1; 2066 g_io_done = false; 2067 2068 iov[0].iov_base = (void *)0x10000; 2069 iov[0].iov_len = 512; 2070 iov[1].iov_base = (void *)0x20000; 2071 iov[1].iov_len = 8 * 512; 2072 2073 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 14, 1, 1); 2074 ut_expected_io_set_iov(expected_io, 0, iov[0].iov_base, iov[0].iov_len); 2075 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2076 2077 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 15, 8, 1); 2078 ut_expected_io_set_iov(expected_io, 0, iov[1].iov_base, iov[1].iov_len); 2079 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2080 2081 rc = spdk_bdev_readv_blocks(desc, io_ch, iov, 2, 14, 9, io_done, NULL); 2082 CU_ASSERT(rc == 0); 2083 CU_ASSERT(g_io_done == false); 2084 2085 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 2086 stub_complete_io(2); 2087 CU_ASSERT(g_io_done == true); 2088 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 2089 2090 /* Test that a non-vector command is split correctly. 2091 * Set up the expected values before calling spdk_bdev_read_blocks 2092 */ 2093 bdev->max_segment_size = 512; 2094 bdev->max_num_segments = 1; 2095 g_io_done = false; 2096 2097 /* Child IO 0 */ 2098 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 14, 1, 1); 2099 ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 512); 2100 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2101 2102 /* Child IO 1 */ 2103 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 15, 1, 1); 2104 ut_expected_io_set_iov(expected_io, 0, (void *)(0xF000 + 1 * 512), 512); 2105 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2106 2107 /* spdk_bdev_read_blocks will submit the first child immediately. */ 2108 rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 2, io_done, NULL); 2109 CU_ASSERT(rc == 0); 2110 CU_ASSERT(g_io_done == false); 2111 2112 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 2113 stub_complete_io(2); 2114 CU_ASSERT(g_io_done == true); 2115 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 2116 2117 /* Now set up a more complex, multi-vector command that needs to be split, 2118 * including splitting iovecs. 
2119 */ 2120 bdev->max_segment_size = 2 * 512; 2121 bdev->max_num_segments = 1; 2122 g_io_done = false; 2123 2124 iov[0].iov_base = (void *)0x10000; 2125 iov[0].iov_len = 2 * 512; 2126 iov[1].iov_base = (void *)0x20000; 2127 iov[1].iov_len = 4 * 512; 2128 iov[2].iov_base = (void *)0x30000; 2129 iov[2].iov_len = 6 * 512; 2130 2131 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 14, 2, 1); 2132 ut_expected_io_set_iov(expected_io, 0, iov[0].iov_base, 512 * 2); 2133 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2134 2135 /* Split iov[1].size to 2 iov entries then split the segments */ 2136 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 16, 2, 1); 2137 ut_expected_io_set_iov(expected_io, 0, iov[1].iov_base, 512 * 2); 2138 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2139 2140 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 18, 2, 1); 2141 ut_expected_io_set_iov(expected_io, 0, iov[1].iov_base + 512 * 2, 512 * 2); 2142 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2143 2144 /* Split iov[2].size to 3 iov entries then split the segments */ 2145 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 20, 2, 1); 2146 ut_expected_io_set_iov(expected_io, 0, iov[2].iov_base, 512 * 2); 2147 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2148 2149 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 22, 2, 1); 2150 ut_expected_io_set_iov(expected_io, 0, iov[2].iov_base + 512 * 2, 512 * 2); 2151 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2152 2153 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 24, 2, 1); 2154 ut_expected_io_set_iov(expected_io, 0, iov[2].iov_base + 512 * 4, 512 * 2); 2155 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2156 2157 rc = spdk_bdev_writev_blocks(desc, io_ch, iov, 3, 14, 12, io_done, NULL); 2158 CU_ASSERT(rc == 0); 2159 CU_ASSERT(g_io_done == false); 2160 2161 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 6); 2162 stub_complete_io(6); 2163 CU_ASSERT(g_io_done == true); 2164 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 2165 2166 /* Test multi vector command that needs to be split by strip and then needs to be 2167 * split further due to the capacity of parent IO child iovs. 2168 */ 2169 bdev->max_segment_size = 512; 2170 bdev->max_num_segments = 1; 2171 g_io_done = false; 2172 2173 for (i = 0; i < SPDK_BDEV_IO_NUM_CHILD_IOV; i++) { 2174 iov[i].iov_base = (void *)((i + 1) * 0x10000); 2175 iov[i].iov_len = 512 * 2; 2176 } 2177 2178 /* Each input iov.size is split into 2 iovs, 2179 * half of the input iov can fill all child iov entries of a single IO. 
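 *
 * In numbers: each 1024-byte parent iov splits into two 512-byte segments,
 * and with max_num_segments = 1 every segment becomes its own 1-block child
 * IO. One split round fills all 32 child iov entries, covering 16 parent
 * iovs, so the 32 parent iovs yield 32 children per round over 2 rounds.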
2180 */
2181 for (i = 0; i < SPDK_BDEV_IO_NUM_CHILD_IOV / 2; i++) {
2182 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 2 * i, 1, 1);
2183 ut_expected_io_set_iov(expected_io, 0, iov[i].iov_base, 512);
2184 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2185
2186 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 2 * i + 1, 1, 1);
2187 ut_expected_io_set_iov(expected_io, 0, iov[i].iov_base + 512, 512);
2188 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2189 }
2190
2191 /* The remaining iovs are split in the second round */
2192 for (i = SPDK_BDEV_IO_NUM_CHILD_IOV / 2; i < SPDK_BDEV_IO_NUM_CHILD_IOV; i++) {
2193 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, i * 2, 1, 1);
2194 ut_expected_io_set_iov(expected_io, 0, iov[i].iov_base, 512);
2195 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2196
2197 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, i * 2 + 1, 1, 1);
2198 ut_expected_io_set_iov(expected_io, 0, iov[i].iov_base + 512, 512);
2199 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2200 }
2201
2202 rc = spdk_bdev_readv_blocks(desc, io_ch, iov, SPDK_BDEV_IO_NUM_CHILD_IOV, 0,
2203 SPDK_BDEV_IO_NUM_CHILD_IOV * 2, io_done, NULL);
2204 CU_ASSERT(rc == 0);
2205 CU_ASSERT(g_io_done == false);
2206
2207 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == SPDK_BDEV_IO_NUM_CHILD_IOV);
2208 stub_complete_io(SPDK_BDEV_IO_NUM_CHILD_IOV);
2209 CU_ASSERT(g_io_done == false);
2210
2211 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == SPDK_BDEV_IO_NUM_CHILD_IOV);
2212 stub_complete_io(SPDK_BDEV_IO_NUM_CHILD_IOV);
2213 CU_ASSERT(g_io_done == true);
2214 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
2215
2216 /* An invalid case: a divided child IO that is not a multiple
2217 * of the block size is detected,
2218 * and the request exits with an error.
2219 */
2220 bdev->max_segment_size = 512;
2221 bdev->max_num_segments = 1;
2222 g_io_done = false;
2223
2224 iov[0].iov_base = (void *)0x10000;
2225 iov[0].iov_len = 512 + 256;
2226 iov[1].iov_base = (void *)0x20000;
2227 iov[1].iov_len = 256;
2228
2229 /* iov[0] is split into 512 and 256.
2230 * 256 is less than a block size, and the next split round
2231 * finds that its first child IO is smaller than
2232 * the block size, so it exits with an error.
2233 */
2234 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0, 1, 1);
2235 ut_expected_io_set_iov(expected_io, 0, iov[0].iov_base, 512);
2236 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2237
2238 rc = spdk_bdev_readv_blocks(desc, io_ch, iov, 2, 0, 2, io_done, NULL);
2239 CU_ASSERT(rc == 0);
2240 CU_ASSERT(g_io_done == false);
2241
2242 /* The first child IO is OK */
2243 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
2244 stub_complete_io(1);
2245 CU_ASSERT(g_io_done == true);
2246 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
2247
2248 /* Error exit */
2249 stub_complete_io(1);
2250 CU_ASSERT(g_io_done == true);
2251 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED);
2252 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
2253
2254 /* Test a multi-vector command that needs to be split by strip and then needs
2255 * to be split further due to the capacity of child iovs.
2256 *
2257 * In this case, the last two iovs need to be split, but that would exceed the
2258 * capacity of child iovs, so they must wait until the first batch has completed.
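 *
 * Concretely: the first SPDK_BDEV_IO_NUM_CHILD_IOV - 2 single-block iovs stay
 * whole and the next 1024-byte iov splits in two, which already fills all 32
 * child iov entries; the last 1024-byte iov (2 more entries) is deferred to a
 * second, 2-block child IO.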
2259 */
2260 bdev->max_segment_size = 512;
2261 bdev->max_num_segments = SPDK_BDEV_IO_NUM_CHILD_IOV;
2262 g_io_done = false;
2263
2264 for (i = 0; i < SPDK_BDEV_IO_NUM_CHILD_IOV - 2; i++) {
2265 iov[i].iov_base = (void *)((i + 1) * 0x10000);
2266 iov[i].iov_len = 512;
2267 }
2268 for (i = SPDK_BDEV_IO_NUM_CHILD_IOV - 2; i < SPDK_BDEV_IO_NUM_CHILD_IOV; i++) {
2269 iov[i].iov_base = (void *)((i + 1) * 0x10000);
2270 iov[i].iov_len = 512 * 2;
2271 }
2272
2273 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0,
2274 SPDK_BDEV_IO_NUM_CHILD_IOV, SPDK_BDEV_IO_NUM_CHILD_IOV);
2275 /* 0 ~ (SPDK_BDEV_IO_NUM_CHILD_IOV - 2) will not be split */
2276 for (i = 0; i < SPDK_BDEV_IO_NUM_CHILD_IOV - 2; i++) {
2277 ut_expected_io_set_iov(expected_io, i, iov[i].iov_base, iov[i].iov_len);
2278 }
2279 /* (SPDK_BDEV_IO_NUM_CHILD_IOV - 2) is split */
2280 ut_expected_io_set_iov(expected_io, i, iov[i].iov_base, 512);
2281 ut_expected_io_set_iov(expected_io, i + 1, iov[i].iov_base + 512, 512);
2282 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2283
2284 /* The last iov's entries would exceed the parent IO's child iov capacity,
2285  * so it is split in the next round */
2285 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, SPDK_BDEV_IO_NUM_CHILD_IOV, 2, 2);
2286 ut_expected_io_set_iov(expected_io, 0, iov[i + 1].iov_base, 512);
2287 ut_expected_io_set_iov(expected_io, 1, iov[i + 1].iov_base + 512, 512);
2288 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2289
2290 rc = spdk_bdev_readv_blocks(desc, io_ch, iov, SPDK_BDEV_IO_NUM_CHILD_IOV, 0,
2291 SPDK_BDEV_IO_NUM_CHILD_IOV + 2, io_done, NULL);
2292 CU_ASSERT(rc == 0);
2293 CU_ASSERT(g_io_done == false);
2294
2295 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
2296 stub_complete_io(1);
2297 CU_ASSERT(g_io_done == false);
2298
2299 /* Next round */
2300 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
2301 stub_complete_io(1);
2302 CU_ASSERT(g_io_done == true);
2303 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
2304
2305 /* This case is similar to the previous one, but the IO composed of the
2306 * last few child iov entries is not enough for a full blocklen, so those
2307 * entries cannot be put into this IO and must wait for the next one.
2308 */
2309 bdev->max_segment_size = 512;
2310 bdev->max_num_segments = SPDK_BDEV_IO_NUM_CHILD_IOV;
2311 g_io_done = false;
2312
2313 for (i = 0; i < SPDK_BDEV_IO_NUM_CHILD_IOV - 2; i++) {
2314 iov[i].iov_base = (void *)((i + 1) * 0x10000);
2315 iov[i].iov_len = 512;
2316 }
2317
2318 for (i = SPDK_BDEV_IO_NUM_CHILD_IOV - 2; i < SPDK_BDEV_IO_NUM_CHILD_IOV + 2; i++) {
2319 iov[i].iov_base = (void *)((i + 1) * 0x10000);
2320 iov[i].iov_len = 128;
2321 }
2322
2323 /* The first child iovcnt isn't SPDK_BDEV_IO_NUM_CHILD_IOV but
2324 * SPDK_BDEV_IO_NUM_CHILD_IOV - 2, because the remaining 2 iovs
2325 * are not enough for a full blocklen.
2326 */
2326 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0,
2327 SPDK_BDEV_IO_NUM_CHILD_IOV - 2, SPDK_BDEV_IO_NUM_CHILD_IOV - 2);
2328 for (i = 0; i < SPDK_BDEV_IO_NUM_CHILD_IOV - 2; i++) {
2329 ut_expected_io_set_iov(expected_io, i, iov[i].iov_base, iov[i].iov_len);
2330 }
2331 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2332
2333 /* The second child IO waits until the first child IO completes before executing,
2334 * because the combined iovcnt of the two IOs exceeds the child iovcnt of the parent IO.
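 * (The four trailing 128-byte iovs sum to exactly 512 bytes, one block, so
 * none of them can be moved into the first child without leaving it
 * misaligned; they travel together in the second child.)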
2335 * It covers parent iovs SPDK_BDEV_IO_NUM_CHILD_IOV - 2 up to SPDK_BDEV_IO_NUM_CHILD_IOV + 2.
2336 */
2337 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, SPDK_BDEV_IO_NUM_CHILD_IOV - 2,
2338 1, 4);
2339 ut_expected_io_set_iov(expected_io, 0, iov[i].iov_base, iov[i].iov_len);
2340 ut_expected_io_set_iov(expected_io, 1, iov[i + 1].iov_base, iov[i + 1].iov_len);
2341 ut_expected_io_set_iov(expected_io, 2, iov[i + 2].iov_base, iov[i + 2].iov_len);
2342 ut_expected_io_set_iov(expected_io, 3, iov[i + 3].iov_base, iov[i + 3].iov_len);
2343 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2344
2345 rc = spdk_bdev_readv_blocks(desc, io_ch, iov, SPDK_BDEV_IO_NUM_CHILD_IOV + 2, 0,
2346 SPDK_BDEV_IO_NUM_CHILD_IOV - 1, io_done, NULL);
2347 CU_ASSERT(rc == 0);
2348 CU_ASSERT(g_io_done == false);
2349
2350 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
2351 stub_complete_io(1);
2352 CU_ASSERT(g_io_done == false);
2353
2354 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
2355 stub_complete_io(1);
2356 CU_ASSERT(g_io_done == true);
2357 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
2358
2359 /* A very complicated case. Each sg entry exceeds max_segment_size and
2360 * needs to be split, while each child IO must remain a multiple of blocklen
2361 * and the child iovcnt exceeds the parent iovcnt.
2362 */
2363 bdev->max_segment_size = 512 + 128;
2364 bdev->max_num_segments = 3;
2365 g_io_done = false;
2366
2367 for (i = 0; i < SPDK_BDEV_IO_NUM_CHILD_IOV - 2; i++) {
2368 iov[i].iov_base = (void *)((i + 1) * 0x10000);
2369 iov[i].iov_len = 512 + 256;
2370 }
2371
2372 for (i = SPDK_BDEV_IO_NUM_CHILD_IOV - 2; i < SPDK_BDEV_IO_NUM_CHILD_IOV + 2; i++) {
2373 iov[i].iov_base = (void *)((i + 1) * 0x10000);
2374 iov[i].iov_len = 512 + 128;
2375 }
2376
2377 /* Each for() round generates 3 child IOs that occupy 9 child iov entries,
2378 * consuming 4 parent IO iov entries and covering 6 blocks per round.
2379 * Over the 3 rounds that is 9 child IOs and 3 * 9 = 27 child iov entries.
2380 */
2381 for (i = 0; i < 3; i++) {
2382 uint32_t j = i * 4;
2383 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, i * 6, 2, 3);
2384 ut_expected_io_set_iov(expected_io, 0, iov[j].iov_base, 640);
2385 ut_expected_io_set_iov(expected_io, 1, iov[j].iov_base + 640, 128);
2386 ut_expected_io_set_iov(expected_io, 2, iov[j + 1].iov_base, 256);
2387 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2388
2389 /* The child IO must be a multiple of blocklen, so
2390 * iov[j + 2] must be split. If the third entry were also added,
2391 * the multiple-of-blocklen requirement could not be guaranteed. But it still
2392 * occupies one iov entry of the parent's child iov.
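 *
 * Byte math for one round: iov[j] is 768 = 640 + 128 and iov[j + 1]
 * contributes 256 to finish the first 1024-byte (2-block) child; the second
 * child is the remaining 512 of iov[j + 1] plus 512 of iov[j + 2]; the third
 * child is the remaining 256 of iov[j + 2] plus iov[j + 3] split as 640 + 128.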
2393 */
2394 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, i * 6 + 2, 2, 2);
2395 ut_expected_io_set_iov(expected_io, 0, iov[j + 1].iov_base + 256, 512);
2396 ut_expected_io_set_iov(expected_io, 1, iov[j + 2].iov_base, 512);
2397 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2398
2399 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, i * 6 + 4, 2, 3);
2400 ut_expected_io_set_iov(expected_io, 0, iov[j + 2].iov_base + 512, 256);
2401 ut_expected_io_set_iov(expected_io, 1, iov[j + 3].iov_base, 640);
2402 ut_expected_io_set_iov(expected_io, 2, iov[j + 3].iov_base + 640, 128);
2403 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2404 }
2405
2406 /* Child iov position at 27: the 10th child IO's
2407 * parent iov entry index is 3 * 4 = 12 and its block offset is 3 * 6 = 18.
2408 */
2409 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 18, 2, 3);
2410 ut_expected_io_set_iov(expected_io, 0, iov[12].iov_base, 640);
2411 ut_expected_io_set_iov(expected_io, 1, iov[12].iov_base + 640, 128);
2412 ut_expected_io_set_iov(expected_io, 2, iov[13].iov_base, 256);
2413 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2414
2415 /* Child iov position at 30, the 11th child IO */
2416 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 20, 2, 2);
2417 ut_expected_io_set_iov(expected_io, 0, iov[13].iov_base + 256, 512);
2418 ut_expected_io_set_iov(expected_io, 1, iov[14].iov_base, 512);
2419 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2420
2421 /* The 2nd split round, with iovpos 0: the 12th child IO */
2422 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 22, 2, 3);
2423 ut_expected_io_set_iov(expected_io, 0, iov[14].iov_base + 512, 256);
2424 ut_expected_io_set_iov(expected_io, 1, iov[15].iov_base, 640);
2425 ut_expected_io_set_iov(expected_io, 2, iov[15].iov_base + 640, 128);
2426 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2427
2428 /* Generate 9 more child IOs consuming 27 child iov entries.
2429 * Each for() round consumes 4 parent IO iov entries and covers 6 blocks.
2430 * The parent IO iov index starts from 16 and the block offset starts from 24.
2431 */
2432 for (i = 0; i < 3; i++) {
2433 uint32_t j = i * 4 + 16;
2434 uint32_t offset = i * 6 + 24;
2435 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, offset, 2, 3);
2436 ut_expected_io_set_iov(expected_io, 0, iov[j].iov_base, 640);
2437 ut_expected_io_set_iov(expected_io, 1, iov[j].iov_base + 640, 128);
2438 ut_expected_io_set_iov(expected_io, 2, iov[j + 1].iov_base, 256);
2439 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2440
2441 /* The child IO must be a multiple of blocklen, so iov[j + 2] must be
2442 * split. If the third entry were also added, the multiple-of-blocklen
2443 * requirement could not be guaranteed. But it still
2444 * occupies one iov entry of the parent's child iov.
2445 */
2446 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, offset + 2, 2, 2);
2447 ut_expected_io_set_iov(expected_io, 0, iov[j + 1].iov_base + 256, 512);
2448 ut_expected_io_set_iov(expected_io, 1, iov[j + 2].iov_base, 512);
2449 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2450
2451 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, offset + 4, 2, 3);
2452 ut_expected_io_set_iov(expected_io, 0, iov[j + 2].iov_base + 512, 256);
2453 ut_expected_io_set_iov(expected_io, 1, iov[j + 3].iov_base, 640);
2454 ut_expected_io_set_iov(expected_io, 2, iov[j + 3].iov_base + 640, 128);
2455 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2456 }
2457
2458 /* The 22nd child IO, child iov position at 30 */
2459 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 42, 1, 1);
2460 ut_expected_io_set_iov(expected_io, 0, iov[28].iov_base, 512);
2461 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2462
2463 /* The third round */
2464 /* Here is the 23rd child IO and child iovpos is 0 */
2465 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 43, 2, 3);
2466 ut_expected_io_set_iov(expected_io, 0, iov[28].iov_base + 512, 256);
2467 ut_expected_io_set_iov(expected_io, 1, iov[29].iov_base, 640);
2468 ut_expected_io_set_iov(expected_io, 2, iov[29].iov_base + 640, 128);
2469 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2470
2471 /* The 24th child IO */
2472 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 45, 3, 3);
2473 ut_expected_io_set_iov(expected_io, 0, iov[30].iov_base, 640);
2474 ut_expected_io_set_iov(expected_io, 1, iov[31].iov_base, 640);
2475 ut_expected_io_set_iov(expected_io, 2, iov[32].iov_base, 256);
2476 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2477
2478 /* The 25th child IO */
2479 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 48, 2, 2);
2480 ut_expected_io_set_iov(expected_io, 0, iov[32].iov_base + 256, 384);
2481 ut_expected_io_set_iov(expected_io, 1, iov[33].iov_base, 640);
2482 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2483
2484 rc = spdk_bdev_readv_blocks(desc, io_ch, iov, SPDK_BDEV_IO_NUM_CHILD_IOV + 2, 0,
2485 50, io_done, NULL);
2486 CU_ASSERT(rc == 0);
2487 CU_ASSERT(g_io_done == false);
2488
2489 /* The parent IO supports up to 32 child iovs, so at most
2490 * 11 child IOs can be split at a time;
2491 * splitting continues after the first batch is over.
2492 */
2493 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 11);
2494 stub_complete_io(11);
2495 CU_ASSERT(g_io_done == false);
2496
2497 /* The 2nd round */
2498 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 11);
2499 stub_complete_io(11);
2500 CU_ASSERT(g_io_done == false);
2501
2502 /* The last round */
2503 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 3);
2504 stub_complete_io(3);
2505 CU_ASSERT(g_io_done == true);
2506 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
2507
2508 /* Test a WRITE_ZEROES. This should also not be split.
*/
2509 bdev->max_segment_size = 512;
2510 bdev->max_num_segments = 1;
2511 g_io_done = false;
2512
2513 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, 9, 36, 0);
2514 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2515
2516 rc = spdk_bdev_write_zeroes_blocks(desc, io_ch, 9, 36, io_done, NULL);
2517 CU_ASSERT(rc == 0);
2518 CU_ASSERT(g_io_done == false);
2519 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
2520 stub_complete_io(1);
2521 CU_ASSERT(g_io_done == true);
2522
2523 /* Test an UNMAP. This should also not be split. */
2524 g_io_done = false;
2525
2526 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_UNMAP, 15, 4, 0);
2527 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2528
2529 rc = spdk_bdev_unmap_blocks(desc, io_ch, 15, 4, io_done, NULL);
2530 CU_ASSERT(rc == 0);
2531 CU_ASSERT(g_io_done == false);
2532 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
2533 stub_complete_io(1);
2534 CU_ASSERT(g_io_done == true);
2535
2536 /* Test a FLUSH. This should also not be split. */
2537 g_io_done = false;
2538
2539 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_FLUSH, 15, 4, 0);
2540 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2541
2542 rc = spdk_bdev_flush_blocks(desc, io_ch, 15, 4, io_done, NULL);
2543 CU_ASSERT(rc == 0);
2544 CU_ASSERT(g_io_done == false);
2545 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
2546 stub_complete_io(1);
2547 CU_ASSERT(g_io_done == true);
2548
2549 /* Test a COPY. This should also not be split. */
2550 g_io_done = false;
2551
2552 expected_io = ut_alloc_expected_copy_io(SPDK_BDEV_IO_TYPE_COPY, 9, 45, 36);
2553 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2554
2555 rc = spdk_bdev_copy_blocks(desc, io_ch, 9, 45, 36, io_done, NULL);
2556 CU_ASSERT(rc == 0);
2557 CU_ASSERT(g_io_done == false);
2558 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
2559 stub_complete_io(1);
2560 CU_ASSERT(g_io_done == true);
2561
2562 spdk_put_io_channel(io_ch);
2563 spdk_bdev_close(desc);
2564 free_bdev(bdev);
2565 ut_fini_bdev();
2566 }
2567
2568 static void
2569 bdev_io_mix_split_test(void)
2570 {
2571 struct spdk_bdev *bdev;
2572 struct spdk_bdev_desc *desc = NULL;
2573 struct spdk_io_channel *io_ch;
2574 struct spdk_bdev_opts bdev_opts = {};
2575 struct iovec iov[SPDK_BDEV_IO_NUM_CHILD_IOV * 2];
2576 struct ut_expected_io *expected_io;
2577 uint64_t i;
2578 int rc;
2579
2580 spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts));
2581 bdev_opts.bdev_io_pool_size = 512;
2582 bdev_opts.bdev_io_cache_size = 64;
2583 ut_init_bdev(&bdev_opts);
2584
2585 bdev = allocate_bdev("bdev0");
2586
2587 rc = spdk_bdev_open_ext(bdev->name, true, bdev_ut_event_cb, NULL, &desc);
2588 CU_ASSERT(rc == 0);
2589 SPDK_CU_ASSERT_FATAL(desc != NULL);
2590 io_ch = spdk_bdev_get_io_channel(desc);
2591 CU_ASSERT(io_ch != NULL);
2592
2593 /* First case: optimal_io_boundary == max_segment_size * max_num_segments */
2594 bdev->split_on_optimal_io_boundary = true;
2595 bdev->optimal_io_boundary = 16;
2596
2597 bdev->max_segment_size = 512;
2598 bdev->max_num_segments = 16;
2599 g_io_done = false;
2600
2601 /* An IO crossing the IO boundary requires a split.
2602 * Total: 2 child IOs.
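 *
 * The 4-block read at offset 14 crosses the boundary at block 16: blocks
 * 14-15 form the 1st child and blocks 16-17 the 2nd, and max_segment_size =
 * 512 forces each 2-block child into two 512-byte iov entries.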
2603 */
2604
2605 /* The 1st child IO's data is split into multiple segment entries by max_segment_size */
2606 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 14, 2, 2);
2607 ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 512);
2608 ut_expected_io_set_iov(expected_io, 1, (void *)(0xF000 + 512), 512);
2609 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2610
2611 /* The 2nd child IO's data is split into multiple segment entries by max_segment_size */
2612 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 16, 2, 2);
2613 ut_expected_io_set_iov(expected_io, 0, (void *)(0xF000 + 2 * 512), 512);
2614 ut_expected_io_set_iov(expected_io, 1, (void *)(0xF000 + 3 * 512), 512);
2615 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2616
2617 rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 4, io_done, NULL);
2618 CU_ASSERT(rc == 0);
2619 CU_ASSERT(g_io_done == false);
2620
2621 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2);
2622 stub_complete_io(2);
2623 CU_ASSERT(g_io_done == true);
2624 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
2625
2626 /* Second case: optimal_io_boundary > max_segment_size * max_num_segments */
2627 bdev->max_segment_size = 15 * 512;
2628 bdev->max_num_segments = 1;
2629 g_io_done = false;
2630
2631 /* An IO crossing the IO boundary requires a split.
2632 * The 1st child IO's segment size exceeds max_segment_size,
2633 * so it is split into multiple segment entries;
2634 * it then becomes 2 child IOs because of max_num_segments.
2635 * Total: 3 child IOs.
2636 */
2637
2638 /* The first 2 IOs are within one IO boundary.
2639 * Because optimal_io_boundary > max_segment_size * max_num_segments,
2640 * the boundary's worth of data splits into these first 2 IOs.
2641 */
2642 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0, 15, 1);
2643 ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 512 * 15);
2644 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2645
2646 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 15, 1, 1);
2647 ut_expected_io_set_iov(expected_io, 0, (void *)(0xF000 + 512 * 15), 512);
2648 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2649
2650 /* The 3rd child IO exists because of the IO boundary */
2651 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 16, 2, 1);
2652 ut_expected_io_set_iov(expected_io, 0, (void *)(0xF000 + 512 * 16), 512 * 2);
2653 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2654
2655 rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 0, 18, io_done, NULL);
2656 CU_ASSERT(rc == 0);
2657 CU_ASSERT(g_io_done == false);
2658
2659 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 3);
2660 stub_complete_io(3);
2661 CU_ASSERT(g_io_done == true);
2662 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
2663
2664 /* Third case: optimal_io_boundary < max_segment_size * max_num_segments */
2665 bdev->max_segment_size = 17 * 512;
2666 bdev->max_num_segments = 1;
2667 g_io_done = false;
2668
2669 /* An IO crossing the IO boundary requires a split.
2670 * The child IOs need no further segment splitting.
2671 * Total: 2 child IOs.
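 *
 * Here 16 blocks = 8192 bytes fits within the 17 * 512 = 8704-byte
 * max_segment_size, so each boundary-sized child goes down as a single iov
 * entry.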
2672 */
2673
2674 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0, 16, 1);
2675 ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 512 * 16);
2676 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2677
2678 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 16, 2, 1);
2679 ut_expected_io_set_iov(expected_io, 0, (void *)(0xF000 + 512 * 16), 512 * 2);
2680 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2681
2682 rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 0, 18, io_done, NULL);
2683 CU_ASSERT(rc == 0);
2684 CU_ASSERT(g_io_done == false);
2685
2686 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2);
2687 stub_complete_io(2);
2688 CU_ASSERT(g_io_done == true);
2689 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
2690
2691 /* Now set up a more complex, multi-vector command that needs to be split,
2692 * including splitting iovecs.
2693 * optimal_io_boundary < max_segment_size * max_num_segments
2694 */
2695 bdev->max_segment_size = 3 * 512;
2696 bdev->max_num_segments = 6;
2697 g_io_done = false;
2698
2699 iov[0].iov_base = (void *)0x10000;
2700 iov[0].iov_len = 4 * 512;
2701 iov[1].iov_base = (void *)0x20000;
2702 iov[1].iov_len = 4 * 512;
2703 iov[2].iov_base = (void *)0x30000;
2704 iov[2].iov_len = 10 * 512;
2705
2706 /* An IO crossing the IO boundary requires a split.
2707 * The 1st child IO's segment size exceeds max_segment_size, and after
2708 * splitting on segment size, num_segments exceeds max_num_segments.
2709 * So the 1st child IO is split into 2 child IOs.
2710 * Total: 3 child IOs.
2711 */
2712
2713 /* The first 2 IOs are within one IO boundary.
2714 * After splitting on segment size, the segment count exceeds max_num_segments,
2715 * so the data splits into 2 child IOs.
2716 */
2717 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 0, 14, 6);
2718 ut_expected_io_set_iov(expected_io, 0, iov[0].iov_base, 512 * 3);
2719 ut_expected_io_set_iov(expected_io, 1, iov[0].iov_base + 512 * 3, 512);
2720 ut_expected_io_set_iov(expected_io, 2, iov[1].iov_base, 512 * 3);
2721 ut_expected_io_set_iov(expected_io, 3, iov[1].iov_base + 512 * 3, 512);
2722 ut_expected_io_set_iov(expected_io, 4, iov[2].iov_base, 512 * 3);
2723 ut_expected_io_set_iov(expected_io, 5, iov[2].iov_base + 512 * 3, 512 * 3);
2724 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2725
2726 /* The 2nd child IO carries the leftover segment entries */
2727 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 14, 2, 1);
2728 ut_expected_io_set_iov(expected_io, 0, iov[2].iov_base + 512 * 6, 512 * 2);
2729 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2730
2731 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 16, 2, 1);
2732 ut_expected_io_set_iov(expected_io, 0, iov[2].iov_base + 512 * 8, 512 * 2);
2733 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2734
2735 rc = spdk_bdev_writev_blocks(desc, io_ch, iov, 3, 0, 18, io_done, NULL);
2736 CU_ASSERT(rc == 0);
2737 CU_ASSERT(g_io_done == false);
2738
2739 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 3);
2740 stub_complete_io(3);
2741 CU_ASSERT(g_io_done == true);
2742 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
2743
2744 /* A very complicated case. Each sg entry exceeds max_segment_size
2745 * and the IO splits on the IO boundary.
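 * (Each 2048-byte parent iov splits as 1536 + 512 under the 3 * 512
 * max_segment_size, and one 16-block boundary spans 4 parent iovs, i.e.
 * 8 child iov entries per child IO.)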
2746 * optimal_io_boundary < max_segment_size * max_num_segments
2747 */
2748 bdev->max_segment_size = 3 * 512;
2749 bdev->max_num_segments = SPDK_BDEV_IO_NUM_CHILD_IOV;
2750 g_io_done = false;
2751
2752 for (i = 0; i < 20; i++) {
2753 iov[i].iov_base = (void *)((i + 1) * 0x10000);
2754 iov[i].iov_len = 512 * 4;
2755 }
2756
2757 /* An IO crossing the IO boundary requires a split.
2758 * Based on the offset and the IO boundary, the 80-block length splits into 5 child IOs.
2759 * Each iov entry needs to be split into 2 entries because of max_segment_size.
2760 * Total: 5 child IOs.
2761 */
2762
2763 /* 4 iov entries fall within one IO boundary and each splits into 2,
2764 * so each child IO occupies 8 child iov entries.
2765 */
2766 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 0, 16, 8);
2767 for (i = 0; i < 4; i++) {
2768 int iovcnt = i * 2;
2769 ut_expected_io_set_iov(expected_io, iovcnt, iov[i].iov_base, 512 * 3);
2770 ut_expected_io_set_iov(expected_io, iovcnt + 1, iov[i].iov_base + 512 * 3, 512);
2771 }
2772 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2773
2774 /* The 2nd child IO; 16 child iov entries of the parent IO consumed in total */
2775 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 16, 16, 8);
2776 for (i = 4; i < 8; i++) {
2777 int iovcnt = (i - 4) * 2;
2778 ut_expected_io_set_iov(expected_io, iovcnt, iov[i].iov_base, 512 * 3);
2779 ut_expected_io_set_iov(expected_io, iovcnt + 1, iov[i].iov_base + 512 * 3, 512);
2780 }
2781 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2782
2783 /* The 3rd child IO; 24 child iov entries of the parent IO consumed in total */
2784 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 32, 16, 8);
2785 for (i = 8; i < 12; i++) {
2786 int iovcnt = (i - 8) * 2;
2787 ut_expected_io_set_iov(expected_io, iovcnt, iov[i].iov_base, 512 * 3);
2788 ut_expected_io_set_iov(expected_io, iovcnt + 1, iov[i].iov_base + 512 * 3, 512);
2789 }
2790 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2791
2792 /* The 4th child IO; 32 child iov entries of the parent IO consumed in total */
2793 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 48, 16, 8);
2794 for (i = 12; i < 16; i++) {
2795 int iovcnt = (i - 12) * 2;
2796 ut_expected_io_set_iov(expected_io, iovcnt, iov[i].iov_base, 512 * 3);
2797 ut_expected_io_set_iov(expected_io, iovcnt + 1, iov[i].iov_base + 512 * 3, 512);
2798 }
2799 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2800
2801 /* The 5th child IO must wait for the next split round because of the
2802 * child iov entry limit.
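 *
 * After four children, 4 * 8 = 32 child iov entries are in use, so although
 * only 16 blocks remain, the 5th child cannot be described until the first
 * batch completes; hence the 4 outstanding IOs in round one and 1 in round
 * two asserted below.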
2803 */ 2804 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 64, 16, 8); 2805 for (i = 16; i < 20; i++) { 2806 int iovcnt = (i - 16) * 2; 2807 ut_expected_io_set_iov(expected_io, iovcnt, iov[i].iov_base, 512 * 3); 2808 ut_expected_io_set_iov(expected_io, iovcnt + 1, iov[i].iov_base + 512 * 3, 512); 2809 } 2810 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2811 2812 rc = spdk_bdev_writev_blocks(desc, io_ch, iov, 20, 0, 80, io_done, NULL); 2813 CU_ASSERT(rc == 0); 2814 CU_ASSERT(g_io_done == false); 2815 2816 /* First split round */ 2817 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 4); 2818 stub_complete_io(4); 2819 CU_ASSERT(g_io_done == false); 2820 2821 /* Second split round */ 2822 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 2823 stub_complete_io(1); 2824 CU_ASSERT(g_io_done == true); 2825 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 2826 2827 spdk_put_io_channel(io_ch); 2828 spdk_bdev_close(desc); 2829 free_bdev(bdev); 2830 ut_fini_bdev(); 2831 } 2832 2833 static void 2834 bdev_io_split_with_io_wait(void) 2835 { 2836 struct spdk_bdev *bdev; 2837 struct spdk_bdev_desc *desc = NULL; 2838 struct spdk_io_channel *io_ch; 2839 struct spdk_bdev_channel *channel; 2840 struct spdk_bdev_mgmt_channel *mgmt_ch; 2841 struct spdk_bdev_opts bdev_opts = {}; 2842 struct iovec iov[3]; 2843 struct ut_expected_io *expected_io; 2844 int rc; 2845 2846 spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts)); 2847 bdev_opts.bdev_io_pool_size = 2; 2848 bdev_opts.bdev_io_cache_size = 1; 2849 ut_init_bdev(&bdev_opts); 2850 2851 bdev = allocate_bdev("bdev0"); 2852 2853 rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc); 2854 CU_ASSERT(rc == 0); 2855 CU_ASSERT(desc != NULL); 2856 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 2857 io_ch = spdk_bdev_get_io_channel(desc); 2858 CU_ASSERT(io_ch != NULL); 2859 channel = spdk_io_channel_get_ctx(io_ch); 2860 mgmt_ch = channel->shared_resource->mgmt_ch; 2861 2862 bdev->optimal_io_boundary = 16; 2863 bdev->split_on_optimal_io_boundary = true; 2864 2865 rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, NULL); 2866 CU_ASSERT(rc == 0); 2867 2868 /* Now test that a single-vector command is split correctly. 2869 * Offset 14, length 8, payload 0xF000 2870 * Child - Offset 14, length 2, payload 0xF000 2871 * Child - Offset 16, length 6, payload 0xF000 + 2 * 512 2872 * 2873 * Set up the expected values before calling spdk_bdev_read_blocks 2874 */ 2875 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 14, 2, 1); 2876 ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 2 * 512); 2877 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2878 2879 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 16, 6, 1); 2880 ut_expected_io_set_iov(expected_io, 0, (void *)(0xF000 + 2 * 512), 6 * 512); 2881 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2882 2883 /* The following children will be submitted sequentially due to the capacity of 2884 * spdk_bdev_io. 
2885 */ 2886 2887 /* The first child I/O will be queued to wait until an spdk_bdev_io becomes available */ 2888 rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 8, io_done, NULL); 2889 CU_ASSERT(rc == 0); 2890 CU_ASSERT(!TAILQ_EMPTY(&mgmt_ch->io_wait_queue)); 2891 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 2892 2893 /* Completing the first read I/O will submit the first child */ 2894 stub_complete_io(1); 2895 CU_ASSERT(TAILQ_EMPTY(&mgmt_ch->io_wait_queue)); 2896 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 2897 2898 /* Completing the first child will submit the second child */ 2899 stub_complete_io(1); 2900 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 2901 2902 /* Complete the second child I/O. This should result in our callback getting 2903 * invoked since the parent I/O is now complete. 2904 */ 2905 stub_complete_io(1); 2906 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 2907 2908 /* Now set up a more complex, multi-vector command that needs to be split, 2909 * including splitting iovecs. 2910 */ 2911 iov[0].iov_base = (void *)0x10000; 2912 iov[0].iov_len = 512; 2913 iov[1].iov_base = (void *)0x20000; 2914 iov[1].iov_len = 20 * 512; 2915 iov[2].iov_base = (void *)0x30000; 2916 iov[2].iov_len = 11 * 512; 2917 2918 g_io_done = false; 2919 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 14, 2, 2); 2920 ut_expected_io_set_iov(expected_io, 0, (void *)0x10000, 512); 2921 ut_expected_io_set_iov(expected_io, 1, (void *)0x20000, 512); 2922 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2923 2924 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 16, 16, 1); 2925 ut_expected_io_set_iov(expected_io, 0, (void *)(0x20000 + 512), 16 * 512); 2926 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2927 2928 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 32, 14, 2); 2929 ut_expected_io_set_iov(expected_io, 0, (void *)(0x20000 + 17 * 512), 3 * 512); 2930 ut_expected_io_set_iov(expected_io, 1, (void *)0x30000, 11 * 512); 2931 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2932 2933 rc = spdk_bdev_writev_blocks(desc, io_ch, iov, 3, 14, 32, io_done, NULL); 2934 CU_ASSERT(rc == 0); 2935 CU_ASSERT(g_io_done == false); 2936 2937 /* The following children will be submitted sequentially due to the capacity of 2938 * spdk_bdev_io. 2939 */ 2940 2941 /* Completing the first child will submit the second child */ 2942 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 2943 stub_complete_io(1); 2944 CU_ASSERT(g_io_done == false); 2945 2946 /* Completing the second child will submit the third child */ 2947 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 2948 stub_complete_io(1); 2949 CU_ASSERT(g_io_done == false); 2950 2951 /* Completing the third child will result in our callback getting invoked 2952 * since the parent I/O is now complete. 
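 *
 * The strictly serial submission in this test follows from the tiny pool
 * configured above (bdev_io_pool_size = 2, bdev_io_cache_size = 1): the
 * parent holds one spdk_bdev_io, leaving exactly one for whichever child is
 * currently in flight.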
2953 */ 2954 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 2955 stub_complete_io(1); 2956 CU_ASSERT(g_io_done == true); 2957 2958 CU_ASSERT(TAILQ_EMPTY(&g_bdev_ut_channel->expected_io)); 2959 2960 spdk_put_io_channel(io_ch); 2961 spdk_bdev_close(desc); 2962 free_bdev(bdev); 2963 ut_fini_bdev(); 2964 } 2965 2966 static void 2967 bdev_io_write_unit_split_test(void) 2968 { 2969 struct spdk_bdev *bdev; 2970 struct spdk_bdev_desc *desc = NULL; 2971 struct spdk_io_channel *io_ch; 2972 struct spdk_bdev_opts bdev_opts = {}; 2973 struct iovec iov[SPDK_BDEV_IO_NUM_CHILD_IOV * 4]; 2974 struct ut_expected_io *expected_io; 2975 uint64_t i; 2976 int rc; 2977 2978 spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts)); 2979 bdev_opts.bdev_io_pool_size = 512; 2980 bdev_opts.bdev_io_cache_size = 64; 2981 ut_init_bdev(&bdev_opts); 2982 2983 bdev = allocate_bdev("bdev0"); 2984 2985 rc = spdk_bdev_open_ext(bdev->name, true, bdev_ut_event_cb, NULL, &desc); 2986 CU_ASSERT(rc == 0); 2987 SPDK_CU_ASSERT_FATAL(desc != NULL); 2988 io_ch = spdk_bdev_get_io_channel(desc); 2989 CU_ASSERT(io_ch != NULL); 2990 2991 /* Write I/O 2x larger than write_unit_size should get split into 2 I/Os */ 2992 bdev->write_unit_size = 32; 2993 bdev->split_on_write_unit = true; 2994 g_io_done = false; 2995 2996 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 0, 32, 1); 2997 ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 32 * 512); 2998 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2999 3000 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 32, 32, 1); 3001 ut_expected_io_set_iov(expected_io, 0, (void *)(0xF000 + 32 * 512), 32 * 512); 3002 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 3003 3004 rc = spdk_bdev_write_blocks(desc, io_ch, (void *)0xF000, 0, 64, io_done, NULL); 3005 CU_ASSERT(rc == 0); 3006 CU_ASSERT(g_io_done == false); 3007 3008 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 3009 stub_complete_io(2); 3010 CU_ASSERT(g_io_done == true); 3011 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 3012 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS); 3013 3014 /* Same as above but with optimal_io_boundary < write_unit_size - the I/O should be split 3015 * based on write_unit_size, not optimal_io_boundary */ 3016 bdev->split_on_optimal_io_boundary = true; 3017 bdev->optimal_io_boundary = 16; 3018 g_io_done = false; 3019 3020 rc = spdk_bdev_write_blocks(desc, io_ch, (void *)0xF000, 0, 64, io_done, NULL); 3021 CU_ASSERT(rc == 0); 3022 CU_ASSERT(g_io_done == false); 3023 3024 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 3025 stub_complete_io(2); 3026 CU_ASSERT(g_io_done == true); 3027 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 3028 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS); 3029 3030 /* Write I/O should fail if it is smaller than write_unit_size */ 3031 g_io_done = false; 3032 3033 rc = spdk_bdev_write_blocks(desc, io_ch, (void *)0xF000, 0, 31, io_done, NULL); 3034 CU_ASSERT(rc == 0); 3035 CU_ASSERT(g_io_done == false); 3036 3037 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 3038 poll_threads(); 3039 CU_ASSERT(g_io_done == true); 3040 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 3041 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED); 3042 3043 /* Same for I/O not aligned to write_unit_size */ 3044 g_io_done = false; 3045 3046 rc = spdk_bdev_write_blocks(desc, io_ch, (void *)0xF000, 1, 32, io_done, NULL); 3047 CU_ASSERT(rc == 0); 3048 CU_ASSERT(g_io_done == false); 
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
	poll_threads();
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
	CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED);

	/* Write should fail if it needs to be split but there are not enough iovs to submit
	 * an entire write unit */
	bdev->write_unit_size = SPDK_COUNTOF(iov) / 2;
	g_io_done = false;

	for (i = 0; i < SPDK_COUNTOF(iov); i++) {
		iov[i].iov_base = (void *)(0x1000 + 512 * i);
		iov[i].iov_len = 512;
	}

	rc = spdk_bdev_writev_blocks(desc, io_ch, iov, SPDK_COUNTOF(iov), 0, SPDK_COUNTOF(iov),
				     io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
	poll_threads();
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
	CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED);

	spdk_put_io_channel(io_ch);
	spdk_bdev_close(desc);
	free_bdev(bdev);
	ut_fini_bdev();
}
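
/* bdev_io_alignment() exercises the bounce buffer path: required_alignment is
 * stored as a power-of-two exponent, and when a caller's buffer does not satisfy
 * it, the bdev layer redirects the I/O through an internal aligned iov
 * (internal.bounce_iov), remembering the original iov count in
 * internal.orig_iovcnt until the I/O completes.
 */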

static void
bdev_io_alignment(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc = NULL;
	struct spdk_io_channel *io_ch;
	struct spdk_bdev_opts bdev_opts = {};
	int rc;
	void *buf = NULL;
	struct iovec iovs[2];
	int iovcnt;
	uint64_t alignment;

	spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts));
	bdev_opts.bdev_io_pool_size = 20;
	bdev_opts.bdev_io_cache_size = 2;
	ut_init_bdev(&bdev_opts);

	fn_table.submit_request = stub_submit_request_get_buf;
	bdev = allocate_bdev("bdev0");

	rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc);
	CU_ASSERT(rc == 0);
	CU_ASSERT(desc != NULL);
	CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc));
	io_ch = spdk_bdev_get_io_channel(desc);
	CU_ASSERT(io_ch != NULL);

	/* Create aligned buffer */
	rc = posix_memalign(&buf, 4096, 8192);
	SPDK_CU_ASSERT_FATAL(rc == 0);

	/* Pass aligned single buffer with no alignment required */
	alignment = 1;
	bdev->required_alignment = spdk_u32log2(alignment);

	rc = spdk_bdev_write_blocks(desc, io_ch, buf, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	stub_complete_io(1);
	CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt,
				    alignment));

	rc = spdk_bdev_read_blocks(desc, io_ch, buf, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	stub_complete_io(1);
	CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt,
				    alignment));

	/* Pass unaligned single buffer with no alignment required */
	alignment = 1;
	bdev->required_alignment = spdk_u32log2(alignment);

	rc = spdk_bdev_write_blocks(desc, io_ch, buf + 4, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);
	CU_ASSERT(g_bdev_io->u.bdev.iovs[0].iov_base == buf + 4);
	stub_complete_io(1);

	rc = spdk_bdev_read_blocks(desc, io_ch, buf + 4, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);
	CU_ASSERT(g_bdev_io->u.bdev.iovs[0].iov_base == buf + 4);
	stub_complete_io(1);

	/* Pass unaligned single buffer with 512 alignment required */
	alignment = 512;
	bdev->required_alignment = spdk_u32log2(alignment);

	rc = spdk_bdev_write_blocks(desc, io_ch, buf + 4, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 1);
	CU_ASSERT(g_bdev_io->u.bdev.iovs == &g_bdev_io->internal.bounce_iov);
	CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt,
				    alignment));
	stub_complete_io(1);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);

	rc = spdk_bdev_read_blocks(desc, io_ch, buf + 4, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 1);
	CU_ASSERT(g_bdev_io->u.bdev.iovs == &g_bdev_io->internal.bounce_iov);
	CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt,
				    alignment));
	stub_complete_io(1);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);

	/* Pass unaligned single buffer with 4096 alignment required */
	alignment = 4096;
	bdev->required_alignment = spdk_u32log2(alignment);

	rc = spdk_bdev_write_blocks(desc, io_ch, buf + 8, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 1);
	CU_ASSERT(g_bdev_io->u.bdev.iovs == &g_bdev_io->internal.bounce_iov);
	CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt,
				    alignment));
	stub_complete_io(1);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);

	rc = spdk_bdev_read_blocks(desc, io_ch, buf + 8, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 1);
	CU_ASSERT(g_bdev_io->u.bdev.iovs == &g_bdev_io->internal.bounce_iov);
	CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt,
				    alignment));
	stub_complete_io(1);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);

	/* Pass aligned iovs with no alignment required */
	alignment = 1;
	bdev->required_alignment = spdk_u32log2(alignment);

	iovcnt = 1;
	iovs[0].iov_base = buf;
	iovs[0].iov_len = 512;

	rc = spdk_bdev_writev(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);
	stub_complete_io(1);
	CU_ASSERT(g_bdev_io->u.bdev.iovs[0].iov_base == iovs[0].iov_base);

	rc = spdk_bdev_readv(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);
	stub_complete_io(1);
	CU_ASSERT(g_bdev_io->u.bdev.iovs[0].iov_base == iovs[0].iov_base);

	/* Pass unaligned iovs with no alignment required */
	alignment = 1;
	bdev->required_alignment = spdk_u32log2(alignment);

	iovcnt = 2;
	iovs[0].iov_base = buf + 16;
	iovs[0].iov_len = 256;
	iovs[1].iov_base = buf + 16 + 256 + 32;
	iovs[1].iov_len = 256;

	rc = spdk_bdev_writev(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);
	stub_complete_io(1);
	CU_ASSERT(g_bdev_io->u.bdev.iovs[0].iov_base == iovs[0].iov_base);

	rc = spdk_bdev_readv(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);
	stub_complete_io(1);
	CU_ASSERT(g_bdev_io->u.bdev.iovs[0].iov_base == iovs[0].iov_base);

	/* Pass unaligned iov with 2048 alignment required */
	alignment = 2048;
	bdev->required_alignment = spdk_u32log2(alignment);

	iovcnt = 2;
	iovs[0].iov_base = buf + 16;
	iovs[0].iov_len = 256;
	iovs[1].iov_base = buf + 16 + 256 + 32;
	iovs[1].iov_len = 256;

	rc = spdk_bdev_writev(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == iovcnt);
	CU_ASSERT(g_bdev_io->u.bdev.iovs == &g_bdev_io->internal.bounce_iov);
	CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt,
				    alignment));
	stub_complete_io(1);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);

	rc = spdk_bdev_readv(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == iovcnt);
	CU_ASSERT(g_bdev_io->u.bdev.iovs == &g_bdev_io->internal.bounce_iov);
	CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt,
				    alignment));
	stub_complete_io(1);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);

	/* Pass iov without an allocated buffer, with no alignment required */
	alignment = 1;
	bdev->required_alignment = spdk_u32log2(alignment);

	iovcnt = 1;
	iovs[0].iov_base = NULL;
	iovs[0].iov_len = 0;

	rc = spdk_bdev_readv(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);
	CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt,
				    alignment));
	stub_complete_io(1);

	/* Pass iov without an allocated buffer, with 1024 alignment required */
	alignment = 1024;
	bdev->required_alignment = spdk_u32log2(alignment);

	iovcnt = 1;
	iovs[0].iov_base = NULL;
	iovs[0].iov_len = 0;

	rc = spdk_bdev_readv(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);
	CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt,
				    alignment));
	stub_complete_io(1);

	spdk_put_io_channel(io_ch);
	spdk_bdev_close(desc);
	free_bdev(bdev);
	fn_table.submit_request = stub_submit_request;
	ut_fini_bdev();

	free(buf);
}
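
/* bdev_io_alignment_with_boundary() combines the bounce buffer path with
 * optimal_io_boundary splitting, so a single unaligned request can produce
 * several aligned child I/Os. For example, 160 blocks starting at offset 1 with
 * a 128-block boundary split into children of 127 and 33 blocks
 * (63.5KiB + 16.5KiB).
 */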

static void
bdev_io_alignment_with_boundary(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc = NULL;
	struct spdk_io_channel *io_ch;
	struct spdk_bdev_opts bdev_opts = {};
	int rc;
	void *buf = NULL;
	struct iovec iovs[2];
	int iovcnt;
	uint64_t alignment;

	spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts));
	bdev_opts.bdev_io_pool_size = 20;
	bdev_opts.bdev_io_cache_size = 2;
	bdev_opts.opts_size = sizeof(bdev_opts);
	ut_init_bdev(&bdev_opts);

	fn_table.submit_request = stub_submit_request_get_buf;
	bdev = allocate_bdev("bdev0");

	rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc);
	CU_ASSERT(rc == 0);
	CU_ASSERT(desc != NULL);
	CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc));
	io_ch = spdk_bdev_get_io_channel(desc);
	CU_ASSERT(io_ch != NULL);

	/* Create aligned buffer */
	rc = posix_memalign(&buf, 4096, 131072);
	SPDK_CU_ASSERT_FATAL(rc == 0);
	g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS;

	/* 512 * 3 with 2 IO boundary, allocate small data buffer from bdev layer */
	alignment = 512;
	bdev->required_alignment = spdk_u32log2(alignment);
	bdev->optimal_io_boundary = 2;
	bdev->split_on_optimal_io_boundary = true;

	iovcnt = 1;
	iovs[0].iov_base = NULL;
	iovs[0].iov_len = 512 * 3;

	rc = spdk_bdev_readv_blocks(desc, io_ch, iovs, iovcnt, 1, 3, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2);
	stub_complete_io(2);

	/* 8KiB with 16 IO boundary, allocate large data buffer from bdev layer */
	alignment = 512;
	bdev->required_alignment = spdk_u32log2(alignment);
	bdev->optimal_io_boundary = 16;
	bdev->split_on_optimal_io_boundary = true;

	iovcnt = 1;
	iovs[0].iov_base = NULL;
	iovs[0].iov_len = 512 * 16;

	rc = spdk_bdev_readv_blocks(desc, io_ch, iovs, iovcnt, 1, 16, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2);
	stub_complete_io(2);

	/* 512 * 160 with 128 IO boundary, 63.5KiB + 16.5KiB for the two child requests */
	alignment = 512;
	bdev->required_alignment = spdk_u32log2(alignment);
	bdev->optimal_io_boundary = 128;
	bdev->split_on_optimal_io_boundary = true;

	iovcnt = 1;
	iovs[0].iov_base = buf + 16;
	iovs[0].iov_len = 512 * 160;
	rc = spdk_bdev_readv_blocks(desc, io_ch, iovs, iovcnt, 1, 160, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2);
	stub_complete_io(2);

	/* 512 * 3 with 2 IO boundary */
	alignment = 512;
	bdev->required_alignment = spdk_u32log2(alignment);
	bdev->optimal_io_boundary = 2;
	bdev->split_on_optimal_io_boundary = true;

	iovcnt = 2;
	iovs[0].iov_base = buf + 16;
	iovs[0].iov_len = 512;
	iovs[1].iov_base = buf + 16 + 512 + 32;
	iovs[1].iov_len = 1024;

	rc = spdk_bdev_writev_blocks(desc, io_ch, iovs, iovcnt, 1, 3, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2);
	stub_complete_io(2);

	rc = spdk_bdev_readv_blocks(desc, io_ch, iovs, iovcnt, 1, 3, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2);
	stub_complete_io(2);

	/* 512 * 64 with 32 IO boundary */
	bdev->optimal_io_boundary = 32;
	iovcnt = 2;
	iovs[0].iov_base = buf + 16;
	iovs[0].iov_len = 16384;
	iovs[1].iov_base = buf + 16 + 16384 + 32;
	iovs[1].iov_len = 16384;

	rc = spdk_bdev_writev_blocks(desc, io_ch, iovs, iovcnt, 1, 64, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 3);
	stub_complete_io(3);

	rc = spdk_bdev_readv_blocks(desc, io_ch, iovs, iovcnt, 1, 64, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 3);
	stub_complete_io(3);

	/* 512 * 160 with 32 IO boundary */
	iovcnt = 1;
	iovs[0].iov_base = buf + 16;
	iovs[0].iov_len = 16384 + 65536;

	rc = spdk_bdev_writev_blocks(desc, io_ch, iovs, iovcnt, 1, 160, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 6);
	stub_complete_io(6);

	spdk_put_io_channel(io_ch);
	spdk_bdev_close(desc);
	free_bdev(bdev);
	fn_table.submit_request = stub_submit_request;
	ut_fini_bdev();

	free(buf);
}

static void
histogram_status_cb(void *cb_arg, int status)
{
	g_status = status;
}

static void
histogram_data_cb(void *cb_arg, int status, struct spdk_histogram_data *histogram)
{
	g_status = status;
	g_histogram = histogram;
}

static void
histogram_io_count(void *ctx, uint64_t start, uint64_t end, uint64_t count,
		   uint64_t total, uint64_t so_far)
{
	g_count += count;
}

static void
histogram_channel_data_cb(void *cb_arg, int status, struct spdk_histogram_data *histogram)
{
	spdk_histogram_data_fn cb_fn = cb_arg;

	g_status = status;

	if (status == 0) {
		spdk_histogram_data_iterate(histogram, cb_fn, NULL);
	}
}

static void
bdev_histograms(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc = NULL;
	struct spdk_io_channel *ch;
	struct spdk_histogram_data *histogram;
	uint8_t buf[4096];
	int rc;

	ut_init_bdev(NULL);

	bdev = allocate_bdev("bdev");

	rc = spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc);
	CU_ASSERT(rc == 0);
	CU_ASSERT(desc != NULL);
	CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc));

	ch = spdk_bdev_get_io_channel(desc);
	CU_ASSERT(ch != NULL);

	/* Enable histogram */
	g_status = -1;
	spdk_bdev_histogram_enable(bdev, histogram_status_cb, NULL, true);
	poll_threads();
	CU_ASSERT(g_status == 0);
	CU_ASSERT(bdev->internal.histogram_enabled == true);

	/* Allocate histogram */
	histogram = spdk_histogram_data_alloc();
	SPDK_CU_ASSERT_FATAL(histogram != NULL);

	/* Check if histogram is zeroed */
	spdk_bdev_histogram_get(bdev, histogram, histogram_data_cb, NULL);
	poll_threads();
	CU_ASSERT(g_status == 0);
	SPDK_CU_ASSERT_FATAL(g_histogram != NULL);

	g_count = 0;
	spdk_histogram_data_iterate(g_histogram, histogram_io_count, NULL);

	CU_ASSERT(g_count == 0);

	rc = spdk_bdev_write_blocks(desc, ch, buf, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);

	spdk_delay_us(10);
	stub_complete_io(1);
	poll_threads();

	rc = spdk_bdev_read_blocks(desc, ch, buf, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);

	spdk_delay_us(10);
	stub_complete_io(1);
	poll_threads();

	/* Check if histogram gathered data from all I/O channels */
	g_histogram = NULL;
	spdk_bdev_histogram_get(bdev, histogram, histogram_data_cb, NULL);
	poll_threads();
	CU_ASSERT(g_status == 0);
	CU_ASSERT(bdev->internal.histogram_enabled == true);
	SPDK_CU_ASSERT_FATAL(g_histogram != NULL);

	g_count = 0;
	spdk_histogram_data_iterate(g_histogram, histogram_io_count, NULL);
	CU_ASSERT(g_count == 2);

	g_count = 0;
	spdk_bdev_channel_get_histogram(ch, histogram_channel_data_cb, histogram_io_count);
	CU_ASSERT(g_status == 0);
	CU_ASSERT(g_count == 2);

	/* Disable histogram */
	spdk_bdev_histogram_enable(bdev, histogram_status_cb, NULL, false);
	poll_threads();
	CU_ASSERT(g_status == 0);
	CU_ASSERT(bdev->internal.histogram_enabled == false);

	/* Try to run histogram commands on disabled bdev */
	spdk_bdev_histogram_get(bdev, histogram, histogram_data_cb, NULL);
	poll_threads();
	CU_ASSERT(g_status == -EFAULT);

	spdk_bdev_channel_get_histogram(ch, histogram_channel_data_cb, NULL);
	CU_ASSERT(g_status == -EFAULT);

	spdk_histogram_data_free(histogram);
	spdk_put_io_channel(ch);
	spdk_bdev_close(desc);
	free_bdev(bdev);
	ut_fini_bdev();
}
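
/* _bdev_compare() is parameterized on whether the backend supports COMPARE.
 * When it does not (emulated == true), the bdev layer emulates the operation by
 * reading the blocks and comparing them in software, which is why the expected
 * child I/O type below is READ instead of COMPARE.
 */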

static void
_bdev_compare(bool emulated)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc = NULL;
	struct spdk_io_channel *ioch;
	struct ut_expected_io *expected_io;
	uint64_t offset, num_blocks;
	uint32_t num_completed;
	char aa_buf[512];
	char bb_buf[512];
	struct iovec compare_iov;
	uint8_t expected_io_type;
	int rc;

	if (emulated) {
		expected_io_type = SPDK_BDEV_IO_TYPE_READ;
	} else {
		expected_io_type = SPDK_BDEV_IO_TYPE_COMPARE;
	}

	memset(aa_buf, 0xaa, sizeof(aa_buf));
	memset(bb_buf, 0xbb, sizeof(bb_buf));

	g_io_types_supported[SPDK_BDEV_IO_TYPE_COMPARE] = !emulated;

	ut_init_bdev(NULL);
	fn_table.submit_request = stub_submit_request_get_buf;
	bdev = allocate_bdev("bdev");

	rc = spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc);
	CU_ASSERT_EQUAL(rc, 0);
	SPDK_CU_ASSERT_FATAL(desc != NULL);
	CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc));
	ioch = spdk_bdev_get_io_channel(desc);
	SPDK_CU_ASSERT_FATAL(ioch != NULL);

	fn_table.submit_request = stub_submit_request_get_buf;
	g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS;

	offset = 50;
	num_blocks = 1;
	compare_iov.iov_base = aa_buf;
	compare_iov.iov_len = sizeof(aa_buf);

	/* 1. successful compare */
	expected_io = ut_alloc_expected_io(expected_io_type, offset, num_blocks, 0);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	g_io_done = false;
	g_compare_read_buf = aa_buf;
	g_compare_read_buf_len = sizeof(aa_buf);
	rc = spdk_bdev_comparev_blocks(desc, ioch, &compare_iov, 1, offset, num_blocks, io_done, NULL);
	CU_ASSERT_EQUAL(rc, 0);
	num_completed = stub_complete_io(1);
	CU_ASSERT_EQUAL(num_completed, 1);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS);

	/* 2. miscompare */
	expected_io = ut_alloc_expected_io(expected_io_type, offset, num_blocks, 0);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	g_io_done = false;
	g_compare_read_buf = bb_buf;
	g_compare_read_buf_len = sizeof(bb_buf);
	rc = spdk_bdev_comparev_blocks(desc, ioch, &compare_iov, 1, offset, num_blocks, io_done, NULL);
	CU_ASSERT_EQUAL(rc, 0);
	num_completed = stub_complete_io(1);
	CU_ASSERT_EQUAL(num_completed, 1);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_MISCOMPARE);

	spdk_put_io_channel(ioch);
	spdk_bdev_close(desc);
	free_bdev(bdev);
	fn_table.submit_request = stub_submit_request;
	ut_fini_bdev();

	g_io_types_supported[SPDK_BDEV_IO_TYPE_COMPARE] = true;

	g_compare_read_buf = NULL;
}

static void
_bdev_compare_with_md(bool emulated)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc = NULL;
	struct spdk_io_channel *ioch;
	struct ut_expected_io *expected_io;
	uint64_t offset, num_blocks;
	uint32_t num_completed;
	char buf[1024 + 16 /* 2 * blocklen + 2 * mdlen */];
	char buf_interleaved_miscompare[1024 + 16 /* 2 * blocklen + 2 * mdlen */];
	char buf_miscompare[1024 /* 2 * blocklen */];
	char md_buf[16];
	char md_buf_miscompare[16];
	struct iovec compare_iov;
	uint8_t expected_io_type;
	int rc;

	if (emulated) {
		expected_io_type = SPDK_BDEV_IO_TYPE_READ;
	} else {
		expected_io_type = SPDK_BDEV_IO_TYPE_COMPARE;
	}

	memset(buf, 0xaa, sizeof(buf));
	memset(buf_interleaved_miscompare, 0xaa, sizeof(buf_interleaved_miscompare));
	/* make last md different */
	memset(buf_interleaved_miscompare + 1024 + 8, 0xbb, 8);
	memset(buf_miscompare, 0xbb, sizeof(buf_miscompare));
	memset(md_buf, 0xaa, 16);
	memset(md_buf_miscompare, 0xbb, 16);

	g_io_types_supported[SPDK_BDEV_IO_TYPE_COMPARE] = !emulated;

	ut_init_bdev(NULL);
	fn_table.submit_request = stub_submit_request_get_buf;
	bdev = allocate_bdev("bdev");

	rc = spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc);
	CU_ASSERT_EQUAL(rc, 0);
	SPDK_CU_ASSERT_FATAL(desc != NULL);
	CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc));
	ioch = spdk_bdev_get_io_channel(desc);
	SPDK_CU_ASSERT_FATAL(ioch != NULL);

	fn_table.submit_request = stub_submit_request_get_buf;
	g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS;

	offset = 50;
	num_blocks = 2;

	/* interleaved md & data */
	bdev->md_interleave = true;
	bdev->md_len = 8;
	bdev->blocklen = 512 + 8;
	compare_iov.iov_base = buf;
	compare_iov.iov_len = sizeof(buf);

	/* 1. successful compare with md interleaved */
	expected_io = ut_alloc_expected_io(expected_io_type, offset, num_blocks, 0);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	g_io_done = false;
	g_compare_read_buf = buf;
	g_compare_read_buf_len = sizeof(buf);
	rc = spdk_bdev_comparev_blocks(desc, ioch, &compare_iov, 1, offset, num_blocks, io_done, NULL);
	CU_ASSERT_EQUAL(rc, 0);
	num_completed = stub_complete_io(1);
	CU_ASSERT_EQUAL(num_completed, 1);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS);

	/* 2. miscompare with md interleaved */
	expected_io = ut_alloc_expected_io(expected_io_type, offset, num_blocks, 0);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	g_io_done = false;
	g_compare_read_buf = buf_interleaved_miscompare;
	g_compare_read_buf_len = sizeof(buf_interleaved_miscompare);
	rc = spdk_bdev_comparev_blocks(desc, ioch, &compare_iov, 1, offset, num_blocks, io_done, NULL);
	CU_ASSERT_EQUAL(rc, 0);
	num_completed = stub_complete_io(1);
	CU_ASSERT_EQUAL(num_completed, 1);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_MISCOMPARE);

	/* Separate data & md buffers */
	bdev->md_interleave = false;
	bdev->blocklen = 512;
	compare_iov.iov_base = buf;
	compare_iov.iov_len = 1024;

	/* 3. successful compare with md separated */
	expected_io = ut_alloc_expected_io(expected_io_type, offset, num_blocks, 0);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	g_io_done = false;
	g_compare_read_buf = buf;
	g_compare_read_buf_len = 1024;
	g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS;
	g_compare_md_buf = md_buf;
	rc = spdk_bdev_comparev_blocks_with_md(desc, ioch, &compare_iov, 1, md_buf,
					       offset, num_blocks, io_done, NULL);
	CU_ASSERT_EQUAL(rc, 0);
	num_completed = stub_complete_io(1);
	CU_ASSERT_EQUAL(num_completed, 1);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS);

	/* 4. miscompare with md separated where md buf is different */
	expected_io = ut_alloc_expected_io(expected_io_type, offset, num_blocks, 0);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	g_io_done = false;
	g_compare_read_buf = buf;
	g_compare_read_buf_len = 1024;
	g_compare_md_buf = md_buf_miscompare;
	rc = spdk_bdev_comparev_blocks_with_md(desc, ioch, &compare_iov, 1, md_buf,
					       offset, num_blocks, io_done, NULL);
	CU_ASSERT_EQUAL(rc, 0);
	num_completed = stub_complete_io(1);
	CU_ASSERT_EQUAL(num_completed, 1);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_MISCOMPARE);

	/* 5. miscompare with md separated where buf is different */
	expected_io = ut_alloc_expected_io(expected_io_type, offset, num_blocks, 0);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	g_io_done = false;
	g_compare_read_buf = buf_miscompare;
	g_compare_read_buf_len = sizeof(buf_miscompare);
	g_compare_md_buf = md_buf;
	rc = spdk_bdev_comparev_blocks_with_md(desc, ioch, &compare_iov, 1, md_buf,
					       offset, num_blocks, io_done, NULL);
	CU_ASSERT_EQUAL(rc, 0);
	num_completed = stub_complete_io(1);
	CU_ASSERT_EQUAL(num_completed, 1);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_MISCOMPARE);

	bdev->md_len = 0;
	g_compare_md_buf = NULL;

	spdk_put_io_channel(ioch);
	spdk_bdev_close(desc);
	free_bdev(bdev);
	fn_table.submit_request = stub_submit_request;
	ut_fini_bdev();

	g_io_types_supported[SPDK_BDEV_IO_TYPE_COMPARE] = true;

	g_compare_read_buf = NULL;
}

static void
bdev_compare(void)
{
	_bdev_compare(false);
	_bdev_compare_with_md(false);
}

static void
bdev_compare_emulated(void)
{
	_bdev_compare(true);
	_bdev_compare_with_md(true);
}
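
/* bdev_compare_and_write() always runs with COMPARE unsupported, so the compare
 * leg is emulated with a READ. The operation holds an LBA range lock for its
 * duration, which is why the test polls the threads around each step to let the
 * range locking and unlocking messages complete.
 */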

static void
bdev_compare_and_write(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc = NULL;
	struct spdk_io_channel *ioch;
	struct ut_expected_io *expected_io;
	uint64_t offset, num_blocks;
	uint32_t num_completed;
	char aa_buf[512];
	char bb_buf[512];
	char cc_buf[512];
	char write_buf[512];
	struct iovec compare_iov;
	struct iovec write_iov;
	int rc;

	memset(aa_buf, 0xaa, sizeof(aa_buf));
	memset(bb_buf, 0xbb, sizeof(bb_buf));
	memset(cc_buf, 0xcc, sizeof(cc_buf));

	g_io_types_supported[SPDK_BDEV_IO_TYPE_COMPARE] = false;

	ut_init_bdev(NULL);
	fn_table.submit_request = stub_submit_request_get_buf;
	bdev = allocate_bdev("bdev");

	rc = spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc);
	CU_ASSERT_EQUAL(rc, 0);
	SPDK_CU_ASSERT_FATAL(desc != NULL);
	CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc));
	ioch = spdk_bdev_get_io_channel(desc);
	SPDK_CU_ASSERT_FATAL(ioch != NULL);

	fn_table.submit_request = stub_submit_request_get_buf;
	g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS;

	offset = 50;
	num_blocks = 1;
	compare_iov.iov_base = aa_buf;
	compare_iov.iov_len = sizeof(aa_buf);
	write_iov.iov_base = bb_buf;
	write_iov.iov_len = sizeof(bb_buf);

	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, offset, num_blocks, 0);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, offset, num_blocks, 0);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	g_io_done = false;
	g_compare_read_buf = aa_buf;
	g_compare_read_buf_len = sizeof(aa_buf);
	memset(write_buf, 0, sizeof(write_buf));
	g_compare_write_buf = write_buf;
	g_compare_write_buf_len = sizeof(write_buf);
	rc = spdk_bdev_comparev_and_writev_blocks(desc, ioch, &compare_iov, 1, &write_iov, 1,
			offset, num_blocks, io_done, NULL);
	/* Trigger range locking */
	poll_threads();
	CU_ASSERT_EQUAL(rc, 0);
	num_completed = stub_complete_io(1);
	CU_ASSERT_EQUAL(num_completed, 1);
	CU_ASSERT(g_io_done == false);
	num_completed = stub_complete_io(1);
	/* Trigger range unlocking */
	poll_threads();
	CU_ASSERT_EQUAL(num_completed, 1);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS);
	CU_ASSERT(memcmp(write_buf, bb_buf, sizeof(write_buf)) == 0);

	/* Test miscompare */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, offset, num_blocks, 0);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	g_io_done = false;
	g_compare_read_buf = cc_buf;
	g_compare_read_buf_len = sizeof(cc_buf);
	memset(write_buf, 0, sizeof(write_buf));
	g_compare_write_buf = write_buf;
	g_compare_write_buf_len = sizeof(write_buf);
	rc = spdk_bdev_comparev_and_writev_blocks(desc, ioch, &compare_iov, 1, &write_iov, 1,
			offset, num_blocks, io_done, NULL);
	/* Trigger range locking */
	poll_threads();
	CU_ASSERT_EQUAL(rc, 0);
	num_completed = stub_complete_io(1);
	/* Trigger range unlocking earlier because we expect error here */
	poll_threads();
	CU_ASSERT_EQUAL(num_completed, 1);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_MISCOMPARE);
	num_completed = stub_complete_io(1);
	CU_ASSERT_EQUAL(num_completed, 0);

	spdk_put_io_channel(ioch);
	spdk_bdev_close(desc);
	free_bdev(bdev);
	fn_table.submit_request = stub_submit_request;
	ut_fini_bdev();

	g_io_types_supported[SPDK_BDEV_IO_TYPE_COMPARE] = true;

	g_compare_read_buf = NULL;
	g_compare_write_buf = NULL;
}
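
/* When WRITE_ZEROES is not supported by the backend, the bdev layer emulates it
 * with regular WRITEs sourced from an internal zero buffer, so requests larger
 * than ZERO_BUFFER_SIZE are split into multiple children. With metadata, the
 * per-child block count shrinks accordingly, since the zero buffer has to cover
 * the metadata as well.
 */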

static void
bdev_write_zeroes(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc = NULL;
	struct spdk_io_channel *ioch;
	struct ut_expected_io *expected_io;
	uint64_t offset, num_io_blocks, num_blocks;
	uint32_t num_completed, num_requests;
	int rc;

	ut_init_bdev(NULL);
	bdev = allocate_bdev("bdev");

	rc = spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc);
	CU_ASSERT_EQUAL(rc, 0);
	SPDK_CU_ASSERT_FATAL(desc != NULL);
	CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc));
	ioch = spdk_bdev_get_io_channel(desc);
	SPDK_CU_ASSERT_FATAL(ioch != NULL);

	fn_table.submit_request = stub_submit_request;
	g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS;

	/* First test that if the bdev supports write_zeroes, the request won't be split */
	bdev->md_len = 0;
	bdev->blocklen = 4096;
	num_blocks = (ZERO_BUFFER_SIZE / bdev->blocklen) * 2;

	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, 0, num_blocks, 0);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
	rc = spdk_bdev_write_zeroes_blocks(desc, ioch, 0, num_blocks, io_done, NULL);
	CU_ASSERT_EQUAL(rc, 0);
	num_completed = stub_complete_io(1);
	CU_ASSERT_EQUAL(num_completed, 1);

	/* Check that if write zeroes is not supported it'll be replaced by regular writes */
	ut_enable_io_type(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, false);
	num_io_blocks = ZERO_BUFFER_SIZE / bdev->blocklen;
	num_requests = 2;
	num_blocks = (ZERO_BUFFER_SIZE / bdev->blocklen) * num_requests;

	for (offset = 0; offset < num_requests; ++offset) {
		expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE,
						   offset * num_io_blocks, num_io_blocks, 0);
		TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
	}

	rc = spdk_bdev_write_zeroes_blocks(desc, ioch, 0, num_blocks, io_done, NULL);
	CU_ASSERT_EQUAL(rc, 0);
	num_completed = stub_complete_io(num_requests);
	CU_ASSERT_EQUAL(num_completed, num_requests);

	/* Check that the splitting is correct if bdev has interleaved metadata */
	bdev->md_interleave = true;
	bdev->md_len = 64;
	bdev->blocklen = 4096 + 64;
	num_blocks = (ZERO_BUFFER_SIZE / bdev->blocklen) * 2;

	num_requests = offset = 0;
	while (offset < num_blocks) {
		num_io_blocks = spdk_min(ZERO_BUFFER_SIZE / bdev->blocklen, num_blocks - offset);
		expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE,
						   offset, num_io_blocks, 0);
		TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
		offset += num_io_blocks;
		num_requests++;
	}

	rc = spdk_bdev_write_zeroes_blocks(desc, ioch, 0, num_blocks, io_done, NULL);
	CU_ASSERT_EQUAL(rc, 0);
	num_completed = stub_complete_io(num_requests);
	CU_ASSERT_EQUAL(num_completed, num_requests);
	num_completed = stub_complete_io(num_requests);
	CU_ASSERT_EQUAL(num_completed, 0);

	/* Check the same for separate metadata buffer */
	bdev->md_interleave = false;
	bdev->md_len = 64;
	bdev->blocklen = 4096;

	num_requests = offset = 0;
	while (offset < num_blocks) {
		num_io_blocks = spdk_min(ZERO_BUFFER_SIZE / (bdev->blocklen + bdev->md_len), num_blocks);
		expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE,
						   offset, num_io_blocks, 0);
		expected_io->md_buf = (char *)g_bdev_mgr.zero_buffer + num_io_blocks * bdev->blocklen;
		TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
		offset += num_io_blocks;
		num_requests++;
	}

	rc = spdk_bdev_write_zeroes_blocks(desc, ioch, 0, num_blocks, io_done, NULL);
	CU_ASSERT_EQUAL(rc, 0);
	num_completed = stub_complete_io(num_requests);
	CU_ASSERT_EQUAL(num_completed, num_requests);

	ut_enable_io_type(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, true);
	spdk_put_io_channel(ioch);
	spdk_bdev_close(desc);
	free_bdev(bdev);
	ut_fini_bdev();
}
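
/* Zero-copy I/O is a two-step protocol: spdk_bdev_zcopy_start() obtains a
 * backend-owned buffer (populate=true additionally fills it with the data on
 * the media), and spdk_bdev_zcopy_end() releases it (commit=true persists the
 * caller's writes). The stub records the buffers in g_zcopy_write_buf and
 * g_zcopy_read_buf so the tests can verify which buffer was used.
 */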

static void
bdev_zcopy_write(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc = NULL;
	struct spdk_io_channel *ioch;
	struct ut_expected_io *expected_io;
	uint64_t offset, num_blocks;
	uint32_t num_completed;
	char aa_buf[512];
	struct iovec iov;
	int rc;
	const bool populate = false;
	const bool commit = true;

	memset(aa_buf, 0xaa, sizeof(aa_buf));

	ut_init_bdev(NULL);
	bdev = allocate_bdev("bdev");

	rc = spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc);
	CU_ASSERT_EQUAL(rc, 0);
	SPDK_CU_ASSERT_FATAL(desc != NULL);
	CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc));
	ioch = spdk_bdev_get_io_channel(desc);
	SPDK_CU_ASSERT_FATAL(ioch != NULL);

	g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS;

	offset = 50;
	num_blocks = 1;
	iov.iov_base = NULL;
	iov.iov_len = 0;

	g_zcopy_read_buf = (void *) 0x1122334455667788UL;
	g_zcopy_read_buf_len = (uint32_t) -1;
	/* Do a zcopy start for a write (populate=false) */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_ZCOPY, offset, num_blocks, 0);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
	g_io_done = false;
	g_zcopy_write_buf = aa_buf;
	g_zcopy_write_buf_len = sizeof(aa_buf);
	g_zcopy_bdev_io = NULL;
	rc = spdk_bdev_zcopy_start(desc, ioch, &iov, 1, offset, num_blocks, populate, io_done, NULL);
	CU_ASSERT_EQUAL(rc, 0);
	num_completed = stub_complete_io(1);
	CU_ASSERT_EQUAL(num_completed, 1);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS);
	/* Check that the iov has been set up */
	CU_ASSERT(iov.iov_base == g_zcopy_write_buf);
	CU_ASSERT(iov.iov_len == g_zcopy_write_buf_len);
	/* Check that the bdev_io has been saved */
	CU_ASSERT(g_zcopy_bdev_io != NULL);
	/* Now do the zcopy end for a write (commit=true) */
	g_io_done = false;
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_ZCOPY, offset, num_blocks, 0);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
	rc = spdk_bdev_zcopy_end(g_zcopy_bdev_io, commit, io_done, NULL);
	CU_ASSERT_EQUAL(rc, 0);
	num_completed = stub_complete_io(1);
	CU_ASSERT_EQUAL(num_completed, 1);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS);
	/* Check that the g_zcopy fields are reset by io_done */
	CU_ASSERT(g_zcopy_write_buf == NULL);
	CU_ASSERT(g_zcopy_write_buf_len == 0);
	/* Check that io_done has freed the g_zcopy_bdev_io */
	CU_ASSERT(g_zcopy_bdev_io == NULL);

	/* Check that the zcopy read buffer has not been touched, which
	 * ensures that the correct buffers were used.
	 */
	CU_ASSERT(g_zcopy_read_buf == (void *) 0x1122334455667788UL);
	CU_ASSERT(g_zcopy_read_buf_len == (uint32_t) -1);

	spdk_put_io_channel(ioch);
	spdk_bdev_close(desc);
	free_bdev(bdev);
	ut_fini_bdev();
}

static void
bdev_zcopy_read(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc = NULL;
	struct spdk_io_channel *ioch;
	struct ut_expected_io *expected_io;
	uint64_t offset, num_blocks;
	uint32_t num_completed;
	char aa_buf[512];
	struct iovec iov;
	int rc;
	const bool populate = true;
	const bool commit = false;

	memset(aa_buf, 0xaa, sizeof(aa_buf));

	ut_init_bdev(NULL);
	bdev = allocate_bdev("bdev");

	rc = spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc);
	CU_ASSERT_EQUAL(rc, 0);
	SPDK_CU_ASSERT_FATAL(desc != NULL);
	CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc));
	ioch = spdk_bdev_get_io_channel(desc);
	SPDK_CU_ASSERT_FATAL(ioch != NULL);

	g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS;

	offset = 50;
	num_blocks = 1;
	iov.iov_base = NULL;
	iov.iov_len = 0;

	g_zcopy_write_buf = (void *) 0x1122334455667788UL;
	g_zcopy_write_buf_len = (uint32_t) -1;

	/* Do a zcopy start for a read (populate=true) */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_ZCOPY, offset, num_blocks, 0);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
	g_io_done = false;
	g_zcopy_read_buf = aa_buf;
	g_zcopy_read_buf_len = sizeof(aa_buf);
	g_zcopy_bdev_io = NULL;
	rc = spdk_bdev_zcopy_start(desc, ioch, &iov, 1, offset, num_blocks, populate, io_done, NULL);
	CU_ASSERT_EQUAL(rc, 0);
	num_completed = stub_complete_io(1);
	CU_ASSERT_EQUAL(num_completed, 1);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS);
	/* Check that the iov has been set up */
	CU_ASSERT(iov.iov_base == g_zcopy_read_buf);
	CU_ASSERT(iov.iov_len == g_zcopy_read_buf_len);
	/* Check that the bdev_io has been saved */
	CU_ASSERT(g_zcopy_bdev_io != NULL);

	/* Now do the zcopy end for a read (commit=false) */
	g_io_done = false;
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_ZCOPY, offset, num_blocks, 0);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
	rc = spdk_bdev_zcopy_end(g_zcopy_bdev_io, commit, io_done, NULL);
	CU_ASSERT_EQUAL(rc, 0);
	num_completed = stub_complete_io(1);
	CU_ASSERT_EQUAL(num_completed, 1);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS);
	/* Check that the g_zcopy fields are reset by io_done */
	CU_ASSERT(g_zcopy_read_buf == NULL);
	CU_ASSERT(g_zcopy_read_buf_len == 0);
	/* Check that io_done has freed the g_zcopy_bdev_io */
	CU_ASSERT(g_zcopy_bdev_io == NULL);

	/* Check that the zcopy write buffer has not been touched, which
	 * ensures that the correct buffers were used.
	 */
	CU_ASSERT(g_zcopy_write_buf == (void *) 0x1122334455667788UL);
	CU_ASSERT(g_zcopy_write_buf_len == (uint32_t) -1);

	spdk_put_io_channel(ioch);
	spdk_bdev_close(desc);
	free_bdev(bdev);
	ut_fini_bdev();
}
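
/* Verify that a bdev that is being unregistered can no longer be opened, even
 * while an existing descriptor still holds it open.
 */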

static void
bdev_open_while_hotremove(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc[2] = {};
	int rc;

	bdev = allocate_bdev("bdev");

	rc = spdk_bdev_open_ext("bdev", false, bdev_ut_event_cb, NULL, &desc[0]);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(desc[0] != NULL);
	CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc[0]));

	spdk_bdev_unregister(bdev, NULL, NULL);
	/* Bdev unregister is handled asynchronously. Poll thread to complete. */
	poll_threads();

	rc = spdk_bdev_open_ext("bdev", false, bdev_ut_event_cb, NULL, &desc[1]);
	CU_ASSERT(rc == -ENODEV);
	SPDK_CU_ASSERT_FATAL(desc[1] == NULL);

	spdk_bdev_close(desc[0]);
	free_bdev(bdev);
}

static void
bdev_close_while_hotremove(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc = NULL;
	int rc = 0;

	bdev = allocate_bdev("bdev");

	rc = spdk_bdev_open_ext("bdev", true, bdev_open_cb1, &desc, &desc);
	CU_ASSERT_EQUAL(rc, 0);
	SPDK_CU_ASSERT_FATAL(desc != NULL);
	CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc));

	/* Simulate hot-unplug by unregistering bdev */
	g_event_type1 = 0xFF;
	g_unregister_arg = NULL;
	g_unregister_rc = -1;
	spdk_bdev_unregister(bdev, bdev_unregister_cb, (void *)0x12345678);
	/* Close device while remove event is in flight */
	spdk_bdev_close(desc);

	/* Ensure that unregister callback is delayed */
	CU_ASSERT_EQUAL(g_unregister_arg, NULL);
	CU_ASSERT_EQUAL(g_unregister_rc, -1);

	poll_threads();

	/* Event callback shall not be issued because device was closed */
	CU_ASSERT_EQUAL(g_event_type1, 0xFF);
	/* Unregister callback is issued */
	CU_ASSERT_EQUAL(g_unregister_arg, (void *)0x12345678);
	CU_ASSERT_EQUAL(g_unregister_rc, 0);

	free_bdev(bdev);
}

static void
bdev_open_ext(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc1 = NULL;
	struct spdk_bdev_desc *desc2 = NULL;
	int rc = 0;

	bdev = allocate_bdev("bdev");

	rc = spdk_bdev_open_ext("bdev", true, NULL, NULL, &desc1);
	CU_ASSERT_EQUAL(rc, -EINVAL);

	rc = spdk_bdev_open_ext("bdev", true, bdev_open_cb1, &desc1, &desc1);
	CU_ASSERT_EQUAL(rc, 0);

	rc = spdk_bdev_open_ext("bdev", true, bdev_open_cb2, &desc2, &desc2);
	CU_ASSERT_EQUAL(rc, 0);

	g_event_type1 = 0xFF;
	g_event_type2 = 0xFF;

	/* Simulate hot-unplug by unregistering bdev */
	spdk_bdev_unregister(bdev, NULL, NULL);
	poll_threads();

	/* Check if correct events have been triggered in event callback fn */
	CU_ASSERT_EQUAL(g_event_type1, SPDK_BDEV_EVENT_REMOVE);
	CU_ASSERT_EQUAL(g_event_type2, SPDK_BDEV_EVENT_REMOVE);

	free_bdev(bdev);
	poll_threads();
}

static void
bdev_open_ext_unregister(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc1 = NULL;
	struct spdk_bdev_desc *desc2 = NULL;
	struct spdk_bdev_desc *desc3 = NULL;
	struct spdk_bdev_desc *desc4 = NULL;
	int rc = 0;

	bdev = allocate_bdev("bdev");

	rc = spdk_bdev_open_ext("bdev", true, NULL, NULL, &desc1);
	CU_ASSERT_EQUAL(rc, -EINVAL);

	rc = spdk_bdev_open_ext("bdev", true, bdev_open_cb1, &desc1, &desc1);
	CU_ASSERT_EQUAL(rc, 0);

	rc = spdk_bdev_open_ext("bdev", true, bdev_open_cb2, &desc2, &desc2);
	CU_ASSERT_EQUAL(rc, 0);

	rc = spdk_bdev_open_ext("bdev", true, bdev_open_cb3, &desc3, &desc3);
	CU_ASSERT_EQUAL(rc, 0);

	rc = spdk_bdev_open_ext("bdev", true, bdev_open_cb4, &desc4, &desc4);
	CU_ASSERT_EQUAL(rc, 0);

	g_event_type1 = 0xFF;
	g_event_type2 = 0xFF;
	g_event_type3 = 0xFF;
	g_event_type4 = 0xFF;

	g_unregister_arg = NULL;
	g_unregister_rc = -1;

	/* Simulate hot-unplug by unregistering bdev */
	spdk_bdev_unregister(bdev, bdev_unregister_cb, (void *)0x12345678);

	/*
	 * Unregister is handled asynchronously and the event callbacks
	 * (i.e., the bdev_open_cbN above) will be called.
	 * bdev_open_cb3 and bdev_open_cb4 intentionally do not close
	 * desc3 and desc4, so the bdev is not closed.
	 */
	poll_threads();

	/* Check if correct events have been triggered in event callback fn */
	CU_ASSERT_EQUAL(g_event_type1, SPDK_BDEV_EVENT_REMOVE);
	CU_ASSERT_EQUAL(g_event_type2, SPDK_BDEV_EVENT_REMOVE);
	CU_ASSERT_EQUAL(g_event_type3, SPDK_BDEV_EVENT_REMOVE);
	CU_ASSERT_EQUAL(g_event_type4, SPDK_BDEV_EVENT_REMOVE);

	/* Check that unregister callback is delayed */
	CU_ASSERT(g_unregister_arg == NULL);
	CU_ASSERT(g_unregister_rc == -1);

	/*
	 * Explicitly close desc3. As desc4 is still opened there, the
	 * unregister callback is still delayed to execute.
	 */
	spdk_bdev_close(desc3);
	CU_ASSERT(g_unregister_arg == NULL);
	CU_ASSERT(g_unregister_rc == -1);

	/*
	 * Explicitly close desc4 to trigger the ongoing bdev unregister
	 * operation after the last desc is closed.
	 */
	spdk_bdev_close(desc4);

	/* Poll the thread for the async unregister operation */
	poll_threads();

	/* Check that unregister callback is executed */
	CU_ASSERT(g_unregister_arg == (void *)0x12345678);
	CU_ASSERT(g_unregister_rc == 0);

	free_bdev(bdev);
	poll_threads();
}

struct timeout_io_cb_arg {
	struct iovec iov;
	uint8_t type;
};

static int
bdev_channel_count_submitted_io(struct spdk_bdev_channel *ch)
{
	struct spdk_bdev_io *bdev_io;
	int n = 0;

	if (!ch) {
		return -1;
	}

	TAILQ_FOREACH(bdev_io, &ch->io_submitted, internal.ch_link) {
		n++;
	}

	return n;
}

static void
bdev_channel_io_timeout_cb(void *cb_arg, struct spdk_bdev_io *bdev_io)
{
	struct timeout_io_cb_arg *ctx = cb_arg;

	ctx->type = bdev_io->type;
	ctx->iov.iov_base = bdev_io->iov.iov_base;
	ctx->iov.iov_len = bdev_io->iov.iov_len;
}
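
/* bdev_set_io_timeout() checks three things: that bdev_ch->io_submitted links
 * all submitted I/Os (including children created by splitting), that
 * spdk_bdev_set_timeout() registers, updates and disables the descriptor's
 * timeout poller, and that the timeout callback reports the right I/O. The
 * spdk_delay_us(15 * spdk_get_ticks_hz()) calls below stand in for 15 seconds
 * of elapsed time on the mocked clock.
 */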

static void
bdev_set_io_timeout(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc = NULL;
	struct spdk_io_channel *io_ch = NULL;
	struct spdk_bdev_channel *bdev_ch = NULL;
	struct timeout_io_cb_arg cb_arg;

	ut_init_bdev(NULL);
	bdev = allocate_bdev("bdev");

	CU_ASSERT(spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc) == 0);
	SPDK_CU_ASSERT_FATAL(desc != NULL);
	CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc));

	io_ch = spdk_bdev_get_io_channel(desc);
	CU_ASSERT(io_ch != NULL);

	bdev_ch = spdk_io_channel_get_ctx(io_ch);
	CU_ASSERT(TAILQ_EMPTY(&bdev_ch->io_submitted));

	/* This is part 1.
	 * We check the bdev_ch->io_submitted list to make sure that it links
	 * only the user-submitted I/Os.
	 */
	CU_ASSERT(spdk_bdev_read(desc, io_ch, (void *)0x1000, 0, 4096, io_done, NULL) == 0);
	CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 1);
	CU_ASSERT(spdk_bdev_write(desc, io_ch, (void *)0x2000, 0, 4096, io_done, NULL) == 0);
	CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 2);
	stub_complete_io(1);
	CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 1);
	stub_complete_io(1);
	CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 0);

	/* Split IO */
	bdev->optimal_io_boundary = 16;
	bdev->split_on_optimal_io_boundary = true;

	/* Now test that a single-vector command is split correctly.
	 * Offset 14, length 8, payload 0xF000
	 *  Child - Offset 14, length 2, payload 0xF000
	 *  Child - Offset 16, length 6, payload 0xF000 + 2 * 512
	 *
	 * Set up the expected values before calling spdk_bdev_read_blocks
	 */
	CU_ASSERT(spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 8, io_done, NULL) == 0);
	/* We count all submitted IOs, including those generated by splitting. */
	CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 3);
	stub_complete_io(1);
	CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 2);
	stub_complete_io(1);
	CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 0);

	/* Also include the reset IO */
	CU_ASSERT(spdk_bdev_reset(desc, io_ch, io_done, NULL) == 0);
	CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 1);
	poll_threads();
	stub_complete_io(1);
	poll_threads();
	CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 0);

	/* This is part 2.
	 * Test the desc timeout poller registration.
	 */

	/* Successfully set the timeout */
	CU_ASSERT(spdk_bdev_set_timeout(desc, 30, bdev_channel_io_timeout_cb, &cb_arg) == 0);
	CU_ASSERT(desc->io_timeout_poller != NULL);
	CU_ASSERT(desc->timeout_in_sec == 30);
	CU_ASSERT(desc->cb_fn == bdev_channel_io_timeout_cb);
	CU_ASSERT(desc->cb_arg == &cb_arg);

	/* Change the timeout limit */
	CU_ASSERT(spdk_bdev_set_timeout(desc, 20, bdev_channel_io_timeout_cb, &cb_arg) == 0);
	CU_ASSERT(desc->io_timeout_poller != NULL);
	CU_ASSERT(desc->timeout_in_sec == 20);
	CU_ASSERT(desc->cb_fn == bdev_channel_io_timeout_cb);
	CU_ASSERT(desc->cb_arg == &cb_arg);

	/* Disable the timeout */
	CU_ASSERT(spdk_bdev_set_timeout(desc, 0, NULL, NULL) == 0);
	CU_ASSERT(desc->io_timeout_poller == NULL);

	/* This is part 3.
	 * We catch a timed-out IO and check whether it is the submitted one.
	 */
	memset(&cb_arg, 0, sizeof(cb_arg));
	CU_ASSERT(spdk_bdev_set_timeout(desc, 30, bdev_channel_io_timeout_cb, &cb_arg) == 0);
	CU_ASSERT(spdk_bdev_write_blocks(desc, io_ch, (void *)0x1000, 0, 1, io_done, NULL) == 0);

	/* Don't reach the limit */
	spdk_delay_us(15 * spdk_get_ticks_hz());
	poll_threads();
	CU_ASSERT(cb_arg.type == 0);
	CU_ASSERT(cb_arg.iov.iov_base == (void *)0x0);
	CU_ASSERT(cb_arg.iov.iov_len == 0);

	/* 15 + 15 = 30: reach the limit */
	spdk_delay_us(15 * spdk_get_ticks_hz());
	poll_threads();
	CU_ASSERT(cb_arg.type == SPDK_BDEV_IO_TYPE_WRITE);
	CU_ASSERT(cb_arg.iov.iov_base == (void *)0x1000);
	CU_ASSERT(cb_arg.iov.iov_len == 1 * bdev->blocklen);
	stub_complete_io(1);

	/* Use the same split IO above and check the IO */
	memset(&cb_arg, 0, sizeof(cb_arg));
	CU_ASSERT(spdk_bdev_write_blocks(desc, io_ch, (void *)0xF000, 14, 8, io_done, NULL) == 0);

	/* The first child completes in time */
	spdk_delay_us(15 * spdk_get_ticks_hz());
	poll_threads();
	stub_complete_io(1);
	CU_ASSERT(cb_arg.type == 0);
	CU_ASSERT(cb_arg.iov.iov_base == (void *)0x0);
	CU_ASSERT(cb_arg.iov.iov_len == 0);

	/* The second child reaches the limit */
	spdk_delay_us(15 * spdk_get_ticks_hz());
	poll_threads();
	CU_ASSERT(cb_arg.type == SPDK_BDEV_IO_TYPE_WRITE);
	CU_ASSERT(cb_arg.iov.iov_base == (void *)0xF000);
	CU_ASSERT(cb_arg.iov.iov_len == 8 * bdev->blocklen);
	stub_complete_io(1);

	/* Also include the reset IO */
	memset(&cb_arg, 0, sizeof(cb_arg));
	CU_ASSERT(spdk_bdev_reset(desc, io_ch, io_done, NULL) == 0);
	spdk_delay_us(30 * spdk_get_ticks_hz());
	poll_threads();
	CU_ASSERT(cb_arg.type == SPDK_BDEV_IO_TYPE_RESET);
	stub_complete_io(1);
	poll_threads();

	spdk_put_io_channel(io_ch);
	spdk_bdev_close(desc);
	free_bdev(bdev);
	ut_fini_bdev();
}
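
/* bdev_set_qd_sampling() verifies spdk_bdev_set_qd_sampling_period(): a new
 * period is recorded in internal.new_period immediately but only takes effect
 * (internal.period, qd_poller) once the current poller iteration finishes, and
 * a period of 0 tears down the poller and its qd_desc.
 */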

static void
bdev_set_qd_sampling(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc = NULL;
	struct spdk_io_channel *io_ch = NULL;
	struct spdk_bdev_channel *bdev_ch = NULL;
	struct timeout_io_cb_arg cb_arg;

	ut_init_bdev(NULL);
	bdev = allocate_bdev("bdev");

	CU_ASSERT(spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc) == 0);
	SPDK_CU_ASSERT_FATAL(desc != NULL);
	CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc));

	io_ch = spdk_bdev_get_io_channel(desc);
	CU_ASSERT(io_ch != NULL);

	bdev_ch = spdk_io_channel_get_ctx(io_ch);
	CU_ASSERT(TAILQ_EMPTY(&bdev_ch->io_submitted));

	/* This is part 1.
	 * We check the bdev_ch->io_submitted list to make sure that it links
	 * only the user-submitted I/Os.
	 */
	CU_ASSERT(spdk_bdev_read(desc, io_ch, (void *)0x1000, 0, 4096, io_done, NULL) == 0);
	CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 1);
	CU_ASSERT(spdk_bdev_write(desc, io_ch, (void *)0x2000, 0, 4096, io_done, NULL) == 0);
	CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 2);
	stub_complete_io(1);
	CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 1);
	stub_complete_io(1);
	CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 0);

	/* This is part 2.
	 * Test the bdev's qd poller registration.
	 */
	/* 1st Successfully set the qd sampling period */
	spdk_bdev_set_qd_sampling_period(bdev, 10);
	CU_ASSERT(bdev->internal.new_period == 10);
	CU_ASSERT(bdev->internal.period == 10);
	CU_ASSERT(bdev->internal.qd_desc != NULL);
	poll_threads();
	CU_ASSERT(bdev->internal.qd_poller != NULL);

	/* 2nd Change the qd sampling period */
	spdk_bdev_set_qd_sampling_period(bdev, 20);
	CU_ASSERT(bdev->internal.new_period == 20);
	CU_ASSERT(bdev->internal.period == 10);
	CU_ASSERT(bdev->internal.qd_desc != NULL);
	poll_threads();
	CU_ASSERT(bdev->internal.qd_poller != NULL);
	CU_ASSERT(bdev->internal.period == bdev->internal.new_period);

	/* 3rd Change the qd sampling period and verify qd_poll_in_progress */
	spdk_delay_us(20);
	poll_thread_times(0, 1);
	CU_ASSERT(bdev->internal.qd_poll_in_progress == true);
	spdk_bdev_set_qd_sampling_period(bdev, 30);
	CU_ASSERT(bdev->internal.new_period == 30);
	CU_ASSERT(bdev->internal.period == 20);
	poll_threads();
	CU_ASSERT(bdev->internal.qd_poll_in_progress == false);
	CU_ASSERT(bdev->internal.period == bdev->internal.new_period);

	/* 4th Disable the qd sampling period */
	spdk_bdev_set_qd_sampling_period(bdev, 0);
	CU_ASSERT(bdev->internal.new_period == 0);
	CU_ASSERT(bdev->internal.period == 30);
	poll_threads();
	CU_ASSERT(bdev->internal.qd_poller == NULL);
	CU_ASSERT(bdev->internal.period == bdev->internal.new_period);
	CU_ASSERT(bdev->internal.qd_desc == NULL);

	/* This is part 3.
	 * We test that the submitted IO and reset work properly with
	 * qd sampling.
	 */
	memset(&cb_arg, 0, sizeof(cb_arg));
	spdk_bdev_set_qd_sampling_period(bdev, 1);
	poll_threads();

	CU_ASSERT(spdk_bdev_write(desc, io_ch, (void *)0x2000, 0, 4096, io_done, NULL) == 0);
	CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 1);

	/* Also include the reset IO */
	memset(&cb_arg, 0, sizeof(cb_arg));
	CU_ASSERT(spdk_bdev_reset(desc, io_ch, io_done, NULL) == 0);
	poll_threads();

	/* Close the desc */
	spdk_put_io_channel(io_ch);
	spdk_bdev_close(desc);

	/* Complete the submitted IO and reset */
	stub_complete_io(2);
	poll_threads();

	free_bdev(bdev);
	ut_fini_bdev();
}
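
/* lba_range_overlap() exercises bdev_lba_range_overlapped() directly. Note the
 * boundary cases: ranges that merely touch (e.g. blocks 0-99 vs 100-149) do not
 * overlap, and a zero-length range never overlaps anything.
 */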

static void
lba_range_overlap(void)
{
	struct lba_range r1, r2;

	r1.offset = 100;
	r1.length = 50;

	r2.offset = 0;
	r2.length = 1;
	CU_ASSERT(!bdev_lba_range_overlapped(&r1, &r2));

	r2.offset = 0;
	r2.length = 100;
	CU_ASSERT(!bdev_lba_range_overlapped(&r1, &r2));

	r2.offset = 0;
	r2.length = 110;
	CU_ASSERT(bdev_lba_range_overlapped(&r1, &r2));

	r2.offset = 100;
	r2.length = 10;
	CU_ASSERT(bdev_lba_range_overlapped(&r1, &r2));

	r2.offset = 110;
	r2.length = 20;
	CU_ASSERT(bdev_lba_range_overlapped(&r1, &r2));

	r2.offset = 140;
	r2.length = 150;
	CU_ASSERT(bdev_lba_range_overlapped(&r1, &r2));

	r2.offset = 130;
	r2.length = 200;
	CU_ASSERT(bdev_lba_range_overlapped(&r1, &r2));

	r2.offset = 150;
	r2.length = 100;
	CU_ASSERT(!bdev_lba_range_overlapped(&r1, &r2));

	r2.offset = 110;
	r2.length = 0;
	CU_ASSERT(!bdev_lba_range_overlapped(&r1, &r2));
}

static bool g_lock_lba_range_done;
static bool g_unlock_lba_range_done;

static void
lock_lba_range_done(void *ctx, int status)
{
	g_lock_lba_range_done = true;
}

static void
unlock_lba_range_done(void *ctx, int status)
{
	g_unlock_lba_range_done = true;
}

static void
lock_lba_range_check_ranges(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc = NULL;
	struct spdk_io_channel *io_ch;
	struct spdk_bdev_channel *channel;
	struct lba_range *range;
	int ctx1;
	int rc;

	ut_init_bdev(NULL);
	bdev = allocate_bdev("bdev0");

	rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc);
	CU_ASSERT(rc == 0);
	CU_ASSERT(desc != NULL);
	CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc));
	io_ch = spdk_bdev_get_io_channel(desc);
	CU_ASSERT(io_ch != NULL);
	channel = spdk_io_channel_get_ctx(io_ch);

	g_lock_lba_range_done = false;
	rc = bdev_lock_lba_range(desc, io_ch, 20, 10, lock_lba_range_done, &ctx1);
	CU_ASSERT(rc == 0);
	poll_threads();

	CU_ASSERT(g_lock_lba_range_done == true);
	range = TAILQ_FIRST(&channel->locked_ranges);
	SPDK_CU_ASSERT_FATAL(range != NULL);
	CU_ASSERT(range->offset == 20);
	CU_ASSERT(range->length == 10);
	CU_ASSERT(range->owner_ch == channel);

	/* Unlocks must exactly match a lock. */
	g_unlock_lba_range_done = false;
	rc = bdev_unlock_lba_range(desc, io_ch, 20, 1, unlock_lba_range_done, &ctx1);
	CU_ASSERT(rc == -EINVAL);
	CU_ASSERT(g_unlock_lba_range_done == false);

	rc = bdev_unlock_lba_range(desc, io_ch, 20, 10, unlock_lba_range_done, &ctx1);
	CU_ASSERT(rc == 0);
	spdk_delay_us(100);
	poll_threads();

	CU_ASSERT(g_unlock_lba_range_done == true);
	CU_ASSERT(TAILQ_EMPTY(&channel->locked_ranges));

	spdk_put_io_channel(io_ch);
	spdk_bdev_close(desc);
	free_bdev(bdev);
	ut_fini_bdev();
}
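
/* An LBA range lock quiesces writes: the lock only completes once every
 * outstanding write I/O overlapping the range has finished, while reads may
 * proceed. lock_lba_range_with_io_outstanding() covers both an outstanding
 * read (the lock completes immediately) and an outstanding write (the lock is
 * held off until the write completes).
 */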
*/
4745 g_unlock_lba_range_done = false;
4746 rc = bdev_unlock_lba_range(desc, io_ch, 20, 1, unlock_lba_range_done, &ctx1);
4747 CU_ASSERT(rc == -EINVAL);
4748 CU_ASSERT(g_unlock_lba_range_done == false);
4749
4750 rc = bdev_unlock_lba_range(desc, io_ch, 20, 10, unlock_lba_range_done, &ctx1);
4751 CU_ASSERT(rc == 0);
4752 spdk_delay_us(100);
4753 poll_threads();
4754
4755 CU_ASSERT(g_unlock_lba_range_done == true);
4756 CU_ASSERT(TAILQ_EMPTY(&channel->locked_ranges));
4757
4758 spdk_put_io_channel(io_ch);
4759 spdk_bdev_close(desc);
4760 free_bdev(bdev);
4761 ut_fini_bdev();
4762 }
4763
4764 static void
4765 lock_lba_range_with_io_outstanding(void)
4766 {
4767 struct spdk_bdev *bdev;
4768 struct spdk_bdev_desc *desc = NULL;
4769 struct spdk_io_channel *io_ch;
4770 struct spdk_bdev_channel *channel;
4771 struct lba_range *range;
4772 char buf[4096];
4773 int ctx1;
4774 int rc;
4775
4776 ut_init_bdev(NULL);
4777 bdev = allocate_bdev("bdev0");
4778
4779 rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc);
4780 CU_ASSERT(rc == 0);
4781 CU_ASSERT(desc != NULL);
4782 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc));
4783 io_ch = spdk_bdev_get_io_channel(desc);
4784 CU_ASSERT(io_ch != NULL);
4785 channel = spdk_io_channel_get_ctx(io_ch);
4786
4787 g_io_done = false;
4788 rc = spdk_bdev_read_blocks(desc, io_ch, buf, 20, 1, io_done, &ctx1);
4789 CU_ASSERT(rc == 0);
4790
4791 g_lock_lba_range_done = false;
4792 rc = bdev_lock_lba_range(desc, io_ch, 20, 10, lock_lba_range_done, &ctx1);
4793 CU_ASSERT(rc == 0);
4794 poll_threads();
4795
4796 /* The lock should immediately become valid, since there are no outstanding
4797 * write I/Os.
4798 */
4799 CU_ASSERT(g_io_done == false);
4800 CU_ASSERT(g_lock_lba_range_done == true);
4801 range = TAILQ_FIRST(&channel->locked_ranges);
4802 SPDK_CU_ASSERT_FATAL(range != NULL);
4803 CU_ASSERT(range->offset == 20);
4804 CU_ASSERT(range->length == 10);
4805 CU_ASSERT(range->owner_ch == channel);
4806 CU_ASSERT(range->locked_ctx == &ctx1);
4807
4808 rc = bdev_unlock_lba_range(desc, io_ch, 20, 10, unlock_lba_range_done, &ctx1);
4809 CU_ASSERT(rc == 0);
4810 stub_complete_io(1);
4811 spdk_delay_us(100);
4812 poll_threads();
4813
4814 CU_ASSERT(TAILQ_EMPTY(&channel->locked_ranges));
4815
4816 /* Now try again, but with a write I/O. */
4817 g_io_done = false;
4818 rc = spdk_bdev_write_blocks(desc, io_ch, buf, 20, 1, io_done, &ctx1);
4819 CU_ASSERT(rc == 0);
4820
4821 g_lock_lba_range_done = false;
4822 rc = bdev_lock_lba_range(desc, io_ch, 20, 10, lock_lba_range_done, &ctx1);
4823 CU_ASSERT(rc == 0);
4824 poll_threads();
4825
4826 /* The lock should not be fully valid yet, since a write I/O is outstanding.
4827 * But note that the range should already be on the channel's locked_ranges
4828 * list, to make sure no new write I/Os are started.
4829 */
4830 CU_ASSERT(g_io_done == false);
4831 CU_ASSERT(g_lock_lba_range_done == false);
4832 range = TAILQ_FIRST(&channel->locked_ranges);
4833 SPDK_CU_ASSERT_FATAL(range != NULL);
4834 CU_ASSERT(range->offset == 20);
4835 CU_ASSERT(range->length == 10);
4836
4837 /* Complete the write I/O. This should make the lock valid (checked by confirming
4838 * our callback was invoked).
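 * Completing the write alone is not enough: poll_threads() must still run so
 * the bdev layer can notice that the range has quiesced and fire the callback.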
4839 */ 4840 stub_complete_io(1); 4841 spdk_delay_us(100); 4842 poll_threads(); 4843 CU_ASSERT(g_io_done == true); 4844 CU_ASSERT(g_lock_lba_range_done == true); 4845 4846 rc = bdev_unlock_lba_range(desc, io_ch, 20, 10, unlock_lba_range_done, &ctx1); 4847 CU_ASSERT(rc == 0); 4848 poll_threads(); 4849 4850 CU_ASSERT(TAILQ_EMPTY(&channel->locked_ranges)); 4851 4852 spdk_put_io_channel(io_ch); 4853 spdk_bdev_close(desc); 4854 free_bdev(bdev); 4855 ut_fini_bdev(); 4856 } 4857 4858 static void 4859 lock_lba_range_overlapped(void) 4860 { 4861 struct spdk_bdev *bdev; 4862 struct spdk_bdev_desc *desc = NULL; 4863 struct spdk_io_channel *io_ch; 4864 struct spdk_bdev_channel *channel; 4865 struct lba_range *range; 4866 int ctx1; 4867 int rc; 4868 4869 ut_init_bdev(NULL); 4870 bdev = allocate_bdev("bdev0"); 4871 4872 rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc); 4873 CU_ASSERT(rc == 0); 4874 CU_ASSERT(desc != NULL); 4875 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 4876 io_ch = spdk_bdev_get_io_channel(desc); 4877 CU_ASSERT(io_ch != NULL); 4878 channel = spdk_io_channel_get_ctx(io_ch); 4879 4880 /* Lock range 20-29. */ 4881 g_lock_lba_range_done = false; 4882 rc = bdev_lock_lba_range(desc, io_ch, 20, 10, lock_lba_range_done, &ctx1); 4883 CU_ASSERT(rc == 0); 4884 poll_threads(); 4885 4886 CU_ASSERT(g_lock_lba_range_done == true); 4887 range = TAILQ_FIRST(&channel->locked_ranges); 4888 SPDK_CU_ASSERT_FATAL(range != NULL); 4889 CU_ASSERT(range->offset == 20); 4890 CU_ASSERT(range->length == 10); 4891 4892 /* Try to lock range 25-39. It should not lock immediately, since it overlaps with 4893 * 20-29. 4894 */ 4895 g_lock_lba_range_done = false; 4896 rc = bdev_lock_lba_range(desc, io_ch, 25, 15, lock_lba_range_done, &ctx1); 4897 CU_ASSERT(rc == 0); 4898 poll_threads(); 4899 4900 CU_ASSERT(g_lock_lba_range_done == false); 4901 range = TAILQ_FIRST(&bdev->internal.pending_locked_ranges); 4902 SPDK_CU_ASSERT_FATAL(range != NULL); 4903 CU_ASSERT(range->offset == 25); 4904 CU_ASSERT(range->length == 15); 4905 4906 /* Unlock 20-29. This should result in range 25-39 now getting locked since it 4907 * no longer overlaps with an active lock. 4908 */ 4909 g_unlock_lba_range_done = false; 4910 rc = bdev_unlock_lba_range(desc, io_ch, 20, 10, unlock_lba_range_done, &ctx1); 4911 CU_ASSERT(rc == 0); 4912 poll_threads(); 4913 4914 CU_ASSERT(g_unlock_lba_range_done == true); 4915 CU_ASSERT(TAILQ_EMPTY(&bdev->internal.pending_locked_ranges)); 4916 range = TAILQ_FIRST(&channel->locked_ranges); 4917 SPDK_CU_ASSERT_FATAL(range != NULL); 4918 CU_ASSERT(range->offset == 25); 4919 CU_ASSERT(range->length == 15); 4920 4921 /* Lock 40-59. This should immediately lock since it does not overlap with the 4922 * currently active 25-39 lock. 4923 */ 4924 g_lock_lba_range_done = false; 4925 rc = bdev_lock_lba_range(desc, io_ch, 40, 20, lock_lba_range_done, &ctx1); 4926 CU_ASSERT(rc == 0); 4927 poll_threads(); 4928 4929 CU_ASSERT(g_lock_lba_range_done == true); 4930 range = TAILQ_FIRST(&bdev->internal.locked_ranges); 4931 SPDK_CU_ASSERT_FATAL(range != NULL); 4932 range = TAILQ_NEXT(range, tailq); 4933 SPDK_CU_ASSERT_FATAL(range != NULL); 4934 CU_ASSERT(range->offset == 40); 4935 CU_ASSERT(range->length == 20); 4936 4937 /* Try to lock 35-44. Note that this overlaps with both 25-39 and 40-59. 
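 * The request is expected to sit on bdev->internal.pending_locked_ranges until
 * both overlapping locks have been released.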
*/ 4938 g_lock_lba_range_done = false; 4939 rc = bdev_lock_lba_range(desc, io_ch, 35, 10, lock_lba_range_done, &ctx1); 4940 CU_ASSERT(rc == 0); 4941 poll_threads(); 4942 4943 CU_ASSERT(g_lock_lba_range_done == false); 4944 range = TAILQ_FIRST(&bdev->internal.pending_locked_ranges); 4945 SPDK_CU_ASSERT_FATAL(range != NULL); 4946 CU_ASSERT(range->offset == 35); 4947 CU_ASSERT(range->length == 10); 4948 4949 /* Unlock 25-39. Make sure that 35-44 is still in the pending list, since 4950 * the 40-59 lock is still active. 4951 */ 4952 g_unlock_lba_range_done = false; 4953 rc = bdev_unlock_lba_range(desc, io_ch, 25, 15, unlock_lba_range_done, &ctx1); 4954 CU_ASSERT(rc == 0); 4955 poll_threads(); 4956 4957 CU_ASSERT(g_unlock_lba_range_done == true); 4958 CU_ASSERT(g_lock_lba_range_done == false); 4959 range = TAILQ_FIRST(&bdev->internal.pending_locked_ranges); 4960 SPDK_CU_ASSERT_FATAL(range != NULL); 4961 CU_ASSERT(range->offset == 35); 4962 CU_ASSERT(range->length == 10); 4963 4964 /* Unlock 40-59. This should result in 35-44 now getting locked, since there are 4965 * no longer any active overlapping locks. 4966 */ 4967 g_unlock_lba_range_done = false; 4968 rc = bdev_unlock_lba_range(desc, io_ch, 40, 20, unlock_lba_range_done, &ctx1); 4969 CU_ASSERT(rc == 0); 4970 poll_threads(); 4971 4972 CU_ASSERT(g_unlock_lba_range_done == true); 4973 CU_ASSERT(g_lock_lba_range_done == true); 4974 CU_ASSERT(TAILQ_EMPTY(&bdev->internal.pending_locked_ranges)); 4975 range = TAILQ_FIRST(&bdev->internal.locked_ranges); 4976 SPDK_CU_ASSERT_FATAL(range != NULL); 4977 CU_ASSERT(range->offset == 35); 4978 CU_ASSERT(range->length == 10); 4979 4980 /* Finally, unlock 35-44. */ 4981 g_unlock_lba_range_done = false; 4982 rc = bdev_unlock_lba_range(desc, io_ch, 35, 10, unlock_lba_range_done, &ctx1); 4983 CU_ASSERT(rc == 0); 4984 poll_threads(); 4985 4986 CU_ASSERT(g_unlock_lba_range_done == true); 4987 CU_ASSERT(TAILQ_EMPTY(&bdev->internal.locked_ranges)); 4988 4989 spdk_put_io_channel(io_ch); 4990 spdk_bdev_close(desc); 4991 free_bdev(bdev); 4992 ut_fini_bdev(); 4993 } 4994 4995 static void 4996 abort_done(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg) 4997 { 4998 g_abort_done = true; 4999 g_abort_status = bdev_io->internal.status; 5000 spdk_bdev_free_io(bdev_io); 5001 } 5002 5003 static void 5004 bdev_io_abort(void) 5005 { 5006 struct spdk_bdev *bdev; 5007 struct spdk_bdev_desc *desc = NULL; 5008 struct spdk_io_channel *io_ch; 5009 struct spdk_bdev_channel *channel; 5010 struct spdk_bdev_mgmt_channel *mgmt_ch; 5011 struct spdk_bdev_opts bdev_opts = {}; 5012 struct iovec iov[SPDK_BDEV_IO_NUM_CHILD_IOV * 2]; 5013 uint64_t io_ctx1 = 0, io_ctx2 = 0, i; 5014 int rc; 5015 5016 spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts)); 5017 bdev_opts.bdev_io_pool_size = 7; 5018 bdev_opts.bdev_io_cache_size = 2; 5019 ut_init_bdev(&bdev_opts); 5020 5021 bdev = allocate_bdev("bdev0"); 5022 5023 rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc); 5024 CU_ASSERT(rc == 0); 5025 CU_ASSERT(desc != NULL); 5026 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 5027 io_ch = spdk_bdev_get_io_channel(desc); 5028 CU_ASSERT(io_ch != NULL); 5029 channel = spdk_io_channel_get_ctx(io_ch); 5030 mgmt_ch = channel->shared_resource->mgmt_ch; 5031 5032 g_abort_done = false; 5033 5034 ut_enable_io_type(SPDK_BDEV_IO_TYPE_ABORT, false); 5035 5036 rc = spdk_bdev_abort(desc, io_ch, &io_ctx1, abort_done, NULL); 5037 CU_ASSERT(rc == -ENOTSUP); 5038 5039 ut_enable_io_type(SPDK_BDEV_IO_TYPE_ABORT, true); 5040 5041 rc = 
spdk_bdev_abort(desc, io_ch, &io_ctx2, abort_done, NULL);
5042 CU_ASSERT(rc == 0);
5043 CU_ASSERT(g_abort_done == true);
5044 CU_ASSERT(g_abort_status == SPDK_BDEV_IO_STATUS_FAILED);
5045
5046 /* Test the case that the target I/O was successfully aborted. */
5047 g_io_done = false;
5048
5049 rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, &io_ctx1);
5050 CU_ASSERT(rc == 0);
5051 CU_ASSERT(g_io_done == false);
5052
5053 g_abort_done = false;
5054 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS;
5055
5056 rc = spdk_bdev_abort(desc, io_ch, &io_ctx1, abort_done, NULL);
5057 CU_ASSERT(rc == 0);
5058 CU_ASSERT(g_io_done == true);
5059 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED);
5060 stub_complete_io(1);
5061 CU_ASSERT(g_abort_done == true);
5062 CU_ASSERT(g_abort_status == SPDK_BDEV_IO_STATUS_SUCCESS);
5063
5064 /* Test the case where the target I/O is not aborted because it completes
5065 * while the abort is still in flight.
5066 */
5067 g_io_done = false;
5068
5069 rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, &io_ctx1);
5070 CU_ASSERT(rc == 0);
5071 CU_ASSERT(g_io_done == false);
5072
5073 g_abort_done = false;
5074 g_io_exp_status = SPDK_BDEV_IO_STATUS_FAILED;
5075
5076 rc = spdk_bdev_abort(desc, io_ch, &io_ctx1, abort_done, NULL);
5077 CU_ASSERT(rc == 0);
5078 CU_ASSERT(g_io_done == false);
5079
5080 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS;
5081 stub_complete_io(1);
5082 CU_ASSERT(g_io_done == true);
5083 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS);
5084
5085 g_io_exp_status = SPDK_BDEV_IO_STATUS_FAILED;
5086 stub_complete_io(1);
5087 CU_ASSERT(g_abort_done == true);
5088 CU_ASSERT(g_abort_status == SPDK_BDEV_IO_STATUS_SUCCESS);
5089
5090 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS;
5091
5092 bdev->optimal_io_boundary = 16;
5093 bdev->split_on_optimal_io_boundary = true;
5094
5095 /* Test that a single-vector command which is split is aborted correctly.
5096 * Offset 14, length 8, payload 0xF000
5097 * Child - Offset 14, length 2, payload 0xF000
5098 * Child - Offset 16, length 6, payload 0xF000 + 2 * 512
5099 */
5100 g_io_done = false;
5101
5102 rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 8, io_done, &io_ctx1);
5103 CU_ASSERT(rc == 0);
5104 CU_ASSERT(g_io_done == false);
5105
5106 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2);
5107
5108 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS;
5109
5110 rc = spdk_bdev_abort(desc, io_ch, &io_ctx1, abort_done, NULL);
5111 CU_ASSERT(rc == 0);
5112 CU_ASSERT(g_io_done == true);
5113 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED);
5114 stub_complete_io(2);
5115 CU_ASSERT(g_abort_done == true);
5116 CU_ASSERT(g_abort_status == SPDK_BDEV_IO_STATUS_SUCCESS);
5117
5118 /* Test that a multi-vector command that needs to be split both on the optimal
5119 * I/O boundary and on the child iovec limit is aborted correctly. The abort is
5120 * requested before the second child I/O has been submitted. The parent I/O
5121 * should complete with failure without submitting the second child I/O.
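 * At the point the abort is issued only the first child I/O is outstanding
 * (outstanding_io_count == 1 below), so the second child must never be submitted.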
5122 */
5123 for (i = 0; i < SPDK_BDEV_IO_NUM_CHILD_IOV * 2; i++) {
5124 iov[i].iov_base = (void *)((i + 1) * 0x10000);
5125 iov[i].iov_len = 512;
5126 }
5127
5128 bdev->optimal_io_boundary = SPDK_BDEV_IO_NUM_CHILD_IOV;
5129 g_io_done = false;
5130 rc = spdk_bdev_readv_blocks(desc, io_ch, iov, SPDK_BDEV_IO_NUM_CHILD_IOV * 2, 0,
5131 SPDK_BDEV_IO_NUM_CHILD_IOV * 2, io_done, &io_ctx1);
5132 CU_ASSERT(rc == 0);
5133 CU_ASSERT(g_io_done == false);
5134
5135 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
5136
5137 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS;
5138
5139 rc = spdk_bdev_abort(desc, io_ch, &io_ctx1, abort_done, NULL);
5140 CU_ASSERT(rc == 0);
5141 CU_ASSERT(g_io_done == true);
5142 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED);
5143 stub_complete_io(1);
5144 CU_ASSERT(g_abort_done == true);
5145 CU_ASSERT(g_abort_status == SPDK_BDEV_IO_STATUS_SUCCESS);
5146
5147 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS;
5148
5149 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
5150
5151 bdev->optimal_io_boundary = 16;
5152 g_io_done = false;
5153
5154 /* Test that a single-vector command which is split is aborted correctly.
5155 * Unlike the cases above, the child abort requests have to be submitted
5156 * sequentially because the spdk_bdev_io pool (only 7 entries in this test)
5157 * runs out, so the abort waits on the mgmt channel's io_wait_queue.
5158 */
5159 rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 50, io_done, &io_ctx1);
5160 CU_ASSERT(rc == 0);
5161 CU_ASSERT(g_io_done == false);
5162
5163 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 4);
5164
5165 g_abort_done = false;
5166 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS;
5167
5168 rc = spdk_bdev_abort(desc, io_ch, &io_ctx1, abort_done, NULL);
5169 CU_ASSERT(rc == 0);
5170 CU_ASSERT(!TAILQ_EMPTY(&mgmt_ch->io_wait_queue));
5171 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 4);
5172
5173 stub_complete_io(1);
5174 CU_ASSERT(g_io_done == true);
5175 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED);
5176 stub_complete_io(3);
5177 CU_ASSERT(g_abort_done == true);
5178 CU_ASSERT(g_abort_status == SPDK_BDEV_IO_STATUS_SUCCESS);
5179
5180 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS;
5181
5182 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
5183
5184 spdk_put_io_channel(io_ch);
5185 spdk_bdev_close(desc);
5186 free_bdev(bdev);
5187 ut_fini_bdev();
5188 }
5189
5190 static void
5191 bdev_unmap(void)
5192 {
5193 struct spdk_bdev *bdev;
5194 struct spdk_bdev_desc *desc = NULL;
5195 struct spdk_io_channel *ioch;
5196 struct spdk_bdev_channel *bdev_ch;
5197 struct ut_expected_io *expected_io;
5198 struct spdk_bdev_opts bdev_opts = {};
5199 uint32_t i, num_outstanding;
5200 uint64_t offset, num_blocks, max_unmap_blocks, num_children;
5201 int rc;
5202
5203 spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts));
5204 bdev_opts.bdev_io_pool_size = 512;
5205 bdev_opts.bdev_io_cache_size = 64;
5206 ut_init_bdev(&bdev_opts);
5207
5208 bdev = allocate_bdev("bdev");
5209
5210 rc = spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc);
5211 CU_ASSERT_EQUAL(rc, 0);
5212 SPDK_CU_ASSERT_FATAL(desc != NULL);
5213 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc));
5214 ioch = spdk_bdev_get_io_channel(desc);
5215 SPDK_CU_ASSERT_FATAL(ioch != NULL);
5216 bdev_ch = spdk_io_channel_get_ctx(ioch);
5217 CU_ASSERT(TAILQ_EMPTY(&bdev_ch->io_submitted));
5218
5219 fn_table.submit_request = stub_submit_request;
5220 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS;
5221
5222 /* Case 1: First test the request won't be split */
5223 num_blocks = 32;
5224
5225 g_io_done = false;
5226 expected_io =
ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_UNMAP, 0, num_blocks, 0); 5226 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 5227 rc = spdk_bdev_unmap_blocks(desc, ioch, 0, num_blocks, io_done, NULL); 5228 CU_ASSERT_EQUAL(rc, 0); 5229 CU_ASSERT(g_io_done == false); 5230 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 5231 stub_complete_io(1); 5232 CU_ASSERT(g_io_done == true); 5233 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 5234 5235 /* Case 2: Test the split with 2 children requests */ 5236 bdev->max_unmap = 8; 5237 bdev->max_unmap_segments = 2; 5238 max_unmap_blocks = bdev->max_unmap * bdev->max_unmap_segments; 5239 num_blocks = max_unmap_blocks * 2; 5240 offset = 0; 5241 5242 g_io_done = false; 5243 for (i = 0; i < 2; i++) { 5244 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_UNMAP, offset, max_unmap_blocks, 0); 5245 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 5246 offset += max_unmap_blocks; 5247 } 5248 5249 rc = spdk_bdev_unmap_blocks(desc, ioch, 0, num_blocks, io_done, NULL); 5250 CU_ASSERT_EQUAL(rc, 0); 5251 CU_ASSERT(g_io_done == false); 5252 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 5253 stub_complete_io(2); 5254 CU_ASSERT(g_io_done == true); 5255 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 5256 5257 /* Case 3: Test the split with 15 children requests, will finish 8 requests first */ 5258 num_children = 15; 5259 num_blocks = max_unmap_blocks * num_children; 5260 g_io_done = false; 5261 offset = 0; 5262 for (i = 0; i < num_children; i++) { 5263 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_UNMAP, offset, max_unmap_blocks, 0); 5264 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 5265 offset += max_unmap_blocks; 5266 } 5267 5268 rc = spdk_bdev_unmap_blocks(desc, ioch, 0, num_blocks, io_done, NULL); 5269 CU_ASSERT_EQUAL(rc, 0); 5270 CU_ASSERT(g_io_done == false); 5271 5272 while (num_children > 0) { 5273 num_outstanding = spdk_min(num_children, SPDK_BDEV_MAX_CHILDREN_UNMAP_WRITE_ZEROES_REQS); 5274 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == num_outstanding); 5275 stub_complete_io(num_outstanding); 5276 num_children -= num_outstanding; 5277 } 5278 CU_ASSERT(g_io_done == true); 5279 5280 spdk_put_io_channel(ioch); 5281 spdk_bdev_close(desc); 5282 free_bdev(bdev); 5283 ut_fini_bdev(); 5284 } 5285 5286 static void 5287 bdev_write_zeroes_split_test(void) 5288 { 5289 struct spdk_bdev *bdev; 5290 struct spdk_bdev_desc *desc = NULL; 5291 struct spdk_io_channel *ioch; 5292 struct spdk_bdev_channel *bdev_ch; 5293 struct ut_expected_io *expected_io; 5294 struct spdk_bdev_opts bdev_opts = {}; 5295 uint32_t i, num_outstanding; 5296 uint64_t offset, num_blocks, max_write_zeroes_blocks, num_children; 5297 int rc; 5298 5299 spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts)); 5300 bdev_opts.bdev_io_pool_size = 512; 5301 bdev_opts.bdev_io_cache_size = 64; 5302 ut_init_bdev(&bdev_opts); 5303 5304 bdev = allocate_bdev("bdev"); 5305 5306 rc = spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc); 5307 CU_ASSERT_EQUAL(rc, 0); 5308 SPDK_CU_ASSERT_FATAL(desc != NULL); 5309 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 5310 ioch = spdk_bdev_get_io_channel(desc); 5311 SPDK_CU_ASSERT_FATAL(ioch != NULL); 5312 bdev_ch = spdk_io_channel_get_ctx(ioch); 5313 CU_ASSERT(TAILQ_EMPTY(&bdev_ch->io_submitted)); 5314 5315 fn_table.submit_request = stub_submit_request; 5316 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 5317 5318 /* Case 1: First test the request won't be 
split */
5319 num_blocks = 32;
5320
5321 g_io_done = false;
5322 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, 0, num_blocks, 0);
5323 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
5324 rc = spdk_bdev_write_zeroes_blocks(desc, ioch, 0, num_blocks, io_done, NULL);
5325 CU_ASSERT_EQUAL(rc, 0);
5326 CU_ASSERT(g_io_done == false);
5327 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
5328 stub_complete_io(1);
5329 CU_ASSERT(g_io_done == true);
5330 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
5331
5332 /* Case 2: Test the split with 2 children requests */
5333 max_write_zeroes_blocks = 8;
5334 bdev->max_write_zeroes = max_write_zeroes_blocks;
5335 num_blocks = max_write_zeroes_blocks * 2;
5336 offset = 0;
5337
5338 g_io_done = false;
5339 for (i = 0; i < 2; i++) {
5340 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, offset, max_write_zeroes_blocks,
5341 0);
5342 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
5343 offset += max_write_zeroes_blocks;
5344 }
5345
5346 rc = spdk_bdev_write_zeroes_blocks(desc, ioch, 0, num_blocks, io_done, NULL);
5347 CU_ASSERT_EQUAL(rc, 0);
5348 CU_ASSERT(g_io_done == false);
5349 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2);
5350 stub_complete_io(2);
5351 CU_ASSERT(g_io_done == true);
5352 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
5353
5354 /* Case 3: Test the split with 15 children requests, will finish 8 requests first */
5355 num_children = 15;
5356 num_blocks = max_write_zeroes_blocks * num_children;
5357 g_io_done = false;
5358 offset = 0;
5359 for (i = 0; i < num_children; i++) {
5360 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, offset, max_write_zeroes_blocks,
5361 0);
5362 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
5363 offset += max_write_zeroes_blocks;
5364 }
5365
5366 rc = spdk_bdev_write_zeroes_blocks(desc, ioch, 0, num_blocks, io_done, NULL);
5367 CU_ASSERT_EQUAL(rc, 0);
5368 CU_ASSERT(g_io_done == false);
5369
5370 while (num_children > 0) {
5371 num_outstanding = spdk_min(num_children, SPDK_BDEV_MAX_CHILDREN_UNMAP_WRITE_ZEROES_REQS);
5372 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == num_outstanding);
5373 stub_complete_io(num_outstanding);
5374 num_children -= num_outstanding;
5375 }
5376 CU_ASSERT(g_io_done == true);
5377
5378 spdk_put_io_channel(ioch);
5379 spdk_bdev_close(desc);
5380 free_bdev(bdev);
5381 ut_fini_bdev();
5382 }
5383
5384 static void
5385 bdev_set_options_test(void)
5386 {
5387 struct spdk_bdev_opts bdev_opts = {};
5388 int rc;
5389
5390 /* Case 1: opts_size is not set, so setting options must fail */
5391 rc = spdk_bdev_set_opts(&bdev_opts);
5392 CU_ASSERT(rc == -1);
5393
5394 spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts));
5395 bdev_opts.bdev_io_pool_size = 4;
5396 bdev_opts.bdev_io_cache_size = 2;
5397 bdev_opts.small_buf_pool_size = 4;
5398
5399 /* Case 2: Set an invalid small_buf_pool_size */
5400 rc = spdk_bdev_set_opts(&bdev_opts);
5401 CU_ASSERT(rc == -1);
5402
5403 /* Case 3: Set an invalid large_buf_pool_size */
5404 bdev_opts.small_buf_pool_size = BUF_SMALL_POOL_SIZE;
5405 bdev_opts.large_buf_pool_size = BUF_LARGE_POOL_SIZE - 1;
5406 rc = spdk_bdev_set_opts(&bdev_opts);
5407 CU_ASSERT(rc == -1);
5408
5409 /* Case 4: Set a valid large_buf_pool_size */
5410 bdev_opts.large_buf_pool_size = BUF_LARGE_POOL_SIZE;
5411 rc = spdk_bdev_set_opts(&bdev_opts);
5412 CU_ASSERT(rc == 0);
5413
5414 /* Case 5: Set different valid values for the small and
large buf pools */
5415 bdev_opts.small_buf_pool_size = BUF_SMALL_POOL_SIZE + 3;
5416 bdev_opts.large_buf_pool_size = BUF_LARGE_POOL_SIZE + 3;
5417 rc = spdk_bdev_set_opts(&bdev_opts);
5418 CU_ASSERT(rc == 0);
5419 }
5420
5421 static uint64_t
5422 get_ns_time(void)
5423 {
5424 int rc;
5425 struct timespec ts;
5426
5427 rc = clock_gettime(CLOCK_MONOTONIC, &ts);
5428 CU_ASSERT(rc == 0);
5429 return (uint64_t)ts.tv_sec * 1000 * 1000 * 1000 + ts.tv_nsec;
5430 }
5431
5432 static int
5433 rb_tree_get_height(struct spdk_bdev_name *bdev_name)
5434 {
5435 int h1, h2;
5436
5437 if (bdev_name == NULL) {
5438 return -1;
5439 } else {
5440 h1 = rb_tree_get_height(RB_LEFT(bdev_name, node));
5441 h2 = rb_tree_get_height(RB_RIGHT(bdev_name, node));
5442
5443 return spdk_max(h1, h2) + 1;
5444 }
5445 }
5446
5447 static void
5448 bdev_multi_allocation(void)
5449 {
5450 const int max_bdev_num = 1024 * 16;
5451 char name[max_bdev_num][16];
5452 char noexist_name[] = "invalid_bdev";
5453 struct spdk_bdev *bdev[max_bdev_num];
5454 int i, j;
5455 uint64_t last_time;
5456 int bdev_num;
5457 int height;
5458
5459 for (j = 0; j < max_bdev_num; j++) {
5460 snprintf(name[j], sizeof(name[j]), "bdev%d", j);
5461 }
5462
5463 for (i = 0; i < 16; i++) {
5464 last_time = get_ns_time();
5465 bdev_num = 1024 * (i + 1);
5466 for (j = 0; j < bdev_num; j++) {
5467 bdev[j] = allocate_bdev(name[j]);
5468 height = rb_tree_get_height(&bdev[j]->internal.bdev_name);
5469 CU_ASSERT(height <= (int)(spdk_u32log2(2 * j + 2)));
5470 }
5471 SPDK_NOTICELOG("alloc bdev num %d takes %" PRIu64 " ms\n", bdev_num,
5472 (get_ns_time() - last_time) / 1000 / 1000);
5473 for (j = 0; j < bdev_num; j++) {
5474 CU_ASSERT(spdk_bdev_get_by_name(name[j]) != NULL);
5475 }
5476 CU_ASSERT(spdk_bdev_get_by_name(noexist_name) == NULL);
5477
5478 for (j = 0; j < bdev_num; j++) {
5479 free_bdev(bdev[j]);
5480 }
5481 for (j = 0; j < bdev_num; j++) {
5482 CU_ASSERT(spdk_bdev_get_by_name(name[j]) == NULL);
5483 }
5484 }
5485 }
5486
5487 static struct spdk_memory_domain *g_bdev_memory_domain = (struct spdk_memory_domain *) 0xf00df00d;
5488
5489 static int
5490 test_bdev_get_supported_dma_device_types_op(void *ctx, struct spdk_memory_domain **domains,
5491 int array_size)
5492 {
5493 if (array_size > 0 && domains) {
5494 domains[0] = g_bdev_memory_domain;
5495 }
5496
5497 return 1;
5498 }
5499
5500 static void
5501 bdev_get_memory_domains(void)
5502 {
5503 struct spdk_bdev_fn_table fn_table = {
5504 .get_memory_domains = test_bdev_get_supported_dma_device_types_op
5505 };
5506 struct spdk_bdev bdev = { .fn_table = &fn_table };
5507 struct spdk_memory_domain *domains[2] = {};
5508 int rc;
5509
5510 /* bdev is NULL */
5511 rc = spdk_bdev_get_memory_domains(NULL, domains, 2);
5512 CU_ASSERT(rc == -EINVAL);
5513
5514 /* domains is NULL */
5515 rc = spdk_bdev_get_memory_domains(&bdev, NULL, 2);
5516 CU_ASSERT(rc == 1);
5517
5518 /* array size is 0 */
5519 rc = spdk_bdev_get_memory_domains(&bdev, domains, 0);
5520 CU_ASSERT(rc == 1);
5521
5522 /* get_memory_domains op is set */
5523 rc = spdk_bdev_get_memory_domains(&bdev, domains, 2);
5524 CU_ASSERT(rc == 1);
5525 CU_ASSERT(domains[0] == g_bdev_memory_domain);
5526
5527 /* get_memory_domains op is not set */
5528 fn_table.get_memory_domains = NULL;
5529 rc = spdk_bdev_get_memory_domains(&bdev, domains, 2);
5530 CU_ASSERT(rc == 0);
5531 }
5532
5533 static void
5534 _bdev_io_ext(struct spdk_bdev_ext_io_opts *ext_io_opts)
5535 {
5536 struct spdk_bdev *bdev;
5537 struct spdk_bdev_desc *desc = NULL;
5538 struct
spdk_io_channel *io_ch; 5539 char io_buf[512]; 5540 struct iovec iov = { .iov_base = io_buf, .iov_len = 512 }; 5541 struct ut_expected_io *expected_io; 5542 int rc; 5543 5544 ut_init_bdev(NULL); 5545 5546 bdev = allocate_bdev("bdev0"); 5547 bdev->md_interleave = false; 5548 bdev->md_len = 8; 5549 5550 rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc); 5551 CU_ASSERT(rc == 0); 5552 SPDK_CU_ASSERT_FATAL(desc != NULL); 5553 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 5554 io_ch = spdk_bdev_get_io_channel(desc); 5555 CU_ASSERT(io_ch != NULL); 5556 5557 /* read */ 5558 g_io_done = false; 5559 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 32, 14, 1); 5560 if (ext_io_opts) { 5561 expected_io->md_buf = ext_io_opts->metadata; 5562 } 5563 ut_expected_io_set_iov(expected_io, 0, iov.iov_base, iov.iov_len); 5564 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 5565 5566 rc = spdk_bdev_readv_blocks_ext(desc, io_ch, &iov, 1, 32, 14, io_done, NULL, ext_io_opts); 5567 5568 CU_ASSERT(rc == 0); 5569 CU_ASSERT(g_io_done == false); 5570 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 5571 stub_complete_io(1); 5572 CU_ASSERT(g_io_done == true); 5573 5574 /* write */ 5575 g_io_done = false; 5576 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 32, 14, 1); 5577 if (ext_io_opts) { 5578 expected_io->md_buf = ext_io_opts->metadata; 5579 } 5580 ut_expected_io_set_iov(expected_io, 0, iov.iov_base, iov.iov_len); 5581 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 5582 5583 rc = spdk_bdev_writev_blocks_ext(desc, io_ch, &iov, 1, 32, 14, io_done, NULL, ext_io_opts); 5584 5585 CU_ASSERT(rc == 0); 5586 CU_ASSERT(g_io_done == false); 5587 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 5588 stub_complete_io(1); 5589 CU_ASSERT(g_io_done == true); 5590 5591 spdk_put_io_channel(io_ch); 5592 spdk_bdev_close(desc); 5593 free_bdev(bdev); 5594 ut_fini_bdev(); 5595 5596 } 5597 5598 static void 5599 bdev_io_ext(void) 5600 { 5601 struct spdk_bdev_ext_io_opts ext_io_opts = { 5602 .metadata = (void *)0xFF000000, 5603 .size = sizeof(ext_io_opts) 5604 }; 5605 5606 _bdev_io_ext(&ext_io_opts); 5607 } 5608 5609 static void 5610 bdev_io_ext_no_opts(void) 5611 { 5612 _bdev_io_ext(NULL); 5613 } 5614 5615 static void 5616 bdev_io_ext_invalid_opts(void) 5617 { 5618 struct spdk_bdev *bdev; 5619 struct spdk_bdev_desc *desc = NULL; 5620 struct spdk_io_channel *io_ch; 5621 char io_buf[512]; 5622 struct iovec iov = { .iov_base = io_buf, .iov_len = 512 }; 5623 struct spdk_bdev_ext_io_opts ext_io_opts = { 5624 .metadata = (void *)0xFF000000, 5625 .size = sizeof(ext_io_opts) 5626 }; 5627 int rc; 5628 5629 ut_init_bdev(NULL); 5630 5631 bdev = allocate_bdev("bdev0"); 5632 bdev->md_interleave = false; 5633 bdev->md_len = 8; 5634 5635 rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc); 5636 CU_ASSERT(rc == 0); 5637 SPDK_CU_ASSERT_FATAL(desc != NULL); 5638 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 5639 io_ch = spdk_bdev_get_io_channel(desc); 5640 CU_ASSERT(io_ch != NULL); 5641 5642 /* Test invalid ext_opts size */ 5643 ext_io_opts.size = 0; 5644 rc = spdk_bdev_readv_blocks_ext(desc, io_ch, &iov, 1, 32, 14, io_done, NULL, &ext_io_opts); 5645 CU_ASSERT(rc == -EINVAL); 5646 rc = spdk_bdev_writev_blocks_ext(desc, io_ch, &iov, 1, 32, 14, io_done, NULL, &ext_io_opts); 5647 CU_ASSERT(rc == -EINVAL); 5648 5649 ext_io_opts.size = sizeof(ext_io_opts) * 2; 5650 rc = spdk_bdev_readv_blocks_ext(desc, io_ch, &iov, 1, 32, 14, io_done, 
NULL, &ext_io_opts); 5651 CU_ASSERT(rc == -EINVAL); 5652 rc = spdk_bdev_writev_blocks_ext(desc, io_ch, &iov, 1, 32, 14, io_done, NULL, &ext_io_opts); 5653 CU_ASSERT(rc == -EINVAL); 5654 5655 ext_io_opts.size = offsetof(struct spdk_bdev_ext_io_opts, metadata) + 5656 sizeof(ext_io_opts.metadata) - 1; 5657 rc = spdk_bdev_readv_blocks_ext(desc, io_ch, &iov, 1, 32, 14, io_done, NULL, &ext_io_opts); 5658 CU_ASSERT(rc == -EINVAL); 5659 rc = spdk_bdev_writev_blocks_ext(desc, io_ch, &iov, 1, 32, 14, io_done, NULL, &ext_io_opts); 5660 CU_ASSERT(rc == -EINVAL); 5661 5662 spdk_put_io_channel(io_ch); 5663 spdk_bdev_close(desc); 5664 free_bdev(bdev); 5665 ut_fini_bdev(); 5666 } 5667 5668 static void 5669 bdev_io_ext_split(void) 5670 { 5671 struct spdk_bdev *bdev; 5672 struct spdk_bdev_desc *desc = NULL; 5673 struct spdk_io_channel *io_ch; 5674 char io_buf[512]; 5675 struct iovec iov = { .iov_base = io_buf, .iov_len = 512 }; 5676 struct ut_expected_io *expected_io; 5677 struct spdk_bdev_ext_io_opts ext_io_opts = { 5678 .metadata = (void *)0xFF000000, 5679 .size = sizeof(ext_io_opts) 5680 }; 5681 int rc; 5682 5683 ut_init_bdev(NULL); 5684 5685 bdev = allocate_bdev("bdev0"); 5686 bdev->md_interleave = false; 5687 bdev->md_len = 8; 5688 5689 rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc); 5690 CU_ASSERT(rc == 0); 5691 SPDK_CU_ASSERT_FATAL(desc != NULL); 5692 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 5693 io_ch = spdk_bdev_get_io_channel(desc); 5694 CU_ASSERT(io_ch != NULL); 5695 5696 /* Check that IO request with ext_opts and metadata is split correctly 5697 * Offset 14, length 8, payload 0xF000 5698 * Child - Offset 14, length 2, payload 0xF000 5699 * Child - Offset 16, length 6, payload 0xF000 + 2 * 512 5700 */ 5701 bdev->optimal_io_boundary = 16; 5702 bdev->split_on_optimal_io_boundary = true; 5703 bdev->md_interleave = false; 5704 bdev->md_len = 8; 5705 5706 iov.iov_base = (void *)0xF000; 5707 iov.iov_len = 4096; 5708 memset(&ext_io_opts, 0, sizeof(ext_io_opts)); 5709 ext_io_opts.metadata = (void *)0xFF000000; 5710 ext_io_opts.size = sizeof(ext_io_opts); 5711 g_io_done = false; 5712 5713 /* read */ 5714 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 14, 2, 1); 5715 expected_io->md_buf = ext_io_opts.metadata; 5716 ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 2 * 512); 5717 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 5718 5719 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 16, 6, 1); 5720 expected_io->md_buf = ext_io_opts.metadata + 2 * 8; 5721 ut_expected_io_set_iov(expected_io, 0, (void *)(0xF000 + 2 * 512), 6 * 512); 5722 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 5723 5724 rc = spdk_bdev_readv_blocks_ext(desc, io_ch, &iov, 1, 14, 8, io_done, NULL, &ext_io_opts); 5725 CU_ASSERT(rc == 0); 5726 CU_ASSERT(g_io_done == false); 5727 5728 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 5729 stub_complete_io(2); 5730 CU_ASSERT(g_io_done == true); 5731 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 5732 5733 /* write */ 5734 g_io_done = false; 5735 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 14, 2, 1); 5736 expected_io->md_buf = ext_io_opts.metadata; 5737 ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 2 * 512); 5738 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 5739 5740 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 16, 6, 1); 5741 expected_io->md_buf = ext_io_opts.metadata + 2 * 8; 5742 
ut_expected_io_set_iov(expected_io, 0, (void *)(0xF000 + 2 * 512), 6 * 512);
5743 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
5744
5745 rc = spdk_bdev_writev_blocks_ext(desc, io_ch, &iov, 1, 14, 8, io_done, NULL, &ext_io_opts);
5746 CU_ASSERT(rc == 0);
5747 CU_ASSERT(g_io_done == false);
5748
5749 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2);
5750 stub_complete_io(2);
5751 CU_ASSERT(g_io_done == true);
5752 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
5753
5754 spdk_put_io_channel(io_ch);
5755 spdk_bdev_close(desc);
5756 free_bdev(bdev);
5757 ut_fini_bdev();
5758 }
5759
5760 static void
5761 bdev_io_ext_bounce_buffer(void)
5762 {
5763 struct spdk_bdev *bdev;
5764 struct spdk_bdev_desc *desc = NULL;
5765 struct spdk_io_channel *io_ch;
5766 char io_buf[512];
5767 struct iovec iov = { .iov_base = io_buf, .iov_len = 512 };
5768 struct ut_expected_io *expected_io;
5769 struct spdk_bdev_ext_io_opts ext_io_opts = {
5770 .metadata = (void *)0xFF000000,
5771 .size = sizeof(ext_io_opts)
5772 };
5773 int rc;
5774
5775 ut_init_bdev(NULL);
5776
5777 bdev = allocate_bdev("bdev0");
5778 bdev->md_interleave = false;
5779 bdev->md_len = 8;
5780
5781 rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc);
5782 CU_ASSERT(rc == 0);
5783 SPDK_CU_ASSERT_FATAL(desc != NULL);
5784 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc));
5785 io_ch = spdk_bdev_get_io_channel(desc);
5786 CU_ASSERT(io_ch != NULL);
5787
5788 /* Verify the data pull/push paths: the bdev doesn't support memory domains,
5789 * so bounce buffers from the bdev memory pool will be used. */
5790 ext_io_opts.memory_domain = (struct spdk_memory_domain *)0xdeadbeef;
5791
5792 /* read */
5793 g_io_done = false;
5794 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 32, 14, 1);
5795 ut_expected_io_set_iov(expected_io, 0, iov.iov_base, iov.iov_len);
5796 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
5797
5798 rc = spdk_bdev_readv_blocks_ext(desc, io_ch, &iov, 1, 32, 14, io_done, NULL, &ext_io_opts);
5799
5800 CU_ASSERT(rc == 0);
5801 CU_ASSERT(g_io_done == false);
5802 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
5803 stub_complete_io(1);
5804 CU_ASSERT(g_memory_domain_push_data_called == true);
5805 CU_ASSERT(g_io_done == true);
5806
5807 /* write */
5808 g_io_done = false;
5809 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 32, 14, 1);
5810 ut_expected_io_set_iov(expected_io, 0, iov.iov_base, iov.iov_len);
5811 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
5812
5813 rc = spdk_bdev_writev_blocks_ext(desc, io_ch, &iov, 1, 32, 14, io_done, NULL, &ext_io_opts);
5814
5815 CU_ASSERT(rc == 0);
5816 CU_ASSERT(g_memory_domain_pull_data_called == true);
5817 CU_ASSERT(g_io_done == false);
5818 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
5819 stub_complete_io(1);
5820 CU_ASSERT(g_io_done == true);
5821
5822 spdk_put_io_channel(io_ch);
5823 spdk_bdev_close(desc);
5824 free_bdev(bdev);
5825 ut_fini_bdev();
5826 }
5827
5828 static void
5829 bdev_register_uuid_alias(void)
5830 {
5831 struct spdk_bdev *bdev, *second;
5832 char uuid[SPDK_UUID_STRING_LEN];
5833 int rc;
5834
5835 ut_init_bdev(NULL);
5836 bdev = allocate_bdev("bdev0");
5837
5838 /* Make sure a UUID was generated */
5839 CU_ASSERT_FALSE(spdk_mem_all_zero(&bdev->uuid, sizeof(bdev->uuid)));
5840
5841 /* Check that a UUID alias was registered */
5842 spdk_uuid_fmt_lower(uuid, sizeof(uuid), &bdev->uuid);
5843 CU_ASSERT_EQUAL(spdk_bdev_get_by_name(uuid), bdev);
5844
5845 /*
Unregister the bdev */
5846 spdk_bdev_unregister(bdev, NULL, NULL);
5847 poll_threads();
5848 CU_ASSERT_PTR_NULL(spdk_bdev_get_by_name(uuid));
5849
5850 /* Check the same, but this time register the bdev with non-zero UUID */
5851 rc = spdk_bdev_register(bdev);
5852 CU_ASSERT_EQUAL(rc, 0);
5853 CU_ASSERT_EQUAL(spdk_bdev_get_by_name(uuid), bdev);
5854
5855 /* Unregister the bdev */
5856 spdk_bdev_unregister(bdev, NULL, NULL);
5857 poll_threads();
5858 CU_ASSERT_PTR_NULL(spdk_bdev_get_by_name(uuid));
5859
5860 /* Register the bdev using UUID as the name */
5861 bdev->name = uuid;
5862 rc = spdk_bdev_register(bdev);
5863 CU_ASSERT_EQUAL(rc, 0);
5864 CU_ASSERT_EQUAL(spdk_bdev_get_by_name(uuid), bdev);
5865
5866 /* Unregister the bdev */
5867 spdk_bdev_unregister(bdev, NULL, NULL);
5868 poll_threads();
5869 CU_ASSERT_PTR_NULL(spdk_bdev_get_by_name(uuid));
5870
5871 /* Check that it's not possible to register two bdevs with the same UUIDs */
5872 bdev->name = "bdev0";
5873 second = allocate_bdev("bdev1");
5874 spdk_uuid_copy(&bdev->uuid, &second->uuid);
5875 rc = spdk_bdev_register(bdev);
5876 CU_ASSERT_EQUAL(rc, -EEXIST);
5877
5878 /* Regenerate the UUID and re-check */
5879 spdk_uuid_generate(&bdev->uuid);
5880 rc = spdk_bdev_register(bdev);
5881 CU_ASSERT_EQUAL(rc, 0);
5882
5883 /* And check that both bdevs can be retrieved through their UUIDs */
5884 spdk_uuid_fmt_lower(uuid, sizeof(uuid), &bdev->uuid);
5885 CU_ASSERT_EQUAL(spdk_bdev_get_by_name(uuid), bdev);
5886 spdk_uuid_fmt_lower(uuid, sizeof(uuid), &second->uuid);
5887 CU_ASSERT_EQUAL(spdk_bdev_get_by_name(uuid), second);
5888
5889 free_bdev(second);
5890 free_bdev(bdev);
5891 ut_fini_bdev();
5892 }
5893
5894 static void
5895 bdev_unregister_by_name(void)
5896 {
5897 struct spdk_bdev *bdev;
5898 int rc;
5899
5900 bdev = allocate_bdev("bdev");
5901
5902 g_event_type1 = 0xFF;
5903 g_unregister_arg = NULL;
5904 g_unregister_rc = -1;
5905
5906 rc = spdk_bdev_unregister_by_name("bdev1", &bdev_ut_if, bdev_unregister_cb, (void *)0x12345678);
5907 CU_ASSERT(rc == -ENODEV);
5908
5909 rc = spdk_bdev_unregister_by_name("bdev", &vbdev_ut_if, bdev_unregister_cb, (void *)0x12345678);
5910 CU_ASSERT(rc == -ENODEV);
5911
5912 rc = spdk_bdev_unregister_by_name("bdev", &bdev_ut_if, bdev_unregister_cb, (void *)0x12345678);
5913 CU_ASSERT(rc == 0);
5914
5915 /* Check that the unregister callback is delayed */
5916 CU_ASSERT(g_unregister_arg == NULL);
5917 CU_ASSERT(g_unregister_rc == -1);
5918
5919 poll_threads();
5920
5921 /* The event callback shall not be issued because the device was closed */
5922 CU_ASSERT(g_event_type1 == 0xFF);
5923 /* The unregister callback is issued */
5924 CU_ASSERT(g_unregister_arg == (void *)0x12345678);
5925 CU_ASSERT(g_unregister_rc == 0);
5926
5927 free_bdev(bdev);
5928 }
5929
5930 static int
5931 count_bdevs(void *ctx, struct spdk_bdev *bdev)
5932 {
5933 int *count = ctx;
5934
5935 (*count)++;
5936
5937 return 0;
5938 }
5939
5940 static void
5941 for_each_bdev_test(void)
5942 {
5943 struct spdk_bdev *bdev[8];
5944 int rc, count;
5945
5946 bdev[0] = allocate_bdev("bdev0");
5947 bdev[0]->internal.status = SPDK_BDEV_STATUS_REMOVING;
5948
5949 bdev[1] = allocate_bdev("bdev1");
5950 rc = spdk_bdev_module_claim_bdev(bdev[1], NULL, &bdev_ut_if);
5951 CU_ASSERT(rc == 0);
5952
5953 bdev[2] = allocate_bdev("bdev2");
5954
5955 bdev[3] = allocate_bdev("bdev3");
5956 rc = spdk_bdev_module_claim_bdev(bdev[3], NULL, &bdev_ut_if);
5957 CU_ASSERT(rc == 0);
5958
5959 bdev[4] = allocate_bdev("bdev4");
5960
5961 bdev[5] = allocate_bdev("bdev5");
5962 rc =
spdk_bdev_module_claim_bdev(bdev[5], NULL, &bdev_ut_if); 5963 CU_ASSERT(rc == 0); 5964 5965 bdev[6] = allocate_bdev("bdev6"); 5966 5967 bdev[7] = allocate_bdev("bdev7"); 5968 5969 count = 0; 5970 rc = spdk_for_each_bdev(&count, count_bdevs); 5971 CU_ASSERT(rc == 0); 5972 CU_ASSERT(count == 7); 5973 5974 count = 0; 5975 rc = spdk_for_each_bdev_leaf(&count, count_bdevs); 5976 CU_ASSERT(rc == 0); 5977 CU_ASSERT(count == 4); 5978 5979 bdev[0]->internal.status = SPDK_BDEV_STATUS_READY; 5980 free_bdev(bdev[0]); 5981 free_bdev(bdev[1]); 5982 free_bdev(bdev[2]); 5983 free_bdev(bdev[3]); 5984 free_bdev(bdev[4]); 5985 free_bdev(bdev[5]); 5986 free_bdev(bdev[6]); 5987 free_bdev(bdev[7]); 5988 } 5989 5990 static void 5991 bdev_seek_test(void) 5992 { 5993 struct spdk_bdev *bdev; 5994 struct spdk_bdev_desc *desc = NULL; 5995 struct spdk_io_channel *io_ch; 5996 int rc; 5997 5998 ut_init_bdev(NULL); 5999 poll_threads(); 6000 6001 bdev = allocate_bdev("bdev0"); 6002 6003 rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc); 6004 CU_ASSERT(rc == 0); 6005 poll_threads(); 6006 SPDK_CU_ASSERT_FATAL(desc != NULL); 6007 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 6008 io_ch = spdk_bdev_get_io_channel(desc); 6009 CU_ASSERT(io_ch != NULL); 6010 6011 /* Seek data not supported */ 6012 ut_enable_io_type(SPDK_BDEV_IO_TYPE_SEEK_DATA, false); 6013 rc = spdk_bdev_seek_data(desc, io_ch, 0, bdev_seek_cb, NULL); 6014 CU_ASSERT(rc == 0); 6015 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 6016 poll_threads(); 6017 CU_ASSERT(g_seek_offset == 0); 6018 6019 /* Seek hole not supported */ 6020 ut_enable_io_type(SPDK_BDEV_IO_TYPE_SEEK_HOLE, false); 6021 rc = spdk_bdev_seek_hole(desc, io_ch, 0, bdev_seek_cb, NULL); 6022 CU_ASSERT(rc == 0); 6023 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 6024 poll_threads(); 6025 CU_ASSERT(g_seek_offset == UINT64_MAX); 6026 6027 /* Seek data supported */ 6028 g_seek_data_offset = 12345; 6029 ut_enable_io_type(SPDK_BDEV_IO_TYPE_SEEK_DATA, true); 6030 rc = spdk_bdev_seek_data(desc, io_ch, 0, bdev_seek_cb, NULL); 6031 CU_ASSERT(rc == 0); 6032 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 6033 stub_complete_io(1); 6034 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 6035 CU_ASSERT(g_seek_offset == 12345); 6036 6037 /* Seek hole supported */ 6038 g_seek_hole_offset = 67890; 6039 ut_enable_io_type(SPDK_BDEV_IO_TYPE_SEEK_HOLE, true); 6040 rc = spdk_bdev_seek_hole(desc, io_ch, 0, bdev_seek_cb, NULL); 6041 CU_ASSERT(rc == 0); 6042 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 6043 stub_complete_io(1); 6044 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 6045 CU_ASSERT(g_seek_offset == 67890); 6046 6047 spdk_put_io_channel(io_ch); 6048 spdk_bdev_close(desc); 6049 free_bdev(bdev); 6050 ut_fini_bdev(); 6051 } 6052 6053 static void 6054 bdev_copy(void) 6055 { 6056 struct spdk_bdev *bdev; 6057 struct spdk_bdev_desc *desc = NULL; 6058 struct spdk_io_channel *ioch; 6059 struct ut_expected_io *expected_io; 6060 uint64_t src_offset, num_blocks; 6061 uint32_t num_completed; 6062 int rc; 6063 6064 ut_init_bdev(NULL); 6065 bdev = allocate_bdev("bdev"); 6066 6067 rc = spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc); 6068 CU_ASSERT_EQUAL(rc, 0); 6069 SPDK_CU_ASSERT_FATAL(desc != NULL); 6070 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 6071 ioch = spdk_bdev_get_io_channel(desc); 6072 SPDK_CU_ASSERT_FATAL(ioch != NULL); 6073 6074 fn_table.submit_request = stub_submit_request; 6075 g_io_exp_status = 
SPDK_BDEV_IO_STATUS_SUCCESS; 6076 6077 /* First test that if the bdev supports copy, the request won't be split */ 6078 bdev->md_len = 0; 6079 bdev->blocklen = 512; 6080 num_blocks = 128; 6081 src_offset = bdev->blockcnt - num_blocks; 6082 6083 expected_io = ut_alloc_expected_copy_io(SPDK_BDEV_IO_TYPE_COPY, 0, src_offset, num_blocks); 6084 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 6085 6086 rc = spdk_bdev_copy_blocks(desc, ioch, 0, src_offset, num_blocks, io_done, NULL); 6087 CU_ASSERT_EQUAL(rc, 0); 6088 num_completed = stub_complete_io(1); 6089 CU_ASSERT_EQUAL(num_completed, 1); 6090 6091 /* Check that if copy is not supported it'll still work */ 6092 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, src_offset, num_blocks, 0); 6093 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 6094 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 0, num_blocks, 0); 6095 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 6096 6097 ut_enable_io_type(SPDK_BDEV_IO_TYPE_COPY, false); 6098 6099 rc = spdk_bdev_copy_blocks(desc, ioch, 0, src_offset, num_blocks, io_done, NULL); 6100 CU_ASSERT_EQUAL(rc, 0); 6101 num_completed = stub_complete_io(1); 6102 CU_ASSERT_EQUAL(num_completed, 1); 6103 num_completed = stub_complete_io(1); 6104 CU_ASSERT_EQUAL(num_completed, 1); 6105 6106 ut_enable_io_type(SPDK_BDEV_IO_TYPE_COPY, true); 6107 spdk_put_io_channel(ioch); 6108 spdk_bdev_close(desc); 6109 free_bdev(bdev); 6110 ut_fini_bdev(); 6111 } 6112 6113 static void 6114 bdev_copy_split_test(void) 6115 { 6116 struct spdk_bdev *bdev; 6117 struct spdk_bdev_desc *desc = NULL; 6118 struct spdk_io_channel *ioch; 6119 struct spdk_bdev_channel *bdev_ch; 6120 struct ut_expected_io *expected_io; 6121 struct spdk_bdev_opts bdev_opts = {}; 6122 uint32_t i, num_outstanding; 6123 uint64_t offset, src_offset, num_blocks, max_copy_blocks, num_children; 6124 int rc; 6125 6126 spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts)); 6127 bdev_opts.bdev_io_pool_size = 512; 6128 bdev_opts.bdev_io_cache_size = 64; 6129 rc = spdk_bdev_set_opts(&bdev_opts); 6130 CU_ASSERT(rc == 0); 6131 6132 ut_init_bdev(NULL); 6133 bdev = allocate_bdev("bdev"); 6134 6135 rc = spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc); 6136 CU_ASSERT_EQUAL(rc, 0); 6137 SPDK_CU_ASSERT_FATAL(desc != NULL); 6138 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 6139 ioch = spdk_bdev_get_io_channel(desc); 6140 SPDK_CU_ASSERT_FATAL(ioch != NULL); 6141 bdev_ch = spdk_io_channel_get_ctx(ioch); 6142 CU_ASSERT(TAILQ_EMPTY(&bdev_ch->io_submitted)); 6143 6144 fn_table.submit_request = stub_submit_request; 6145 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 6146 6147 /* Case 1: First test the request won't be split */ 6148 num_blocks = 32; 6149 src_offset = bdev->blockcnt - num_blocks; 6150 6151 g_io_done = false; 6152 expected_io = ut_alloc_expected_copy_io(SPDK_BDEV_IO_TYPE_COPY, 0, src_offset, num_blocks); 6153 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 6154 rc = spdk_bdev_copy_blocks(desc, ioch, 0, src_offset, num_blocks, io_done, NULL); 6155 CU_ASSERT_EQUAL(rc, 0); 6156 CU_ASSERT(g_io_done == false); 6157 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 6158 stub_complete_io(1); 6159 CU_ASSERT(g_io_done == true); 6160 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 6161 6162 /* Case 2: Test the split with 2 children requests */ 6163 max_copy_blocks = 8; 6164 bdev->max_copy = max_copy_blocks; 6165 num_children = 2; 6166 num_blocks = 
max_copy_blocks * num_children; 6167 offset = 0; 6168 src_offset = bdev->blockcnt - num_blocks; 6169 6170 g_io_done = false; 6171 for (i = 0; i < num_children; i++) { 6172 expected_io = ut_alloc_expected_copy_io(SPDK_BDEV_IO_TYPE_COPY, offset, 6173 src_offset + offset, max_copy_blocks); 6174 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 6175 offset += max_copy_blocks; 6176 } 6177 6178 rc = spdk_bdev_copy_blocks(desc, ioch, 0, src_offset, num_blocks, io_done, NULL); 6179 CU_ASSERT_EQUAL(rc, 0); 6180 CU_ASSERT(g_io_done == false); 6181 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == num_children); 6182 stub_complete_io(num_children); 6183 CU_ASSERT(g_io_done == true); 6184 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 6185 6186 /* Case 3: Test the split with 15 children requests, will finish 8 requests first */ 6187 num_children = 15; 6188 num_blocks = max_copy_blocks * num_children; 6189 offset = 0; 6190 src_offset = bdev->blockcnt - num_blocks; 6191 6192 g_io_done = false; 6193 for (i = 0; i < num_children; i++) { 6194 expected_io = ut_alloc_expected_copy_io(SPDK_BDEV_IO_TYPE_COPY, offset, 6195 src_offset + offset, max_copy_blocks); 6196 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 6197 offset += max_copy_blocks; 6198 } 6199 6200 rc = spdk_bdev_copy_blocks(desc, ioch, 0, src_offset, num_blocks, io_done, NULL); 6201 CU_ASSERT_EQUAL(rc, 0); 6202 CU_ASSERT(g_io_done == false); 6203 6204 while (num_children > 0) { 6205 num_outstanding = spdk_min(num_children, SPDK_BDEV_MAX_CHILDREN_COPY_REQS); 6206 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == num_outstanding); 6207 stub_complete_io(num_outstanding); 6208 num_children -= num_outstanding; 6209 } 6210 CU_ASSERT(g_io_done == true); 6211 6212 spdk_put_io_channel(ioch); 6213 spdk_bdev_close(desc); 6214 free_bdev(bdev); 6215 ut_fini_bdev(); 6216 } 6217 6218 static void 6219 examine_claim_v1(struct spdk_bdev *bdev) 6220 { 6221 int rc; 6222 6223 rc = spdk_bdev_module_claim_bdev(bdev, NULL, &vbdev_ut_if); 6224 CU_ASSERT(rc == 0); 6225 } 6226 6227 static void 6228 examine_no_lock_held(struct spdk_bdev *bdev) 6229 { 6230 CU_ASSERT(!spdk_spin_held(&g_bdev_mgr.spinlock)); 6231 CU_ASSERT(!spdk_spin_held(&bdev->internal.spinlock)); 6232 } 6233 6234 struct examine_claim_v2_ctx { 6235 struct ut_examine_ctx examine_ctx; 6236 enum spdk_bdev_claim_type claim_type; 6237 struct spdk_bdev_desc *desc; 6238 }; 6239 6240 static void 6241 examine_claim_v2(struct spdk_bdev *bdev) 6242 { 6243 struct examine_claim_v2_ctx *ctx = bdev->ctxt; 6244 int rc; 6245 6246 rc = spdk_bdev_open_ext(bdev->name, false, bdev_ut_event_cb, NULL, &ctx->desc); 6247 CU_ASSERT(rc == 0); 6248 6249 rc = spdk_bdev_module_claim_bdev_desc(ctx->desc, ctx->claim_type, NULL, &vbdev_ut_if); 6250 CU_ASSERT(rc == 0); 6251 } 6252 6253 static void 6254 examine_locks(void) 6255 { 6256 struct spdk_bdev *bdev; 6257 struct ut_examine_ctx ctx = { 0 }; 6258 struct examine_claim_v2_ctx v2_ctx; 6259 6260 /* Without any claims, one code path is taken */ 6261 ctx.examine_config = examine_no_lock_held; 6262 ctx.examine_disk = examine_no_lock_held; 6263 bdev = allocate_bdev_ctx("bdev0", &ctx); 6264 CU_ASSERT(ctx.examine_config_count == 1); 6265 CU_ASSERT(ctx.examine_disk_count == 1); 6266 CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_NONE); 6267 CU_ASSERT(bdev->internal.claim.v1.module == NULL); 6268 free_bdev(bdev); 6269 6270 /* Exercise another path that is taken when examine_config() takes a v1 claim. 
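 * A v1 claim taken from examine_config() shows up as SPDK_BDEV_CLAIM_EXCL_WRITE
 * and has to be released explicitly with spdk_bdev_module_release_bdev().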
*/ 6271 memset(&ctx, 0, sizeof(ctx)); 6272 ctx.examine_config = examine_claim_v1; 6273 ctx.examine_disk = examine_no_lock_held; 6274 bdev = allocate_bdev_ctx("bdev0", &ctx); 6275 CU_ASSERT(ctx.examine_config_count == 1); 6276 CU_ASSERT(ctx.examine_disk_count == 1); 6277 CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_EXCL_WRITE); 6278 CU_ASSERT(bdev->internal.claim.v1.module == &vbdev_ut_if); 6279 spdk_bdev_module_release_bdev(bdev); 6280 CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_NONE); 6281 CU_ASSERT(bdev->internal.claim.v1.module == NULL); 6282 free_bdev(bdev); 6283 6284 /* Exercise the final path that comes with v2 claims. */ 6285 memset(&v2_ctx, 0, sizeof(v2_ctx)); 6286 v2_ctx.examine_ctx.examine_config = examine_claim_v2; 6287 v2_ctx.examine_ctx.examine_disk = examine_no_lock_held; 6288 v2_ctx.claim_type = SPDK_BDEV_CLAIM_READ_MANY_WRITE_NONE; 6289 bdev = allocate_bdev_ctx("bdev0", &v2_ctx); 6290 CU_ASSERT(v2_ctx.examine_ctx.examine_config_count == 1); 6291 CU_ASSERT(v2_ctx.examine_ctx.examine_disk_count == 1); 6292 CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_READ_MANY_WRITE_NONE); 6293 spdk_bdev_close(v2_ctx.desc); 6294 CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_NONE); 6295 free_bdev(bdev); 6296 } 6297 6298 #define UT_ASSERT_CLAIM_V2_COUNT(bdev, expect) \ 6299 do { \ 6300 uint32_t len = 0; \ 6301 struct spdk_bdev_module_claim *claim; \ 6302 TAILQ_FOREACH(claim, &bdev->internal.claim.v2.claims, link) { \ 6303 len++; \ 6304 } \ 6305 CU_ASSERT(len == expect); \ 6306 } while (0) 6307 6308 static void 6309 claim_v2_rwo(void) 6310 { 6311 struct spdk_bdev *bdev; 6312 struct spdk_bdev_desc *desc; 6313 struct spdk_bdev_desc *desc2; 6314 struct spdk_bdev_claim_opts opts; 6315 int rc; 6316 6317 bdev = allocate_bdev("bdev0"); 6318 6319 /* Claim without options */ 6320 desc = NULL; 6321 rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc); 6322 CU_ASSERT(rc == 0); 6323 SPDK_CU_ASSERT_FATAL(desc != NULL); 6324 rc = spdk_bdev_module_claim_bdev_desc(desc, SPDK_BDEV_CLAIM_READ_MANY_WRITE_ONE, NULL, 6325 &bdev_ut_if); 6326 CU_ASSERT(rc == 0); 6327 CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_READ_MANY_WRITE_ONE); 6328 CU_ASSERT(desc->claim != NULL); 6329 CU_ASSERT(desc->claim->module == &bdev_ut_if); 6330 CU_ASSERT(strcmp(desc->claim->name, "") == 0); 6331 CU_ASSERT(TAILQ_FIRST(&bdev->internal.claim.v2.claims) == desc->claim); 6332 UT_ASSERT_CLAIM_V2_COUNT(bdev, 1); 6333 6334 /* Release the claim by closing the descriptor */ 6335 spdk_bdev_close(desc); 6336 CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_NONE); 6337 CU_ASSERT(TAILQ_EMPTY(&bdev->internal.open_descs)); 6338 UT_ASSERT_CLAIM_V2_COUNT(bdev, 0); 6339 6340 /* Claim with options */ 6341 spdk_bdev_claim_opts_init(&opts, sizeof(opts)); 6342 snprintf(opts.name, sizeof(opts.name), "%s", "claim with options"); 6343 desc = NULL; 6344 rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc); 6345 CU_ASSERT(rc == 0); 6346 SPDK_CU_ASSERT_FATAL(desc != NULL); 6347 rc = spdk_bdev_module_claim_bdev_desc(desc, SPDK_BDEV_CLAIM_READ_MANY_WRITE_ONE, &opts, 6348 &bdev_ut_if); 6349 CU_ASSERT(rc == 0); 6350 CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_READ_MANY_WRITE_ONE); 6351 CU_ASSERT(desc->claim != NULL); 6352 CU_ASSERT(desc->claim->module == &bdev_ut_if); 6353 CU_ASSERT(strcmp(desc->claim->name, "claim with options") == 0); 6354 memset(&opts, 0, sizeof(opts)); 6355 CU_ASSERT(strcmp(desc->claim->name, "claim with options") == 0); 6356 
CU_ASSERT(TAILQ_FIRST(&bdev->internal.claim.v2.claims) == desc->claim); 6357 UT_ASSERT_CLAIM_V2_COUNT(bdev, 1); 6358 6359 /* The claim blocks new writers. */ 6360 desc2 = NULL; 6361 rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc2); 6362 CU_ASSERT(rc == -EPERM); 6363 CU_ASSERT(desc2 == NULL); 6364 6365 /* New readers are allowed */ 6366 desc2 = NULL; 6367 rc = spdk_bdev_open_ext("bdev0", false, bdev_ut_event_cb, NULL, &desc2); 6368 CU_ASSERT(rc == 0); 6369 CU_ASSERT(desc2 != NULL); 6370 CU_ASSERT(!desc2->write); 6371 6372 /* No new v2 RWO claims are allowed */ 6373 rc = spdk_bdev_module_claim_bdev_desc(desc2, SPDK_BDEV_CLAIM_READ_MANY_WRITE_ONE, NULL, 6374 &bdev_ut_if); 6375 CU_ASSERT(rc == -EPERM); 6376 6377 /* No new v2 ROM claims are allowed */ 6378 CU_ASSERT(!desc2->write); 6379 rc = spdk_bdev_module_claim_bdev_desc(desc2, SPDK_BDEV_CLAIM_READ_MANY_WRITE_NONE, NULL, 6380 &bdev_ut_if); 6381 CU_ASSERT(rc == -EPERM); 6382 CU_ASSERT(!desc2->write); 6383 6384 /* No new v2 RWM claims are allowed */ 6385 spdk_bdev_claim_opts_init(&opts, sizeof(opts)); 6386 opts.shared_claim_key = (uint64_t)&opts; 6387 rc = spdk_bdev_module_claim_bdev_desc(desc2, SPDK_BDEV_CLAIM_READ_MANY_WRITE_SHARED, &opts, 6388 &bdev_ut_if); 6389 CU_ASSERT(rc == -EPERM); 6390 CU_ASSERT(!desc2->write); 6391 6392 /* No new v1 claims are allowed */ 6393 rc = spdk_bdev_module_claim_bdev(bdev, NULL, &bdev_ut_if); 6394 CU_ASSERT(rc == -EPERM); 6395 6396 /* None of the above changed the existing claim */ 6397 CU_ASSERT(TAILQ_FIRST(&bdev->internal.claim.v2.claims) == desc->claim); 6398 UT_ASSERT_CLAIM_V2_COUNT(bdev, 1); 6399 6400 /* Closing the first descriptor now allows a new claim and it is promoted to rw. */ 6401 spdk_bdev_close(desc); 6402 CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_NONE); 6403 UT_ASSERT_CLAIM_V2_COUNT(bdev, 0); 6404 CU_ASSERT(!desc2->write); 6405 rc = spdk_bdev_module_claim_bdev_desc(desc2, SPDK_BDEV_CLAIM_READ_MANY_WRITE_ONE, NULL, 6406 &bdev_ut_if); 6407 CU_ASSERT(rc == 0); 6408 CU_ASSERT(desc2->claim != NULL); 6409 CU_ASSERT(desc2->write); 6410 CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_READ_MANY_WRITE_ONE); 6411 CU_ASSERT(TAILQ_FIRST(&bdev->internal.claim.v2.claims) == desc2->claim); 6412 UT_ASSERT_CLAIM_V2_COUNT(bdev, 1); 6413 spdk_bdev_close(desc2); 6414 CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_NONE); 6415 UT_ASSERT_CLAIM_V2_COUNT(bdev, 0); 6416 6417 /* Cannot claim with a key */ 6418 desc = NULL; 6419 rc = spdk_bdev_open_ext("bdev0", false, bdev_ut_event_cb, NULL, &desc); 6420 CU_ASSERT(rc == 0); 6421 SPDK_CU_ASSERT_FATAL(desc != NULL); 6422 spdk_bdev_claim_opts_init(&opts, sizeof(opts)); 6423 opts.shared_claim_key = (uint64_t)&opts; 6424 rc = spdk_bdev_module_claim_bdev_desc(desc, SPDK_BDEV_CLAIM_READ_MANY_WRITE_ONE, &opts, 6425 &bdev_ut_if); 6426 CU_ASSERT(rc == -EINVAL); 6427 CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_NONE); 6428 UT_ASSERT_CLAIM_V2_COUNT(bdev, 0); 6429 spdk_bdev_close(desc); 6430 6431 /* Clean up */ 6432 free_bdev(bdev); 6433 } 6434 6435 static void 6436 claim_v2_rom(void) 6437 { 6438 struct spdk_bdev *bdev; 6439 struct spdk_bdev_desc *desc; 6440 struct spdk_bdev_desc *desc2; 6441 struct spdk_bdev_claim_opts opts; 6442 int rc; 6443 6444 bdev = allocate_bdev("bdev0"); 6445 6446 /* Claim without options */ 6447 desc = NULL; 6448 rc = spdk_bdev_open_ext("bdev0", false, bdev_ut_event_cb, NULL, &desc); 6449 CU_ASSERT(rc == 0); 6450 SPDK_CU_ASSERT_FATAL(desc != NULL); 6451 rc = spdk_bdev_module_claim_bdev_desc(desc, 
	rc = spdk_bdev_module_claim_bdev_desc(desc, SPDK_BDEV_CLAIM_READ_MANY_WRITE_NONE, NULL,
					      &bdev_ut_if);
	CU_ASSERT(rc == 0);
	CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_READ_MANY_WRITE_NONE);
	CU_ASSERT(desc->claim != NULL);
	CU_ASSERT(desc->claim->module == &bdev_ut_if);
	CU_ASSERT(strcmp(desc->claim->name, "") == 0);
	CU_ASSERT(TAILQ_FIRST(&bdev->internal.claim.v2.claims) == desc->claim);
	UT_ASSERT_CLAIM_V2_COUNT(bdev, 1);

	/* Release the claim by closing the descriptor */
	spdk_bdev_close(desc);
	CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_NONE);
	CU_ASSERT(TAILQ_EMPTY(&bdev->internal.open_descs));
	UT_ASSERT_CLAIM_V2_COUNT(bdev, 0);

	/* Claim with options */
	spdk_bdev_claim_opts_init(&opts, sizeof(opts));
	snprintf(opts.name, sizeof(opts.name), "%s", "claim with options");
	desc = NULL;
	rc = spdk_bdev_open_ext("bdev0", false, bdev_ut_event_cb, NULL, &desc);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(desc != NULL);
	rc = spdk_bdev_module_claim_bdev_desc(desc, SPDK_BDEV_CLAIM_READ_MANY_WRITE_NONE, &opts,
					      &bdev_ut_if);
	CU_ASSERT(rc == 0);
	CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_READ_MANY_WRITE_NONE);
	SPDK_CU_ASSERT_FATAL(desc->claim != NULL);
	CU_ASSERT(desc->claim->module == &bdev_ut_if);
	CU_ASSERT(strcmp(desc->claim->name, "claim with options") == 0);
	memset(&opts, 0, sizeof(opts));
	CU_ASSERT(strcmp(desc->claim->name, "claim with options") == 0);
	CU_ASSERT(TAILQ_FIRST(&bdev->internal.claim.v2.claims) == desc->claim);
	UT_ASSERT_CLAIM_V2_COUNT(bdev, 1);

	/* The claim blocks new writers. */
	desc2 = NULL;
	rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc2);
	CU_ASSERT(rc == -EPERM);
	CU_ASSERT(desc2 == NULL);

	/* New readers are allowed */
	desc2 = NULL;
	rc = spdk_bdev_open_ext("bdev0", false, bdev_ut_event_cb, NULL, &desc2);
	CU_ASSERT(rc == 0);
	CU_ASSERT(desc2 != NULL);
	CU_ASSERT(!desc2->write);

	/* No new v2 RWO claims are allowed */
	rc = spdk_bdev_module_claim_bdev_desc(desc2, SPDK_BDEV_CLAIM_READ_MANY_WRITE_ONE, NULL,
					      &bdev_ut_if);
	CU_ASSERT(rc == -EPERM);

	/* No new v2 RWM claims are allowed */
	spdk_bdev_claim_opts_init(&opts, sizeof(opts));
	opts.shared_claim_key = (uint64_t)&opts;
	rc = spdk_bdev_module_claim_bdev_desc(desc2, SPDK_BDEV_CLAIM_READ_MANY_WRITE_SHARED, &opts,
					      &bdev_ut_if);
	CU_ASSERT(rc == -EPERM);
	CU_ASSERT(!desc2->write);

	/* No new v1 claims are allowed */
	rc = spdk_bdev_module_claim_bdev(bdev, NULL, &bdev_ut_if);
	CU_ASSERT(rc == -EPERM);

	/* None of the above messed up the existing claim */
	CU_ASSERT(TAILQ_FIRST(&bdev->internal.claim.v2.claims) == desc->claim);
	UT_ASSERT_CLAIM_V2_COUNT(bdev, 1);

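	/*
	 * Unlike RWO, a ROM claim may be shared: each additional claimant is appended to the
	 * v2 claim list and the claim persists until the last holder closes its descriptor.
	 */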
	/* New v2 ROM claims are allowed and the descriptor stays read-only. */
	CU_ASSERT(!desc2->write);
	rc = spdk_bdev_module_claim_bdev_desc(desc2, SPDK_BDEV_CLAIM_READ_MANY_WRITE_NONE, NULL,
					      &bdev_ut_if);
	CU_ASSERT(rc == 0);
	CU_ASSERT(!desc2->write);
	CU_ASSERT(TAILQ_FIRST(&bdev->internal.claim.v2.claims) == desc->claim);
	CU_ASSERT(TAILQ_NEXT(desc->claim, link) == desc2->claim);
	UT_ASSERT_CLAIM_V2_COUNT(bdev, 2);

	/* Claim remains when closing the first descriptor */
	spdk_bdev_close(desc);
	CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_READ_MANY_WRITE_NONE);
	CU_ASSERT(!TAILQ_EMPTY(&bdev->internal.open_descs));
	CU_ASSERT(TAILQ_FIRST(&bdev->internal.claim.v2.claims) == desc2->claim);
	UT_ASSERT_CLAIM_V2_COUNT(bdev, 1);

	/* Claim removed when closing the other descriptor */
	spdk_bdev_close(desc2);
	CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_NONE);
	UT_ASSERT_CLAIM_V2_COUNT(bdev, 0);
	CU_ASSERT(TAILQ_EMPTY(&bdev->internal.open_descs));

	/* Cannot claim with a key */
	desc = NULL;
	rc = spdk_bdev_open_ext("bdev0", false, bdev_ut_event_cb, NULL, &desc);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(desc != NULL);
	spdk_bdev_claim_opts_init(&opts, sizeof(opts));
	opts.shared_claim_key = (uint64_t)&opts;
	rc = spdk_bdev_module_claim_bdev_desc(desc, SPDK_BDEV_CLAIM_READ_MANY_WRITE_NONE, &opts,
					      &bdev_ut_if);
	CU_ASSERT(rc == -EINVAL);
	CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_NONE);
	UT_ASSERT_CLAIM_V2_COUNT(bdev, 0);
	spdk_bdev_close(desc);

	/* Cannot claim with a read-write descriptor */
	desc = NULL;
	rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(desc != NULL);
	rc = spdk_bdev_module_claim_bdev_desc(desc, SPDK_BDEV_CLAIM_READ_MANY_WRITE_NONE, NULL,
					      &bdev_ut_if);
	CU_ASSERT(rc == -EINVAL);
	CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_NONE);
	UT_ASSERT_CLAIM_V2_COUNT(bdev, 0);
	spdk_bdev_close(desc);
	CU_ASSERT(TAILQ_EMPTY(&bdev->internal.open_descs));

	/* Clean up */
	free_bdev(bdev);
}

static void
claim_v2_rwm(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc;
	struct spdk_bdev_desc *desc2;
	struct spdk_bdev_claim_opts opts;
	char good_key, bad_key;
	int rc;

	bdev = allocate_bdev("bdev0");

	/* Claim without options should fail */
	desc = NULL;
	rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(desc != NULL);
	rc = spdk_bdev_module_claim_bdev_desc(desc, SPDK_BDEV_CLAIM_READ_MANY_WRITE_SHARED, NULL,
					      &bdev_ut_if);
	CU_ASSERT(rc == -EINVAL);
	CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_NONE);
	UT_ASSERT_CLAIM_V2_COUNT(bdev, 0);
	CU_ASSERT(desc->claim == NULL);

	/* Claim with options */
	spdk_bdev_claim_opts_init(&opts, sizeof(opts));
	snprintf(opts.name, sizeof(opts.name), "%s", "claim with options");
	opts.shared_claim_key = (uint64_t)&good_key;
	rc = spdk_bdev_module_claim_bdev_desc(desc, SPDK_BDEV_CLAIM_READ_MANY_WRITE_SHARED, &opts,
					      &bdev_ut_if);
	CU_ASSERT(rc == 0);
	CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_READ_MANY_WRITE_SHARED);
	SPDK_CU_ASSERT_FATAL(desc->claim != NULL);
	CU_ASSERT(desc->claim->module == &bdev_ut_if);
	CU_ASSERT(strcmp(desc->claim->name, "claim with options") == 0);
	memset(&opts, 0, sizeof(opts));
	CU_ASSERT(strcmp(desc->claim->name, "claim with options") == 0);
	CU_ASSERT(TAILQ_FIRST(&bdev->internal.claim.v2.claims) == desc->claim);
	UT_ASSERT_CLAIM_V2_COUNT(bdev, 1);

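	/*
	 * shared_claim_key is an opaque caller-chosen value. Only claimants presenting the
	 * same key may join this RWM claim; the bad_key case below is rejected.
	 */
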
	/* The claim blocks new writers. */
	desc2 = NULL;
	rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc2);
	CU_ASSERT(rc == -EPERM);
	CU_ASSERT(desc2 == NULL);

	/* New readers are allowed */
	desc2 = NULL;
	rc = spdk_bdev_open_ext("bdev0", false, bdev_ut_event_cb, NULL, &desc2);
	CU_ASSERT(rc == 0);
	CU_ASSERT(desc2 != NULL);
	CU_ASSERT(!desc2->write);

	/* No new v2 RWO claims are allowed */
	rc = spdk_bdev_module_claim_bdev_desc(desc2, SPDK_BDEV_CLAIM_READ_MANY_WRITE_ONE, NULL,
					      &bdev_ut_if);
	CU_ASSERT(rc == -EPERM);

	/* No new v2 ROM claims are allowed and the descriptor stays read-only. */
	CU_ASSERT(!desc2->write);
	rc = spdk_bdev_module_claim_bdev_desc(desc2, SPDK_BDEV_CLAIM_READ_MANY_WRITE_NONE, NULL,
					      &bdev_ut_if);
	CU_ASSERT(rc == -EPERM);
	CU_ASSERT(!desc2->write);

	/* No new v1 claims are allowed */
	rc = spdk_bdev_module_claim_bdev(bdev, NULL, &bdev_ut_if);
	CU_ASSERT(rc == -EPERM);

	/* No new v2 RWM claims are allowed if the key does not match */
	spdk_bdev_claim_opts_init(&opts, sizeof(opts));
	opts.shared_claim_key = (uint64_t)&bad_key;
	rc = spdk_bdev_module_claim_bdev_desc(desc2, SPDK_BDEV_CLAIM_READ_MANY_WRITE_SHARED, &opts,
					      &bdev_ut_if);
	CU_ASSERT(rc == -EPERM);
	CU_ASSERT(!desc2->write);

	/* None of the above messed up the existing claim */
	CU_ASSERT(TAILQ_FIRST(&bdev->internal.claim.v2.claims) == desc->claim);
	UT_ASSERT_CLAIM_V2_COUNT(bdev, 1);

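	/*
	 * A pattern worth noting in these tests: malformed requests (a missing or disallowed
	 * key) fail with -EINVAL, while conflicts with an existing claim fail with -EPERM.
	 */
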
	/* New v2 RWM claims are allowed and the descriptor is promoted if the key matches. */
	spdk_bdev_claim_opts_init(&opts, sizeof(opts));
	opts.shared_claim_key = (uint64_t)&good_key;
	CU_ASSERT(!desc2->write);
	rc = spdk_bdev_module_claim_bdev_desc(desc2, SPDK_BDEV_CLAIM_READ_MANY_WRITE_SHARED, &opts,
					      &bdev_ut_if);
	CU_ASSERT(rc == 0);
	CU_ASSERT(desc2->write);
	CU_ASSERT(TAILQ_NEXT(desc->claim, link) == desc2->claim);
	UT_ASSERT_CLAIM_V2_COUNT(bdev, 2);

	/* Claim remains when closing the first descriptor */
	spdk_bdev_close(desc);
	CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_READ_MANY_WRITE_SHARED);
	CU_ASSERT(!TAILQ_EMPTY(&bdev->internal.open_descs));
	CU_ASSERT(TAILQ_FIRST(&bdev->internal.claim.v2.claims) == desc2->claim);
	UT_ASSERT_CLAIM_V2_COUNT(bdev, 1);

	/* Claim removed when closing the other descriptor */
	spdk_bdev_close(desc2);
	CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_NONE);
	CU_ASSERT(TAILQ_EMPTY(&bdev->internal.open_descs));

	/* Cannot claim without a key */
	desc = NULL;
	rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(desc != NULL);
	spdk_bdev_claim_opts_init(&opts, sizeof(opts));
	rc = spdk_bdev_module_claim_bdev_desc(desc, SPDK_BDEV_CLAIM_READ_MANY_WRITE_SHARED, &opts,
					      &bdev_ut_if);
	CU_ASSERT(rc == -EINVAL);
	spdk_bdev_close(desc);
	CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_NONE);
	CU_ASSERT(TAILQ_EMPTY(&bdev->internal.open_descs));

	/* Clean up */
	free_bdev(bdev);
}

static void
claim_v2_existing_writer(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc;
	struct spdk_bdev_desc *desc2;
	struct spdk_bdev_claim_opts opts;
	enum spdk_bdev_claim_type type;
	enum spdk_bdev_claim_type types[] = {
		SPDK_BDEV_CLAIM_READ_MANY_WRITE_ONE,
		SPDK_BDEV_CLAIM_READ_MANY_WRITE_SHARED,
		SPDK_BDEV_CLAIM_READ_MANY_WRITE_NONE
	};
	size_t i;
	int rc;

	bdev = allocate_bdev("bdev0");

	desc = NULL;
	rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(desc != NULL);
	desc2 = NULL;
	rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc2);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(desc2 != NULL);

	for (i = 0; i < SPDK_COUNTOF(types); i++) {
		type = types[i];
		spdk_bdev_claim_opts_init(&opts, sizeof(opts));
		if (type == SPDK_BDEV_CLAIM_READ_MANY_WRITE_SHARED) {
			opts.shared_claim_key = (uint64_t)&opts;
		}
		rc = spdk_bdev_module_claim_bdev_desc(desc, type, &opts, &bdev_ut_if);
		if (type == SPDK_BDEV_CLAIM_READ_MANY_WRITE_NONE) {
			CU_ASSERT(rc == -EINVAL);
		} else {
			CU_ASSERT(rc == -EPERM);
		}
		CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_NONE);
		rc = spdk_bdev_module_claim_bdev_desc(desc2, type, &opts, &bdev_ut_if);
		if (type == SPDK_BDEV_CLAIM_READ_MANY_WRITE_NONE) {
			CU_ASSERT(rc == -EINVAL);
		} else {
			CU_ASSERT(rc == -EPERM);
		}
		CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_NONE);
	}

	spdk_bdev_close(desc);
	spdk_bdev_close(desc2);

	/* Clean up */
	free_bdev(bdev);
}

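/*
 * A v1 (EXCL_WRITE) claim on the bdev blocks every v2 claim type until the v1 claim is
 * released.
 */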
static void
claim_v2_existing_v1(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc;
	struct spdk_bdev_claim_opts opts;
	enum spdk_bdev_claim_type type;
	enum spdk_bdev_claim_type types[] = {
		SPDK_BDEV_CLAIM_READ_MANY_WRITE_ONE,
		SPDK_BDEV_CLAIM_READ_MANY_WRITE_SHARED,
		SPDK_BDEV_CLAIM_READ_MANY_WRITE_NONE
	};
	size_t i;
	int rc;

	bdev = allocate_bdev("bdev0");

	rc = spdk_bdev_module_claim_bdev(bdev, NULL, &bdev_ut_if);
	CU_ASSERT(rc == 0);
	CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_EXCL_WRITE);

	desc = NULL;
	rc = spdk_bdev_open_ext("bdev0", false, bdev_ut_event_cb, NULL, &desc);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(desc != NULL);

	for (i = 0; i < SPDK_COUNTOF(types); i++) {
		type = types[i];
		spdk_bdev_claim_opts_init(&opts, sizeof(opts));
		if (type == SPDK_BDEV_CLAIM_READ_MANY_WRITE_SHARED) {
			opts.shared_claim_key = (uint64_t)&opts;
		}
		rc = spdk_bdev_module_claim_bdev_desc(desc, type, &opts, &bdev_ut_if);
		CU_ASSERT(rc == -EPERM);
		CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_EXCL_WRITE);
	}

	spdk_bdev_module_release_bdev(bdev);
	spdk_bdev_close(desc);

	/* Clean up */
	free_bdev(bdev);
}

static void
claim_v1_existing_v2(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc;
	struct spdk_bdev_claim_opts opts;
	enum spdk_bdev_claim_type type;
	enum spdk_bdev_claim_type types[] = {
		SPDK_BDEV_CLAIM_READ_MANY_WRITE_ONE,
		SPDK_BDEV_CLAIM_READ_MANY_WRITE_SHARED,
		SPDK_BDEV_CLAIM_READ_MANY_WRITE_NONE
	};
	size_t i;
	int rc;

	bdev = allocate_bdev("bdev0");

	for (i = 0; i < SPDK_COUNTOF(types); i++) {
		type = types[i];

		desc = NULL;
		rc = spdk_bdev_open_ext("bdev0", false, bdev_ut_event_cb, NULL, &desc);
		CU_ASSERT(rc == 0);
		SPDK_CU_ASSERT_FATAL(desc != NULL);

		/* Get a v2 claim */
		spdk_bdev_claim_opts_init(&opts, sizeof(opts));
		if (type == SPDK_BDEV_CLAIM_READ_MANY_WRITE_SHARED) {
			opts.shared_claim_key = (uint64_t)&opts;
		}
		rc = spdk_bdev_module_claim_bdev_desc(desc, type, &opts, &bdev_ut_if);
		CU_ASSERT(rc == 0);

		/* Fail to get a v1 claim */
		rc = spdk_bdev_module_claim_bdev(bdev, NULL, &bdev_ut_if);
		CU_ASSERT(rc == -EPERM);

		spdk_bdev_close(desc);

		/* Now v1 succeeds */
		rc = spdk_bdev_module_claim_bdev(bdev, NULL, &bdev_ut_if);
		CU_ASSERT(rc == 0);
		spdk_bdev_module_release_bdev(bdev);
	}

	/* Clean up */
	free_bdev(bdev);
}

static void ut_examine_claimed_config0(struct spdk_bdev *bdev);
static void ut_examine_claimed_disk0(struct spdk_bdev *bdev);
static void ut_examine_claimed_config1(struct spdk_bdev *bdev);
static void ut_examine_claimed_disk1(struct spdk_bdev *bdev);

#define UT_MAX_EXAMINE_MODS 2
struct spdk_bdev_module examine_claimed_mods[UT_MAX_EXAMINE_MODS] = {
	{
		.name = "vbdev_ut_examine0",
		.module_init = vbdev_ut_module_init,
		.module_fini = vbdev_ut_module_fini,
		.examine_config = ut_examine_claimed_config0,
		.examine_disk = ut_examine_claimed_disk0,
	},
	{
		.name = "vbdev_ut_examine1",
		.module_init = vbdev_ut_module_init,
		.module_fini = vbdev_ut_module_fini,
		.examine_config = ut_examine_claimed_config1,
		.examine_disk = ut_examine_claimed_disk1,
	}
};

SPDK_BDEV_MODULE_REGISTER(bdev_ut_claimed0, &examine_claimed_mods[0])
SPDK_BDEV_MODULE_REGISTER(bdev_ut_claimed1, &examine_claimed_mods[1])

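/* Per-module state for the examine_claimed test, indexed by examine module number. */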
struct ut_examine_claimed_ctx {
	uint32_t examine_config_count;
	uint32_t examine_disk_count;

	/* Claim type to take, with these options */
	enum spdk_bdev_claim_type claim_type;
	struct spdk_bdev_claim_opts claim_opts;

	/* Expected return value from spdk_bdev_module_claim_bdev_desc() */
	int expect_claim_err;

	/* Descriptor used for a claim */
	struct spdk_bdev_desc *desc;
} examine_claimed_ctx[UT_MAX_EXAMINE_MODS];

bool ut_testing_examine_claimed;

static void
reset_examine_claimed_ctx(void)
{
	struct ut_examine_claimed_ctx *ctx;
	uint32_t i;

	for (i = 0; i < SPDK_COUNTOF(examine_claimed_ctx); i++) {
		ctx = &examine_claimed_ctx[i];
		if (ctx->desc != NULL) {
			spdk_bdev_close(ctx->desc);
		}
		memset(ctx, 0, sizeof(*ctx));
		spdk_bdev_claim_opts_init(&ctx->claim_opts, sizeof(ctx->claim_opts));
	}
}

static void
examine_claimed_config(struct spdk_bdev *bdev, uint32_t modnum)
{
	struct spdk_bdev_module *module = &examine_claimed_mods[modnum];
	struct ut_examine_claimed_ctx *ctx = &examine_claimed_ctx[modnum];
	int rc;

	SPDK_CU_ASSERT_FATAL(modnum < UT_MAX_EXAMINE_MODS);

	if (!ut_testing_examine_claimed) {
		spdk_bdev_module_examine_done(module);
		return;
	}

	ctx->examine_config_count++;

	if (ctx->claim_type != SPDK_BDEV_CLAIM_NONE) {
		rc = spdk_bdev_open_ext(bdev->name, false, bdev_ut_event_cb, NULL, &ctx->desc);
		CU_ASSERT(rc == 0);

		rc = spdk_bdev_module_claim_bdev_desc(ctx->desc, ctx->claim_type, &ctx->claim_opts,
						      module);
		CU_ASSERT(rc == ctx->expect_claim_err);
	}
	spdk_bdev_module_examine_done(module);
}

static void
ut_examine_claimed_config0(struct spdk_bdev *bdev)
{
	examine_claimed_config(bdev, 0);
}

static void
ut_examine_claimed_config1(struct spdk_bdev *bdev)
{
	examine_claimed_config(bdev, 1);
}

static void
examine_claimed_disk(struct spdk_bdev *bdev, uint32_t modnum)
{
	struct spdk_bdev_module *module = &examine_claimed_mods[modnum];
	struct ut_examine_claimed_ctx *ctx = &examine_claimed_ctx[modnum];

	SPDK_CU_ASSERT_FATAL(modnum < UT_MAX_EXAMINE_MODS);

	if (!ut_testing_examine_claimed) {
		spdk_bdev_module_examine_done(module);
		return;
	}

	ctx->examine_disk_count++;

	spdk_bdev_module_examine_done(module);
}

static void
ut_examine_claimed_disk0(struct spdk_bdev *bdev)
{
	examine_claimed_disk(bdev, 0);
}

static void
ut_examine_claimed_disk1(struct spdk_bdev *bdev)
{
	examine_claimed_disk(bdev, 1);
}

static void
examine_claimed(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_module *mod = examine_claimed_mods;
	struct ut_examine_claimed_ctx *ctx = examine_claimed_ctx;

	ut_testing_examine_claimed = true;
	reset_examine_claimed_ctx();

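	/*
	 * Each sub-case below ends with reset_examine_claimed_ctx(), which closes any claim
	 * descriptors taken during examine so that free_bdev() runs with no claims held.
	 */
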
	/*
	 * With one module claiming, both modules' examine_config should be called, but only the
	 * claiming module's examine_disk should be called.
	 */
	ctx[0].claim_type = SPDK_BDEV_CLAIM_READ_MANY_WRITE_NONE;
	bdev = allocate_bdev("bdev0");
	CU_ASSERT(ctx[0].examine_config_count == 1);
	CU_ASSERT(ctx[0].examine_disk_count == 1);
	SPDK_CU_ASSERT_FATAL(ctx[0].desc != NULL);
	CU_ASSERT(ctx[0].desc->claim->module == &mod[0]);
	CU_ASSERT(ctx[1].examine_config_count == 1);
	CU_ASSERT(ctx[1].examine_disk_count == 0);
	CU_ASSERT(ctx[1].desc == NULL);
	reset_examine_claimed_ctx();
	free_bdev(bdev);

	/*
	 * With two modules claiming, both modules' examine_config and examine_disk should be
	 * called.
	 */
	ctx[0].claim_type = SPDK_BDEV_CLAIM_READ_MANY_WRITE_NONE;
	ctx[1].claim_type = SPDK_BDEV_CLAIM_READ_MANY_WRITE_NONE;
	bdev = allocate_bdev("bdev0");
	CU_ASSERT(ctx[0].examine_config_count == 1);
	CU_ASSERT(ctx[0].examine_disk_count == 1);
	SPDK_CU_ASSERT_FATAL(ctx[0].desc != NULL);
	CU_ASSERT(ctx[0].desc->claim->module == &mod[0]);
	CU_ASSERT(ctx[1].examine_config_count == 1);
	CU_ASSERT(ctx[1].examine_disk_count == 1);
	SPDK_CU_ASSERT_FATAL(ctx[1].desc != NULL);
	CU_ASSERT(ctx[1].desc->claim->module == &mod[1]);
	reset_examine_claimed_ctx();
	free_bdev(bdev);

	/*
	 * If two vbdev modules try to claim with conflicting claim types, the module that was
	 * added last wins. The winner gets the claim and is the only one that has its
	 * examine_disk callback invoked.
	 */
	ctx[0].claim_type = SPDK_BDEV_CLAIM_READ_MANY_WRITE_NONE;
	ctx[0].expect_claim_err = -EPERM;
	ctx[1].claim_type = SPDK_BDEV_CLAIM_READ_MANY_WRITE_ONE;
	bdev = allocate_bdev("bdev0");
	CU_ASSERT(ctx[0].examine_config_count == 1);
	CU_ASSERT(ctx[0].examine_disk_count == 0);
	CU_ASSERT(ctx[1].examine_config_count == 1);
	CU_ASSERT(ctx[1].examine_disk_count == 1);
	SPDK_CU_ASSERT_FATAL(ctx[1].desc != NULL);
	CU_ASSERT(ctx[1].desc->claim->module == &mod[1]);
	CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_READ_MANY_WRITE_ONE);
	reset_examine_claimed_ctx();
	free_bdev(bdev);

	ut_testing_examine_claimed = false;
}

int
main(int argc, char **argv)
{
	CU_pSuite suite = NULL;
	unsigned int num_failures;

	CU_set_error_action(CUEA_ABORT);
	CU_initialize_registry();

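	/*
	 * All tests share a single suite, so per CUnit semantics ut_bdev_setup() and
	 * ut_bdev_teardown() run once around the entire run rather than once per test.
	 */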
	suite = CU_add_suite("bdev", ut_bdev_setup, ut_bdev_teardown);

	CU_ADD_TEST(suite, bytes_to_blocks_test);
	CU_ADD_TEST(suite, num_blocks_test);
	CU_ADD_TEST(suite, io_valid_test);
	CU_ADD_TEST(suite, open_write_test);
	CU_ADD_TEST(suite, claim_test);
	CU_ADD_TEST(suite, alias_add_del_test);
	CU_ADD_TEST(suite, get_device_stat_test);
	CU_ADD_TEST(suite, bdev_io_types_test);
	CU_ADD_TEST(suite, bdev_io_wait_test);
	CU_ADD_TEST(suite, bdev_io_spans_split_test);
	CU_ADD_TEST(suite, bdev_io_boundary_split_test);
	CU_ADD_TEST(suite, bdev_io_max_size_and_segment_split_test);
	CU_ADD_TEST(suite, bdev_io_mix_split_test);
	CU_ADD_TEST(suite, bdev_io_split_with_io_wait);
	CU_ADD_TEST(suite, bdev_io_write_unit_split_test);
	CU_ADD_TEST(suite, bdev_io_alignment_with_boundary);
	CU_ADD_TEST(suite, bdev_io_alignment);
	CU_ADD_TEST(suite, bdev_histograms);
	CU_ADD_TEST(suite, bdev_write_zeroes);
	CU_ADD_TEST(suite, bdev_compare_and_write);
	CU_ADD_TEST(suite, bdev_compare);
	CU_ADD_TEST(suite, bdev_compare_emulated);
	CU_ADD_TEST(suite, bdev_zcopy_write);
	CU_ADD_TEST(suite, bdev_zcopy_read);
	CU_ADD_TEST(suite, bdev_open_while_hotremove);
	CU_ADD_TEST(suite, bdev_close_while_hotremove);
	CU_ADD_TEST(suite, bdev_open_ext);
	CU_ADD_TEST(suite, bdev_open_ext_unregister);
	CU_ADD_TEST(suite, bdev_set_io_timeout);
	CU_ADD_TEST(suite, bdev_set_qd_sampling);
	CU_ADD_TEST(suite, lba_range_overlap);
	CU_ADD_TEST(suite, lock_lba_range_check_ranges);
	CU_ADD_TEST(suite, lock_lba_range_with_io_outstanding);
	CU_ADD_TEST(suite, lock_lba_range_overlapped);
	CU_ADD_TEST(suite, bdev_io_abort);
	CU_ADD_TEST(suite, bdev_unmap);
	CU_ADD_TEST(suite, bdev_write_zeroes_split_test);
	CU_ADD_TEST(suite, bdev_set_options_test);
	CU_ADD_TEST(suite, bdev_multi_allocation);
	CU_ADD_TEST(suite, bdev_get_memory_domains);
	CU_ADD_TEST(suite, bdev_io_ext);
	CU_ADD_TEST(suite, bdev_io_ext_no_opts);
	CU_ADD_TEST(suite, bdev_io_ext_invalid_opts);
	CU_ADD_TEST(suite, bdev_io_ext_split);
	CU_ADD_TEST(suite, bdev_io_ext_bounce_buffer);
	CU_ADD_TEST(suite, bdev_register_uuid_alias);
	CU_ADD_TEST(suite, bdev_unregister_by_name);
	CU_ADD_TEST(suite, for_each_bdev_test);
	CU_ADD_TEST(suite, bdev_seek_test);
	CU_ADD_TEST(suite, bdev_copy);
	CU_ADD_TEST(suite, bdev_copy_split_test);
	CU_ADD_TEST(suite, examine_locks);
	CU_ADD_TEST(suite, claim_v2_rwo);
	CU_ADD_TEST(suite, claim_v2_rom);
	CU_ADD_TEST(suite, claim_v2_rwm);
	CU_ADD_TEST(suite, claim_v2_existing_writer);
	CU_ADD_TEST(suite, claim_v2_existing_v1);
	CU_ADD_TEST(suite, claim_v1_existing_v2);
	CU_ADD_TEST(suite, examine_claimed);

	allocate_cores(1);
	allocate_threads(1);
	set_thread(0);

	CU_basic_set_mode(CU_BRM_VERBOSE);
	CU_basic_run_tests();
	num_failures = CU_get_number_of_failures();
	CU_cleanup_registry();

	free_threads();
	free_cores();

	return num_failures;
}