1 /* SPDX-License-Identifier: BSD-3-Clause 2 * Copyright (C) 2017 Intel Corporation. All rights reserved. 3 * Copyright (c) 2019 Mellanox Technologies LTD. All rights reserved. 4 * Copyright (c) 2021-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 5 */ 6 7 #include "spdk_cunit.h" 8 9 #include "common/lib/ut_multithread.c" 10 #include "unit/lib/json_mock.c" 11 12 #include "spdk/config.h" 13 /* HACK: disable VTune integration so the unit test doesn't need VTune headers and libs to build */ 14 #undef SPDK_CONFIG_VTUNE 15 16 #include "bdev/bdev.c" 17 18 DEFINE_STUB(spdk_notify_send, uint64_t, (const char *type, const char *ctx), 0); 19 DEFINE_STUB(spdk_notify_type_register, struct spdk_notify_type *, (const char *type), NULL); 20 DEFINE_STUB(spdk_memory_domain_get_dma_device_id, const char *, (struct spdk_memory_domain *domain), 21 "test_domain"); 22 DEFINE_STUB(spdk_memory_domain_get_dma_device_type, enum spdk_dma_device_type, 23 (struct spdk_memory_domain *domain), 0); 24 DEFINE_STUB(spdk_accel_sequence_finish, int, 25 (struct spdk_accel_sequence *seq, spdk_accel_completion_cb cb_fn, void *cb_arg), 0); 26 DEFINE_STUB_V(spdk_accel_sequence_abort, (struct spdk_accel_sequence *seq)); 27 DEFINE_STUB_V(spdk_accel_sequence_reverse, (struct spdk_accel_sequence *seq)); 28 DEFINE_STUB(spdk_accel_append_copy, int, 29 (struct spdk_accel_sequence **seq, struct spdk_io_channel *ch, struct iovec *dst_iovs, 30 uint32_t dst_iovcnt, struct spdk_memory_domain *dst_domain, void *dst_domain_ctx, 31 struct iovec *src_iovs, uint32_t src_iovcnt, struct spdk_memory_domain *src_domain, 32 void *src_domain_ctx, int flags, spdk_accel_step_cb cb_fn, void *cb_arg), 0); 33 DEFINE_STUB(spdk_accel_get_memory_domain, struct spdk_memory_domain *, (void), NULL); 34 35 static bool g_memory_domain_pull_data_called; 36 static bool g_memory_domain_push_data_called; 37 static int g_accel_io_device; 38 39 DEFINE_RETURN_MOCK(spdk_memory_domain_pull_data, int); 40 int 41 spdk_memory_domain_pull_data(struct spdk_memory_domain *src_domain, void *src_domain_ctx, 42 struct iovec *src_iov, uint32_t src_iov_cnt, struct iovec *dst_iov, uint32_t dst_iov_cnt, 43 spdk_memory_domain_data_cpl_cb cpl_cb, void *cpl_cb_arg) 44 { 45 g_memory_domain_pull_data_called = true; 46 HANDLE_RETURN_MOCK(spdk_memory_domain_pull_data); 47 cpl_cb(cpl_cb_arg, 0); 48 return 0; 49 } 50 51 DEFINE_RETURN_MOCK(spdk_memory_domain_push_data, int); 52 int 53 spdk_memory_domain_push_data(struct spdk_memory_domain *dst_domain, void *dst_domain_ctx, 54 struct iovec *dst_iov, uint32_t dst_iovcnt, struct iovec *src_iov, uint32_t src_iovcnt, 55 spdk_memory_domain_data_cpl_cb cpl_cb, void *cpl_cb_arg) 56 { 57 g_memory_domain_push_data_called = true; 58 HANDLE_RETURN_MOCK(spdk_memory_domain_push_data); 59 cpl_cb(cpl_cb_arg, 0); 60 return 0; 61 } 62 63 struct spdk_io_channel * 64 spdk_accel_get_io_channel(void) 65 { 66 return spdk_get_io_channel(&g_accel_io_device); 67 } 68 69 int g_status; 70 int g_count; 71 enum spdk_bdev_event_type g_event_type1; 72 enum spdk_bdev_event_type g_event_type2; 73 enum spdk_bdev_event_type g_event_type3; 74 enum spdk_bdev_event_type g_event_type4; 75 struct spdk_histogram_data *g_histogram; 76 void *g_unregister_arg; 77 int g_unregister_rc; 78 79 void 80 spdk_scsi_nvme_translate(const struct spdk_bdev_io *bdev_io, 81 int *sc, int *sk, int *asc, int *ascq) 82 { 83 } 84 85 static int 86 ut_accel_ch_create_cb(void *io_device, void *ctx) 87 { 88 return 0; 89 } 90 91 static void 92 ut_accel_ch_destroy_cb(void *io_device, void *ctx) 93 { 94 } 95 96 static int 97 ut_bdev_setup(void) 98 { 99 spdk_io_device_register(&g_accel_io_device, ut_accel_ch_create_cb, 100 ut_accel_ch_destroy_cb, 0, NULL); 101 return 0; 102 } 103 104 static int 105 ut_bdev_teardown(void) 106 { 107 spdk_io_device_unregister(&g_accel_io_device, NULL); 108 109 return 0; 110 } 111 112 static int 113 stub_destruct(void *ctx) 114 { 115 return 0; 116 } 117 118 struct ut_expected_io { 119 uint8_t type; 120 uint64_t offset; 121 uint64_t src_offset; 122 uint64_t length; 123 int iovcnt; 124 struct iovec iov[SPDK_BDEV_IO_NUM_CHILD_IOV]; 125 void *md_buf; 126 TAILQ_ENTRY(ut_expected_io) link; 127 }; 128 129 struct bdev_ut_channel { 130 TAILQ_HEAD(, spdk_bdev_io) outstanding_io; 131 uint32_t outstanding_io_count; 132 TAILQ_HEAD(, ut_expected_io) expected_io; 133 }; 134 135 static bool g_io_done; 136 static struct spdk_bdev_io *g_bdev_io; 137 static enum spdk_bdev_io_status g_io_status; 138 static enum spdk_bdev_io_status g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 139 static uint32_t g_bdev_ut_io_device; 140 static struct bdev_ut_channel *g_bdev_ut_channel; 141 static void *g_compare_read_buf; 142 static uint32_t g_compare_read_buf_len; 143 static void *g_compare_write_buf; 144 static uint32_t g_compare_write_buf_len; 145 static void *g_compare_md_buf; 146 static bool g_abort_done; 147 static enum spdk_bdev_io_status g_abort_status; 148 static void *g_zcopy_read_buf; 149 static uint32_t g_zcopy_read_buf_len; 150 static void *g_zcopy_write_buf; 151 static uint32_t g_zcopy_write_buf_len; 152 static struct spdk_bdev_io *g_zcopy_bdev_io; 153 static uint64_t g_seek_data_offset; 154 static uint64_t g_seek_hole_offset; 155 static uint64_t g_seek_offset; 156 157 static struct ut_expected_io * 158 ut_alloc_expected_io(uint8_t type, uint64_t offset, uint64_t length, int iovcnt) 159 { 160 struct ut_expected_io *expected_io; 161 162 expected_io = calloc(1, sizeof(*expected_io)); 163 SPDK_CU_ASSERT_FATAL(expected_io != NULL); 164 165 expected_io->type = type; 166 expected_io->offset = offset; 167 expected_io->length = length; 168 expected_io->iovcnt = iovcnt; 169 170 return expected_io; 171 } 172 173 static struct ut_expected_io * 174 ut_alloc_expected_copy_io(uint8_t type, uint64_t offset, uint64_t src_offset, uint64_t length) 175 { 176 struct ut_expected_io *expected_io; 177 178 expected_io = calloc(1, sizeof(*expected_io)); 179 SPDK_CU_ASSERT_FATAL(expected_io != NULL); 180 181 expected_io->type = type; 182 expected_io->offset = offset; 183 expected_io->src_offset = src_offset; 184 expected_io->length = length; 185 186 return expected_io; 187 } 188 189 static void 190 ut_expected_io_set_iov(struct ut_expected_io *expected_io, int pos, void *base, size_t len) 191 { 192 expected_io->iov[pos].iov_base = base; 193 expected_io->iov[pos].iov_len = len; 194 } 195 196 static void 197 stub_submit_request(struct spdk_io_channel *_ch, struct spdk_bdev_io *bdev_io) 198 { 199 struct bdev_ut_channel *ch = spdk_io_channel_get_ctx(_ch); 200 struct ut_expected_io *expected_io; 201 struct iovec *iov, *expected_iov; 202 struct spdk_bdev_io *bio_to_abort; 203 int i; 204 205 g_bdev_io = bdev_io; 206 207 if (g_compare_read_buf && bdev_io->type == SPDK_BDEV_IO_TYPE_READ) { 208 uint32_t len = bdev_io->u.bdev.iovs[0].iov_len; 209 210 CU_ASSERT(bdev_io->u.bdev.iovcnt == 1); 211 CU_ASSERT(g_compare_read_buf_len == len); 212 memcpy(bdev_io->u.bdev.iovs[0].iov_base, g_compare_read_buf, len); 213 if (bdev_io->bdev->md_len && bdev_io->u.bdev.md_buf && g_compare_md_buf) { 214 memcpy(bdev_io->u.bdev.md_buf, g_compare_md_buf, 215 bdev_io->bdev->md_len * bdev_io->u.bdev.num_blocks); 216 } 217 } 218 219 if (g_compare_write_buf && bdev_io->type == SPDK_BDEV_IO_TYPE_WRITE) { 220 uint32_t len = bdev_io->u.bdev.iovs[0].iov_len; 221 222 CU_ASSERT(bdev_io->u.bdev.iovcnt == 1); 223 CU_ASSERT(g_compare_write_buf_len == len); 224 memcpy(g_compare_write_buf, bdev_io->u.bdev.iovs[0].iov_base, len); 225 } 226 227 if (g_compare_read_buf && bdev_io->type == SPDK_BDEV_IO_TYPE_COMPARE) { 228 uint32_t len = bdev_io->u.bdev.iovs[0].iov_len; 229 230 CU_ASSERT(bdev_io->u.bdev.iovcnt == 1); 231 CU_ASSERT(g_compare_read_buf_len == len); 232 if (memcmp(bdev_io->u.bdev.iovs[0].iov_base, g_compare_read_buf, len)) { 233 g_io_exp_status = SPDK_BDEV_IO_STATUS_MISCOMPARE; 234 } 235 if (bdev_io->u.bdev.md_buf && 236 memcmp(bdev_io->u.bdev.md_buf, g_compare_md_buf, 237 bdev_io->bdev->md_len * bdev_io->u.bdev.num_blocks)) { 238 g_io_exp_status = SPDK_BDEV_IO_STATUS_MISCOMPARE; 239 } 240 } 241 242 if (bdev_io->type == SPDK_BDEV_IO_TYPE_ABORT) { 243 if (g_io_exp_status == SPDK_BDEV_IO_STATUS_SUCCESS) { 244 TAILQ_FOREACH(bio_to_abort, &ch->outstanding_io, module_link) { 245 if (bio_to_abort == bdev_io->u.abort.bio_to_abort) { 246 TAILQ_REMOVE(&ch->outstanding_io, bio_to_abort, module_link); 247 ch->outstanding_io_count--; 248 spdk_bdev_io_complete(bio_to_abort, SPDK_BDEV_IO_STATUS_FAILED); 249 break; 250 } 251 } 252 } 253 } 254 255 if (bdev_io->type == SPDK_BDEV_IO_TYPE_ZCOPY) { 256 if (bdev_io->u.bdev.zcopy.start) { 257 g_zcopy_bdev_io = bdev_io; 258 if (bdev_io->u.bdev.zcopy.populate) { 259 /* Start of a read */ 260 CU_ASSERT(g_zcopy_read_buf != NULL); 261 CU_ASSERT(g_zcopy_read_buf_len > 0); 262 bdev_io->u.bdev.iovs[0].iov_base = g_zcopy_read_buf; 263 bdev_io->u.bdev.iovs[0].iov_len = g_zcopy_read_buf_len; 264 bdev_io->u.bdev.iovcnt = 1; 265 } else { 266 /* Start of a write */ 267 CU_ASSERT(g_zcopy_write_buf != NULL); 268 CU_ASSERT(g_zcopy_write_buf_len > 0); 269 bdev_io->u.bdev.iovs[0].iov_base = g_zcopy_write_buf; 270 bdev_io->u.bdev.iovs[0].iov_len = g_zcopy_write_buf_len; 271 bdev_io->u.bdev.iovcnt = 1; 272 } 273 } else { 274 if (bdev_io->u.bdev.zcopy.commit) { 275 /* End of write */ 276 CU_ASSERT(bdev_io->u.bdev.iovs[0].iov_base == g_zcopy_write_buf); 277 CU_ASSERT(bdev_io->u.bdev.iovs[0].iov_len == g_zcopy_write_buf_len); 278 CU_ASSERT(bdev_io->u.bdev.iovcnt == 1); 279 g_zcopy_write_buf = NULL; 280 g_zcopy_write_buf_len = 0; 281 } else { 282 /* End of read */ 283 CU_ASSERT(bdev_io->u.bdev.iovs[0].iov_base == g_zcopy_read_buf); 284 CU_ASSERT(bdev_io->u.bdev.iovs[0].iov_len == g_zcopy_read_buf_len); 285 CU_ASSERT(bdev_io->u.bdev.iovcnt == 1); 286 g_zcopy_read_buf = NULL; 287 g_zcopy_read_buf_len = 0; 288 } 289 } 290 } 291 292 if (bdev_io->type == SPDK_BDEV_IO_TYPE_SEEK_DATA) { 293 bdev_io->u.bdev.seek.offset = g_seek_data_offset; 294 } 295 296 if (bdev_io->type == SPDK_BDEV_IO_TYPE_SEEK_HOLE) { 297 bdev_io->u.bdev.seek.offset = g_seek_hole_offset; 298 } 299 300 TAILQ_INSERT_TAIL(&ch->outstanding_io, bdev_io, module_link); 301 ch->outstanding_io_count++; 302 303 expected_io = TAILQ_FIRST(&ch->expected_io); 304 if (expected_io == NULL) { 305 return; 306 } 307 TAILQ_REMOVE(&ch->expected_io, expected_io, link); 308 309 if (expected_io->type != SPDK_BDEV_IO_TYPE_INVALID) { 310 CU_ASSERT(bdev_io->type == expected_io->type); 311 } 312 313 if (expected_io->md_buf != NULL) { 314 CU_ASSERT(expected_io->md_buf == bdev_io->u.bdev.md_buf); 315 } 316 317 if (expected_io->length == 0) { 318 free(expected_io); 319 return; 320 } 321 322 CU_ASSERT(expected_io->offset == bdev_io->u.bdev.offset_blocks); 323 CU_ASSERT(expected_io->length = bdev_io->u.bdev.num_blocks); 324 if (expected_io->type == SPDK_BDEV_IO_TYPE_COPY) { 325 CU_ASSERT(expected_io->src_offset == bdev_io->u.bdev.copy.src_offset_blocks); 326 } 327 328 if (expected_io->iovcnt == 0) { 329 free(expected_io); 330 /* UNMAP, WRITE_ZEROES, FLUSH and COPY don't have iovs, so we can just return now. */ 331 return; 332 } 333 334 CU_ASSERT(expected_io->iovcnt == bdev_io->u.bdev.iovcnt); 335 for (i = 0; i < expected_io->iovcnt; i++) { 336 expected_iov = &expected_io->iov[i]; 337 if (bdev_io->internal.orig_iovcnt == 0) { 338 iov = &bdev_io->u.bdev.iovs[i]; 339 } else { 340 iov = bdev_io->internal.orig_iovs; 341 } 342 CU_ASSERT(iov->iov_len == expected_iov->iov_len); 343 CU_ASSERT(iov->iov_base == expected_iov->iov_base); 344 } 345 346 free(expected_io); 347 } 348 349 static void 350 stub_submit_request_get_buf_cb(struct spdk_io_channel *_ch, 351 struct spdk_bdev_io *bdev_io, bool success) 352 { 353 CU_ASSERT(success == true); 354 355 stub_submit_request(_ch, bdev_io); 356 } 357 358 static void 359 stub_submit_request_get_buf(struct spdk_io_channel *_ch, struct spdk_bdev_io *bdev_io) 360 { 361 spdk_bdev_io_get_buf(bdev_io, stub_submit_request_get_buf_cb, 362 bdev_io->u.bdev.num_blocks * bdev_io->bdev->blocklen); 363 } 364 365 static uint32_t 366 stub_complete_io(uint32_t num_to_complete) 367 { 368 struct bdev_ut_channel *ch = g_bdev_ut_channel; 369 struct spdk_bdev_io *bdev_io; 370 static enum spdk_bdev_io_status io_status; 371 uint32_t num_completed = 0; 372 373 while (num_completed < num_to_complete) { 374 if (TAILQ_EMPTY(&ch->outstanding_io)) { 375 break; 376 } 377 bdev_io = TAILQ_FIRST(&ch->outstanding_io); 378 TAILQ_REMOVE(&ch->outstanding_io, bdev_io, module_link); 379 ch->outstanding_io_count--; 380 io_status = g_io_exp_status == SPDK_BDEV_IO_STATUS_SUCCESS ? SPDK_BDEV_IO_STATUS_SUCCESS : 381 g_io_exp_status; 382 spdk_bdev_io_complete(bdev_io, io_status); 383 num_completed++; 384 } 385 386 return num_completed; 387 } 388 389 static struct spdk_io_channel * 390 bdev_ut_get_io_channel(void *ctx) 391 { 392 return spdk_get_io_channel(&g_bdev_ut_io_device); 393 } 394 395 static bool g_io_types_supported[SPDK_BDEV_NUM_IO_TYPES] = { 396 [SPDK_BDEV_IO_TYPE_READ] = true, 397 [SPDK_BDEV_IO_TYPE_WRITE] = true, 398 [SPDK_BDEV_IO_TYPE_COMPARE] = true, 399 [SPDK_BDEV_IO_TYPE_UNMAP] = true, 400 [SPDK_BDEV_IO_TYPE_FLUSH] = true, 401 [SPDK_BDEV_IO_TYPE_RESET] = true, 402 [SPDK_BDEV_IO_TYPE_NVME_ADMIN] = true, 403 [SPDK_BDEV_IO_TYPE_NVME_IO] = true, 404 [SPDK_BDEV_IO_TYPE_NVME_IO_MD] = true, 405 [SPDK_BDEV_IO_TYPE_WRITE_ZEROES] = true, 406 [SPDK_BDEV_IO_TYPE_ZCOPY] = true, 407 [SPDK_BDEV_IO_TYPE_ABORT] = true, 408 [SPDK_BDEV_IO_TYPE_SEEK_HOLE] = true, 409 [SPDK_BDEV_IO_TYPE_SEEK_DATA] = true, 410 [SPDK_BDEV_IO_TYPE_COPY] = true, 411 }; 412 413 static void 414 ut_enable_io_type(enum spdk_bdev_io_type io_type, bool enable) 415 { 416 g_io_types_supported[io_type] = enable; 417 } 418 419 static bool 420 stub_io_type_supported(void *_bdev, enum spdk_bdev_io_type io_type) 421 { 422 return g_io_types_supported[io_type]; 423 } 424 425 static struct spdk_bdev_fn_table fn_table = { 426 .destruct = stub_destruct, 427 .submit_request = stub_submit_request, 428 .get_io_channel = bdev_ut_get_io_channel, 429 .io_type_supported = stub_io_type_supported, 430 }; 431 432 static int 433 bdev_ut_create_ch(void *io_device, void *ctx_buf) 434 { 435 struct bdev_ut_channel *ch = ctx_buf; 436 437 CU_ASSERT(g_bdev_ut_channel == NULL); 438 g_bdev_ut_channel = ch; 439 440 TAILQ_INIT(&ch->outstanding_io); 441 ch->outstanding_io_count = 0; 442 TAILQ_INIT(&ch->expected_io); 443 return 0; 444 } 445 446 static void 447 bdev_ut_destroy_ch(void *io_device, void *ctx_buf) 448 { 449 CU_ASSERT(g_bdev_ut_channel != NULL); 450 g_bdev_ut_channel = NULL; 451 } 452 453 struct spdk_bdev_module bdev_ut_if; 454 455 static int 456 bdev_ut_module_init(void) 457 { 458 spdk_io_device_register(&g_bdev_ut_io_device, bdev_ut_create_ch, bdev_ut_destroy_ch, 459 sizeof(struct bdev_ut_channel), NULL); 460 spdk_bdev_module_init_done(&bdev_ut_if); 461 return 0; 462 } 463 464 static void 465 bdev_ut_module_fini(void) 466 { 467 spdk_io_device_unregister(&g_bdev_ut_io_device, NULL); 468 } 469 470 struct spdk_bdev_module bdev_ut_if = { 471 .name = "bdev_ut", 472 .module_init = bdev_ut_module_init, 473 .module_fini = bdev_ut_module_fini, 474 .async_init = true, 475 }; 476 477 static void vbdev_ut_examine_config(struct spdk_bdev *bdev); 478 static void vbdev_ut_examine_disk(struct spdk_bdev *bdev); 479 480 static int 481 vbdev_ut_module_init(void) 482 { 483 return 0; 484 } 485 486 static void 487 vbdev_ut_module_fini(void) 488 { 489 } 490 491 struct spdk_bdev_module vbdev_ut_if = { 492 .name = "vbdev_ut", 493 .module_init = vbdev_ut_module_init, 494 .module_fini = vbdev_ut_module_fini, 495 .examine_config = vbdev_ut_examine_config, 496 .examine_disk = vbdev_ut_examine_disk, 497 }; 498 499 SPDK_BDEV_MODULE_REGISTER(bdev_ut, &bdev_ut_if) 500 SPDK_BDEV_MODULE_REGISTER(vbdev_ut, &vbdev_ut_if) 501 502 struct ut_examine_ctx { 503 void (*examine_config)(struct spdk_bdev *bdev); 504 void (*examine_disk)(struct spdk_bdev *bdev); 505 uint32_t examine_config_count; 506 uint32_t examine_disk_count; 507 }; 508 509 static void 510 vbdev_ut_examine_config(struct spdk_bdev *bdev) 511 { 512 struct ut_examine_ctx *ctx = bdev->ctxt; 513 514 if (ctx != NULL) { 515 ctx->examine_config_count++; 516 if (ctx->examine_config != NULL) { 517 ctx->examine_config(bdev); 518 } 519 } 520 521 spdk_bdev_module_examine_done(&vbdev_ut_if); 522 } 523 524 static void 525 vbdev_ut_examine_disk(struct spdk_bdev *bdev) 526 { 527 struct ut_examine_ctx *ctx = bdev->ctxt; 528 529 if (ctx != NULL) { 530 ctx->examine_disk_count++; 531 if (ctx->examine_disk != NULL) { 532 ctx->examine_disk(bdev); 533 } 534 } 535 536 spdk_bdev_module_examine_done(&vbdev_ut_if); 537 } 538 539 static struct spdk_bdev * 540 allocate_bdev_ctx(char *name, void *ctx) 541 { 542 struct spdk_bdev *bdev; 543 int rc; 544 545 bdev = calloc(1, sizeof(*bdev)); 546 SPDK_CU_ASSERT_FATAL(bdev != NULL); 547 548 bdev->ctxt = ctx; 549 bdev->name = name; 550 bdev->fn_table = &fn_table; 551 bdev->module = &bdev_ut_if; 552 bdev->blockcnt = 1024; 553 bdev->blocklen = 512; 554 555 spdk_uuid_generate(&bdev->uuid); 556 557 rc = spdk_bdev_register(bdev); 558 poll_threads(); 559 CU_ASSERT(rc == 0); 560 561 return bdev; 562 } 563 564 static struct spdk_bdev * 565 allocate_bdev(char *name) 566 { 567 return allocate_bdev_ctx(name, NULL); 568 } 569 570 static struct spdk_bdev * 571 allocate_vbdev(char *name) 572 { 573 struct spdk_bdev *bdev; 574 int rc; 575 576 bdev = calloc(1, sizeof(*bdev)); 577 SPDK_CU_ASSERT_FATAL(bdev != NULL); 578 579 bdev->name = name; 580 bdev->fn_table = &fn_table; 581 bdev->module = &vbdev_ut_if; 582 583 rc = spdk_bdev_register(bdev); 584 poll_threads(); 585 CU_ASSERT(rc == 0); 586 587 return bdev; 588 } 589 590 static void 591 free_bdev(struct spdk_bdev *bdev) 592 { 593 spdk_bdev_unregister(bdev, NULL, NULL); 594 poll_threads(); 595 memset(bdev, 0xFF, sizeof(*bdev)); 596 free(bdev); 597 } 598 599 static void 600 free_vbdev(struct spdk_bdev *bdev) 601 { 602 spdk_bdev_unregister(bdev, NULL, NULL); 603 poll_threads(); 604 memset(bdev, 0xFF, sizeof(*bdev)); 605 free(bdev); 606 } 607 608 static void 609 get_device_stat_cb(struct spdk_bdev *bdev, struct spdk_bdev_io_stat *stat, void *cb_arg, int rc) 610 { 611 const char *bdev_name; 612 613 CU_ASSERT(bdev != NULL); 614 CU_ASSERT(rc == 0); 615 bdev_name = spdk_bdev_get_name(bdev); 616 CU_ASSERT_STRING_EQUAL(bdev_name, "bdev0"); 617 618 free(stat); 619 620 *(bool *)cb_arg = true; 621 } 622 623 static void 624 bdev_unregister_cb(void *cb_arg, int rc) 625 { 626 g_unregister_arg = cb_arg; 627 g_unregister_rc = rc; 628 } 629 630 static void 631 bdev_ut_event_cb(enum spdk_bdev_event_type type, struct spdk_bdev *bdev, void *event_ctx) 632 { 633 } 634 635 static void 636 bdev_open_cb1(enum spdk_bdev_event_type type, struct spdk_bdev *bdev, void *event_ctx) 637 { 638 struct spdk_bdev_desc *desc = *(struct spdk_bdev_desc **)event_ctx; 639 640 g_event_type1 = type; 641 if (SPDK_BDEV_EVENT_REMOVE == type) { 642 spdk_bdev_close(desc); 643 } 644 } 645 646 static void 647 bdev_open_cb2(enum spdk_bdev_event_type type, struct spdk_bdev *bdev, void *event_ctx) 648 { 649 struct spdk_bdev_desc *desc = *(struct spdk_bdev_desc **)event_ctx; 650 651 g_event_type2 = type; 652 if (SPDK_BDEV_EVENT_REMOVE == type) { 653 spdk_bdev_close(desc); 654 } 655 } 656 657 static void 658 bdev_open_cb3(enum spdk_bdev_event_type type, struct spdk_bdev *bdev, void *event_ctx) 659 { 660 g_event_type3 = type; 661 } 662 663 static void 664 bdev_open_cb4(enum spdk_bdev_event_type type, struct spdk_bdev *bdev, void *event_ctx) 665 { 666 g_event_type4 = type; 667 } 668 669 static void 670 bdev_seek_cb(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg) 671 { 672 g_seek_offset = spdk_bdev_io_get_seek_offset(bdev_io); 673 spdk_bdev_free_io(bdev_io); 674 } 675 676 static void 677 get_device_stat_test(void) 678 { 679 struct spdk_bdev *bdev; 680 struct spdk_bdev_io_stat *stat; 681 bool done; 682 683 bdev = allocate_bdev("bdev0"); 684 stat = calloc(1, sizeof(struct spdk_bdev_io_stat)); 685 if (stat == NULL) { 686 free_bdev(bdev); 687 return; 688 } 689 690 done = false; 691 spdk_bdev_get_device_stat(bdev, stat, get_device_stat_cb, &done); 692 while (!done) { poll_threads(); } 693 694 free_bdev(bdev); 695 } 696 697 static void 698 open_write_test(void) 699 { 700 struct spdk_bdev *bdev[9]; 701 struct spdk_bdev_desc *desc[9] = {}; 702 int rc; 703 704 /* 705 * Create a tree of bdevs to test various open w/ write cases. 706 * 707 * bdev0 through bdev3 are physical block devices, such as NVMe 708 * namespaces or Ceph block devices. 709 * 710 * bdev4 is a virtual bdev with multiple base bdevs. This models 711 * caching or RAID use cases. 712 * 713 * bdev5 through bdev7 are all virtual bdevs with the same base 714 * bdev (except bdev7). This models partitioning or logical volume 715 * use cases. 716 * 717 * bdev7 is a virtual bdev with multiple base bdevs. One of base bdevs 718 * (bdev2) is shared with other virtual bdevs: bdev5 and bdev6. This 719 * models caching, RAID, partitioning or logical volumes use cases. 720 * 721 * bdev8 is a virtual bdev with multiple base bdevs, but these 722 * base bdevs are themselves virtual bdevs. 723 * 724 * bdev8 725 * | 726 * +----------+ 727 * | | 728 * bdev4 bdev5 bdev6 bdev7 729 * | | | | 730 * +---+---+ +---+ + +---+---+ 731 * | | \ | / \ 732 * bdev0 bdev1 bdev2 bdev3 733 */ 734 735 bdev[0] = allocate_bdev("bdev0"); 736 rc = spdk_bdev_module_claim_bdev(bdev[0], NULL, &bdev_ut_if); 737 CU_ASSERT(rc == 0); 738 739 bdev[1] = allocate_bdev("bdev1"); 740 rc = spdk_bdev_module_claim_bdev(bdev[1], NULL, &bdev_ut_if); 741 CU_ASSERT(rc == 0); 742 743 bdev[2] = allocate_bdev("bdev2"); 744 rc = spdk_bdev_module_claim_bdev(bdev[2], NULL, &bdev_ut_if); 745 CU_ASSERT(rc == 0); 746 747 bdev[3] = allocate_bdev("bdev3"); 748 rc = spdk_bdev_module_claim_bdev(bdev[3], NULL, &bdev_ut_if); 749 CU_ASSERT(rc == 0); 750 751 bdev[4] = allocate_vbdev("bdev4"); 752 rc = spdk_bdev_module_claim_bdev(bdev[4], NULL, &bdev_ut_if); 753 CU_ASSERT(rc == 0); 754 755 bdev[5] = allocate_vbdev("bdev5"); 756 rc = spdk_bdev_module_claim_bdev(bdev[5], NULL, &bdev_ut_if); 757 CU_ASSERT(rc == 0); 758 759 bdev[6] = allocate_vbdev("bdev6"); 760 761 bdev[7] = allocate_vbdev("bdev7"); 762 763 bdev[8] = allocate_vbdev("bdev8"); 764 765 /* Open bdev0 read-only. This should succeed. */ 766 rc = spdk_bdev_open_ext("bdev0", false, bdev_ut_event_cb, NULL, &desc[0]); 767 CU_ASSERT(rc == 0); 768 SPDK_CU_ASSERT_FATAL(desc[0] != NULL); 769 CU_ASSERT(bdev[0] == spdk_bdev_desc_get_bdev(desc[0])); 770 spdk_bdev_close(desc[0]); 771 772 /* 773 * Open bdev1 read/write. This should fail since bdev1 has been claimed 774 * by a vbdev module. 775 */ 776 rc = spdk_bdev_open_ext("bdev1", true, bdev_ut_event_cb, NULL, &desc[1]); 777 CU_ASSERT(rc == -EPERM); 778 779 /* 780 * Open bdev4 read/write. This should fail since bdev3 has been claimed 781 * by a vbdev module. 782 */ 783 rc = spdk_bdev_open_ext("bdev4", true, bdev_ut_event_cb, NULL, &desc[4]); 784 CU_ASSERT(rc == -EPERM); 785 786 /* Open bdev4 read-only. This should succeed. */ 787 rc = spdk_bdev_open_ext("bdev4", false, bdev_ut_event_cb, NULL, &desc[4]); 788 CU_ASSERT(rc == 0); 789 SPDK_CU_ASSERT_FATAL(desc[4] != NULL); 790 CU_ASSERT(bdev[4] == spdk_bdev_desc_get_bdev(desc[4])); 791 spdk_bdev_close(desc[4]); 792 793 /* 794 * Open bdev8 read/write. This should succeed since it is a leaf 795 * bdev. 796 */ 797 rc = spdk_bdev_open_ext("bdev8", true, bdev_ut_event_cb, NULL, &desc[8]); 798 CU_ASSERT(rc == 0); 799 SPDK_CU_ASSERT_FATAL(desc[8] != NULL); 800 CU_ASSERT(bdev[8] == spdk_bdev_desc_get_bdev(desc[8])); 801 spdk_bdev_close(desc[8]); 802 803 /* 804 * Open bdev5 read/write. This should fail since bdev4 has been claimed 805 * by a vbdev module. 806 */ 807 rc = spdk_bdev_open_ext("bdev5", true, bdev_ut_event_cb, NULL, &desc[5]); 808 CU_ASSERT(rc == -EPERM); 809 810 /* Open bdev4 read-only. This should succeed. */ 811 rc = spdk_bdev_open_ext("bdev5", false, bdev_ut_event_cb, NULL, &desc[5]); 812 CU_ASSERT(rc == 0); 813 SPDK_CU_ASSERT_FATAL(desc[5] != NULL); 814 CU_ASSERT(bdev[5] == spdk_bdev_desc_get_bdev(desc[5])); 815 spdk_bdev_close(desc[5]); 816 817 free_vbdev(bdev[8]); 818 819 free_vbdev(bdev[5]); 820 free_vbdev(bdev[6]); 821 free_vbdev(bdev[7]); 822 823 free_vbdev(bdev[4]); 824 825 free_bdev(bdev[0]); 826 free_bdev(bdev[1]); 827 free_bdev(bdev[2]); 828 free_bdev(bdev[3]); 829 } 830 831 static void 832 claim_test(void) 833 { 834 struct spdk_bdev *bdev; 835 struct spdk_bdev_desc *desc, *open_desc; 836 int rc; 837 uint32_t count; 838 839 /* 840 * A vbdev that uses a read-only bdev may need it to remain read-only. 841 * To do so, it opens the bdev read-only, then claims it without 842 * passing a spdk_bdev_desc. 843 */ 844 bdev = allocate_bdev("bdev0"); 845 rc = spdk_bdev_open_ext("bdev0", false, bdev_ut_event_cb, NULL, &desc); 846 CU_ASSERT(rc == 0); 847 CU_ASSERT(desc->write == false); 848 849 rc = spdk_bdev_module_claim_bdev(bdev, NULL, &bdev_ut_if); 850 CU_ASSERT(rc == 0); 851 CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_EXCL_WRITE); 852 CU_ASSERT(bdev->internal.claim.v1.module == &bdev_ut_if); 853 854 /* There should be only one open descriptor and it should still be ro */ 855 count = 0; 856 TAILQ_FOREACH(open_desc, &bdev->internal.open_descs, link) { 857 CU_ASSERT(open_desc == desc); 858 CU_ASSERT(!open_desc->write); 859 count++; 860 } 861 CU_ASSERT(count == 1); 862 863 /* A read-only bdev is upgraded to read-write if desc is passed. */ 864 spdk_bdev_module_release_bdev(bdev); 865 rc = spdk_bdev_module_claim_bdev(bdev, desc, &bdev_ut_if); 866 CU_ASSERT(rc == 0); 867 CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_EXCL_WRITE); 868 CU_ASSERT(bdev->internal.claim.v1.module == &bdev_ut_if); 869 870 /* There should be only one open descriptor and it should be rw */ 871 count = 0; 872 TAILQ_FOREACH(open_desc, &bdev->internal.open_descs, link) { 873 CU_ASSERT(open_desc == desc); 874 CU_ASSERT(open_desc->write); 875 count++; 876 } 877 CU_ASSERT(count == 1); 878 879 spdk_bdev_close(desc); 880 free_bdev(bdev); 881 } 882 883 static void 884 bytes_to_blocks_test(void) 885 { 886 struct spdk_bdev bdev; 887 uint64_t offset_blocks, num_blocks; 888 889 memset(&bdev, 0, sizeof(bdev)); 890 891 bdev.blocklen = 512; 892 893 /* All parameters valid */ 894 offset_blocks = 0; 895 num_blocks = 0; 896 CU_ASSERT(bdev_bytes_to_blocks(&bdev, 512, &offset_blocks, 1024, &num_blocks) == 0); 897 CU_ASSERT(offset_blocks == 1); 898 CU_ASSERT(num_blocks == 2); 899 900 /* Offset not a block multiple */ 901 CU_ASSERT(bdev_bytes_to_blocks(&bdev, 3, &offset_blocks, 512, &num_blocks) != 0); 902 903 /* Length not a block multiple */ 904 CU_ASSERT(bdev_bytes_to_blocks(&bdev, 512, &offset_blocks, 3, &num_blocks) != 0); 905 906 /* In case blocklen not the power of two */ 907 bdev.blocklen = 100; 908 CU_ASSERT(bdev_bytes_to_blocks(&bdev, 100, &offset_blocks, 200, &num_blocks) == 0); 909 CU_ASSERT(offset_blocks == 1); 910 CU_ASSERT(num_blocks == 2); 911 912 /* Offset not a block multiple */ 913 CU_ASSERT(bdev_bytes_to_blocks(&bdev, 3, &offset_blocks, 100, &num_blocks) != 0); 914 915 /* Length not a block multiple */ 916 CU_ASSERT(bdev_bytes_to_blocks(&bdev, 100, &offset_blocks, 3, &num_blocks) != 0); 917 } 918 919 static void 920 num_blocks_test(void) 921 { 922 struct spdk_bdev bdev; 923 struct spdk_bdev_desc *desc = NULL; 924 int rc; 925 926 memset(&bdev, 0, sizeof(bdev)); 927 bdev.name = "num_blocks"; 928 bdev.fn_table = &fn_table; 929 bdev.module = &bdev_ut_if; 930 spdk_bdev_register(&bdev); 931 poll_threads(); 932 spdk_bdev_notify_blockcnt_change(&bdev, 50); 933 934 /* Growing block number */ 935 CU_ASSERT(spdk_bdev_notify_blockcnt_change(&bdev, 70) == 0); 936 /* Shrinking block number */ 937 CU_ASSERT(spdk_bdev_notify_blockcnt_change(&bdev, 30) == 0); 938 939 rc = spdk_bdev_open_ext("num_blocks", false, bdev_open_cb1, &desc, &desc); 940 CU_ASSERT(rc == 0); 941 SPDK_CU_ASSERT_FATAL(desc != NULL); 942 CU_ASSERT(&bdev == spdk_bdev_desc_get_bdev(desc)); 943 944 /* Growing block number */ 945 CU_ASSERT(spdk_bdev_notify_blockcnt_change(&bdev, 80) == 0); 946 /* Shrinking block number */ 947 CU_ASSERT(spdk_bdev_notify_blockcnt_change(&bdev, 20) != 0); 948 949 g_event_type1 = 0xFF; 950 /* Growing block number */ 951 CU_ASSERT(spdk_bdev_notify_blockcnt_change(&bdev, 90) == 0); 952 953 poll_threads(); 954 CU_ASSERT_EQUAL(g_event_type1, SPDK_BDEV_EVENT_RESIZE); 955 956 g_event_type1 = 0xFF; 957 /* Growing block number and closing */ 958 CU_ASSERT(spdk_bdev_notify_blockcnt_change(&bdev, 100) == 0); 959 960 spdk_bdev_close(desc); 961 spdk_bdev_unregister(&bdev, NULL, NULL); 962 963 poll_threads(); 964 965 /* Callback is not called for closed device */ 966 CU_ASSERT_EQUAL(g_event_type1, 0xFF); 967 } 968 969 static void 970 io_valid_test(void) 971 { 972 struct spdk_bdev bdev; 973 974 memset(&bdev, 0, sizeof(bdev)); 975 976 bdev.blocklen = 512; 977 spdk_spin_init(&bdev.internal.spinlock); 978 979 spdk_bdev_notify_blockcnt_change(&bdev, 100); 980 981 /* All parameters valid */ 982 CU_ASSERT(bdev_io_valid_blocks(&bdev, 1, 2) == true); 983 984 /* Last valid block */ 985 CU_ASSERT(bdev_io_valid_blocks(&bdev, 99, 1) == true); 986 987 /* Offset past end of bdev */ 988 CU_ASSERT(bdev_io_valid_blocks(&bdev, 100, 1) == false); 989 990 /* Offset + length past end of bdev */ 991 CU_ASSERT(bdev_io_valid_blocks(&bdev, 99, 2) == false); 992 993 /* Offset near end of uint64_t range (2^64 - 1) */ 994 CU_ASSERT(bdev_io_valid_blocks(&bdev, 18446744073709551615ULL, 1) == false); 995 996 spdk_spin_destroy(&bdev.internal.spinlock); 997 } 998 999 static void 1000 alias_add_del_test(void) 1001 { 1002 struct spdk_bdev *bdev[3]; 1003 int rc; 1004 1005 /* Creating and registering bdevs */ 1006 bdev[0] = allocate_bdev("bdev0"); 1007 SPDK_CU_ASSERT_FATAL(bdev[0] != 0); 1008 1009 bdev[1] = allocate_bdev("bdev1"); 1010 SPDK_CU_ASSERT_FATAL(bdev[1] != 0); 1011 1012 bdev[2] = allocate_bdev("bdev2"); 1013 SPDK_CU_ASSERT_FATAL(bdev[2] != 0); 1014 1015 poll_threads(); 1016 1017 /* 1018 * Trying adding an alias identical to name. 1019 * Alias is identical to name, so it can not be added to aliases list 1020 */ 1021 rc = spdk_bdev_alias_add(bdev[0], bdev[0]->name); 1022 CU_ASSERT(rc == -EEXIST); 1023 1024 /* 1025 * Trying to add empty alias, 1026 * this one should fail 1027 */ 1028 rc = spdk_bdev_alias_add(bdev[0], NULL); 1029 CU_ASSERT(rc == -EINVAL); 1030 1031 /* Trying adding same alias to two different registered bdevs */ 1032 1033 /* Alias is used first time, so this one should pass */ 1034 rc = spdk_bdev_alias_add(bdev[0], "proper alias 0"); 1035 CU_ASSERT(rc == 0); 1036 1037 /* Alias was added to another bdev, so this one should fail */ 1038 rc = spdk_bdev_alias_add(bdev[1], "proper alias 0"); 1039 CU_ASSERT(rc == -EEXIST); 1040 1041 /* Alias is used first time, so this one should pass */ 1042 rc = spdk_bdev_alias_add(bdev[1], "proper alias 1"); 1043 CU_ASSERT(rc == 0); 1044 1045 /* Trying removing an alias from registered bdevs */ 1046 1047 /* Alias is not on a bdev aliases list, so this one should fail */ 1048 rc = spdk_bdev_alias_del(bdev[0], "not existing"); 1049 CU_ASSERT(rc == -ENOENT); 1050 1051 /* Alias is present on a bdev aliases list, so this one should pass */ 1052 rc = spdk_bdev_alias_del(bdev[0], "proper alias 0"); 1053 CU_ASSERT(rc == 0); 1054 1055 /* Alias is present on a bdev aliases list, so this one should pass */ 1056 rc = spdk_bdev_alias_del(bdev[1], "proper alias 1"); 1057 CU_ASSERT(rc == 0); 1058 1059 /* Trying to remove name instead of alias, so this one should fail, name cannot be changed or removed */ 1060 rc = spdk_bdev_alias_del(bdev[0], bdev[0]->name); 1061 CU_ASSERT(rc != 0); 1062 1063 /* Trying to del all alias from empty alias list */ 1064 spdk_bdev_alias_del_all(bdev[2]); 1065 SPDK_CU_ASSERT_FATAL(TAILQ_EMPTY(&bdev[2]->aliases)); 1066 1067 /* Trying to del all alias from non-empty alias list */ 1068 rc = spdk_bdev_alias_add(bdev[2], "alias0"); 1069 CU_ASSERT(rc == 0); 1070 rc = spdk_bdev_alias_add(bdev[2], "alias1"); 1071 CU_ASSERT(rc == 0); 1072 spdk_bdev_alias_del_all(bdev[2]); 1073 CU_ASSERT(TAILQ_EMPTY(&bdev[2]->aliases)); 1074 1075 /* Unregister and free bdevs */ 1076 spdk_bdev_unregister(bdev[0], NULL, NULL); 1077 spdk_bdev_unregister(bdev[1], NULL, NULL); 1078 spdk_bdev_unregister(bdev[2], NULL, NULL); 1079 1080 poll_threads(); 1081 1082 free(bdev[0]); 1083 free(bdev[1]); 1084 free(bdev[2]); 1085 } 1086 1087 static void 1088 io_done(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg) 1089 { 1090 g_io_done = true; 1091 g_io_status = bdev_io->internal.status; 1092 if ((bdev_io->type == SPDK_BDEV_IO_TYPE_ZCOPY) && 1093 (bdev_io->u.bdev.zcopy.start)) { 1094 g_zcopy_bdev_io = bdev_io; 1095 } else { 1096 spdk_bdev_free_io(bdev_io); 1097 g_zcopy_bdev_io = NULL; 1098 } 1099 } 1100 1101 static void 1102 bdev_init_cb(void *arg, int rc) 1103 { 1104 CU_ASSERT(rc == 0); 1105 } 1106 1107 static void 1108 bdev_fini_cb(void *arg) 1109 { 1110 } 1111 1112 static void 1113 ut_init_bdev(struct spdk_bdev_opts *opts) 1114 { 1115 int rc; 1116 1117 if (opts != NULL) { 1118 rc = spdk_bdev_set_opts(opts); 1119 CU_ASSERT(rc == 0); 1120 } 1121 rc = spdk_iobuf_initialize(); 1122 CU_ASSERT(rc == 0); 1123 spdk_bdev_initialize(bdev_init_cb, NULL); 1124 poll_threads(); 1125 } 1126 1127 static void 1128 ut_fini_bdev(void) 1129 { 1130 spdk_bdev_finish(bdev_fini_cb, NULL); 1131 spdk_iobuf_finish(bdev_fini_cb, NULL); 1132 poll_threads(); 1133 } 1134 1135 struct bdev_ut_io_wait_entry { 1136 struct spdk_bdev_io_wait_entry entry; 1137 struct spdk_io_channel *io_ch; 1138 struct spdk_bdev_desc *desc; 1139 bool submitted; 1140 }; 1141 1142 static void 1143 io_wait_cb(void *arg) 1144 { 1145 struct bdev_ut_io_wait_entry *entry = arg; 1146 int rc; 1147 1148 rc = spdk_bdev_read_blocks(entry->desc, entry->io_ch, NULL, 0, 1, io_done, NULL); 1149 CU_ASSERT(rc == 0); 1150 entry->submitted = true; 1151 } 1152 1153 static void 1154 bdev_io_types_test(void) 1155 { 1156 struct spdk_bdev *bdev; 1157 struct spdk_bdev_desc *desc = NULL; 1158 struct spdk_io_channel *io_ch; 1159 struct spdk_bdev_opts bdev_opts = {}; 1160 int rc; 1161 1162 spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts)); 1163 bdev_opts.bdev_io_pool_size = 4; 1164 bdev_opts.bdev_io_cache_size = 2; 1165 ut_init_bdev(&bdev_opts); 1166 1167 bdev = allocate_bdev("bdev0"); 1168 1169 rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc); 1170 CU_ASSERT(rc == 0); 1171 poll_threads(); 1172 SPDK_CU_ASSERT_FATAL(desc != NULL); 1173 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 1174 io_ch = spdk_bdev_get_io_channel(desc); 1175 CU_ASSERT(io_ch != NULL); 1176 1177 /* WRITE and WRITE ZEROES are not supported */ 1178 ut_enable_io_type(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, false); 1179 ut_enable_io_type(SPDK_BDEV_IO_TYPE_WRITE, false); 1180 rc = spdk_bdev_write_zeroes_blocks(desc, io_ch, 0, 128, io_done, NULL); 1181 CU_ASSERT(rc == -ENOTSUP); 1182 ut_enable_io_type(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, true); 1183 ut_enable_io_type(SPDK_BDEV_IO_TYPE_WRITE, true); 1184 1185 /* NVME_IO, NVME_IO_MD and NVME_ADMIN are not supported */ 1186 ut_enable_io_type(SPDK_BDEV_IO_TYPE_NVME_IO, false); 1187 ut_enable_io_type(SPDK_BDEV_IO_TYPE_NVME_IO_MD, false); 1188 ut_enable_io_type(SPDK_BDEV_IO_TYPE_NVME_ADMIN, false); 1189 rc = spdk_bdev_nvme_io_passthru(desc, io_ch, NULL, NULL, 0, NULL, NULL); 1190 CU_ASSERT(rc == -ENOTSUP); 1191 rc = spdk_bdev_nvme_io_passthru_md(desc, io_ch, NULL, NULL, 0, NULL, 0, NULL, NULL); 1192 CU_ASSERT(rc == -ENOTSUP); 1193 rc = spdk_bdev_nvme_admin_passthru(desc, io_ch, NULL, NULL, 0, NULL, NULL); 1194 CU_ASSERT(rc == -ENOTSUP); 1195 ut_enable_io_type(SPDK_BDEV_IO_TYPE_NVME_IO, true); 1196 ut_enable_io_type(SPDK_BDEV_IO_TYPE_NVME_IO_MD, true); 1197 ut_enable_io_type(SPDK_BDEV_IO_TYPE_NVME_ADMIN, true); 1198 1199 spdk_put_io_channel(io_ch); 1200 spdk_bdev_close(desc); 1201 free_bdev(bdev); 1202 ut_fini_bdev(); 1203 } 1204 1205 static void 1206 bdev_io_wait_test(void) 1207 { 1208 struct spdk_bdev *bdev; 1209 struct spdk_bdev_desc *desc = NULL; 1210 struct spdk_io_channel *io_ch; 1211 struct spdk_bdev_opts bdev_opts = {}; 1212 struct bdev_ut_io_wait_entry io_wait_entry; 1213 struct bdev_ut_io_wait_entry io_wait_entry2; 1214 int rc; 1215 1216 spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts)); 1217 bdev_opts.bdev_io_pool_size = 4; 1218 bdev_opts.bdev_io_cache_size = 2; 1219 ut_init_bdev(&bdev_opts); 1220 1221 bdev = allocate_bdev("bdev0"); 1222 1223 rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc); 1224 CU_ASSERT(rc == 0); 1225 poll_threads(); 1226 SPDK_CU_ASSERT_FATAL(desc != NULL); 1227 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 1228 io_ch = spdk_bdev_get_io_channel(desc); 1229 CU_ASSERT(io_ch != NULL); 1230 1231 rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, NULL); 1232 CU_ASSERT(rc == 0); 1233 rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, NULL); 1234 CU_ASSERT(rc == 0); 1235 rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, NULL); 1236 CU_ASSERT(rc == 0); 1237 rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, NULL); 1238 CU_ASSERT(rc == 0); 1239 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 4); 1240 1241 rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, NULL); 1242 CU_ASSERT(rc == -ENOMEM); 1243 1244 io_wait_entry.entry.bdev = bdev; 1245 io_wait_entry.entry.cb_fn = io_wait_cb; 1246 io_wait_entry.entry.cb_arg = &io_wait_entry; 1247 io_wait_entry.io_ch = io_ch; 1248 io_wait_entry.desc = desc; 1249 io_wait_entry.submitted = false; 1250 /* Cannot use the same io_wait_entry for two different calls. */ 1251 memcpy(&io_wait_entry2, &io_wait_entry, sizeof(io_wait_entry)); 1252 io_wait_entry2.entry.cb_arg = &io_wait_entry2; 1253 1254 /* Queue two I/O waits. */ 1255 rc = spdk_bdev_queue_io_wait(bdev, io_ch, &io_wait_entry.entry); 1256 CU_ASSERT(rc == 0); 1257 CU_ASSERT(io_wait_entry.submitted == false); 1258 rc = spdk_bdev_queue_io_wait(bdev, io_ch, &io_wait_entry2.entry); 1259 CU_ASSERT(rc == 0); 1260 CU_ASSERT(io_wait_entry2.submitted == false); 1261 1262 stub_complete_io(1); 1263 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 4); 1264 CU_ASSERT(io_wait_entry.submitted == true); 1265 CU_ASSERT(io_wait_entry2.submitted == false); 1266 1267 stub_complete_io(1); 1268 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 4); 1269 CU_ASSERT(io_wait_entry2.submitted == true); 1270 1271 stub_complete_io(4); 1272 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 1273 1274 spdk_put_io_channel(io_ch); 1275 spdk_bdev_close(desc); 1276 free_bdev(bdev); 1277 ut_fini_bdev(); 1278 } 1279 1280 static void 1281 bdev_io_spans_split_test(void) 1282 { 1283 struct spdk_bdev bdev; 1284 struct spdk_bdev_io bdev_io; 1285 struct iovec iov[SPDK_BDEV_IO_NUM_CHILD_IOV]; 1286 1287 memset(&bdev, 0, sizeof(bdev)); 1288 bdev_io.u.bdev.iovs = iov; 1289 1290 bdev_io.type = SPDK_BDEV_IO_TYPE_READ; 1291 bdev.optimal_io_boundary = 0; 1292 bdev.max_segment_size = 0; 1293 bdev.max_num_segments = 0; 1294 bdev_io.bdev = &bdev; 1295 1296 /* bdev has no optimal_io_boundary and max_size set - so this should return false. */ 1297 CU_ASSERT(bdev_io_should_split(&bdev_io) == false); 1298 1299 bdev.split_on_optimal_io_boundary = true; 1300 bdev.optimal_io_boundary = 32; 1301 bdev_io.type = SPDK_BDEV_IO_TYPE_RESET; 1302 1303 /* RESETs are not based on LBAs - so this should return false. */ 1304 CU_ASSERT(bdev_io_should_split(&bdev_io) == false); 1305 1306 bdev_io.type = SPDK_BDEV_IO_TYPE_READ; 1307 bdev_io.u.bdev.offset_blocks = 0; 1308 bdev_io.u.bdev.num_blocks = 32; 1309 1310 /* This I/O run right up to, but does not cross, the boundary - so this should return false. */ 1311 CU_ASSERT(bdev_io_should_split(&bdev_io) == false); 1312 1313 bdev_io.u.bdev.num_blocks = 33; 1314 1315 /* This I/O spans a boundary. */ 1316 CU_ASSERT(bdev_io_should_split(&bdev_io) == true); 1317 1318 bdev_io.u.bdev.num_blocks = 32; 1319 bdev.max_segment_size = 512 * 32; 1320 bdev.max_num_segments = 1; 1321 bdev_io.u.bdev.iovcnt = 1; 1322 iov[0].iov_len = 512; 1323 1324 /* Does not cross and exceed max_size or max_segs */ 1325 CU_ASSERT(bdev_io_should_split(&bdev_io) == false); 1326 1327 bdev.split_on_optimal_io_boundary = false; 1328 bdev.max_segment_size = 512; 1329 bdev.max_num_segments = 1; 1330 bdev_io.u.bdev.iovcnt = 2; 1331 1332 /* Exceed max_segs */ 1333 CU_ASSERT(bdev_io_should_split(&bdev_io) == true); 1334 1335 bdev.max_num_segments = 2; 1336 iov[0].iov_len = 513; 1337 iov[1].iov_len = 512; 1338 1339 /* Exceed max_sizes */ 1340 CU_ASSERT(bdev_io_should_split(&bdev_io) == true); 1341 1342 bdev.max_segment_size = 0; 1343 bdev.write_unit_size = 32; 1344 bdev.split_on_write_unit = true; 1345 bdev_io.type = SPDK_BDEV_IO_TYPE_WRITE; 1346 1347 /* This I/O is one write unit */ 1348 CU_ASSERT(bdev_io_should_split(&bdev_io) == false); 1349 1350 bdev_io.u.bdev.num_blocks = 32 * 2; 1351 1352 /* This I/O is more than one write unit */ 1353 CU_ASSERT(bdev_io_should_split(&bdev_io) == true); 1354 1355 bdev_io.u.bdev.offset_blocks = 1; 1356 bdev_io.u.bdev.num_blocks = 32; 1357 1358 /* This I/O is not aligned to write unit size */ 1359 CU_ASSERT(bdev_io_should_split(&bdev_io) == true); 1360 } 1361 1362 static void 1363 bdev_io_boundary_split_test(void) 1364 { 1365 struct spdk_bdev *bdev; 1366 struct spdk_bdev_desc *desc = NULL; 1367 struct spdk_io_channel *io_ch; 1368 struct spdk_bdev_opts bdev_opts = {}; 1369 struct iovec iov[SPDK_BDEV_IO_NUM_CHILD_IOV * 2]; 1370 struct ut_expected_io *expected_io; 1371 void *md_buf = (void *)0xFF000000; 1372 uint64_t i; 1373 int rc; 1374 1375 spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts)); 1376 bdev_opts.bdev_io_pool_size = 512; 1377 bdev_opts.bdev_io_cache_size = 64; 1378 ut_init_bdev(&bdev_opts); 1379 1380 bdev = allocate_bdev("bdev0"); 1381 1382 rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc); 1383 CU_ASSERT(rc == 0); 1384 SPDK_CU_ASSERT_FATAL(desc != NULL); 1385 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 1386 io_ch = spdk_bdev_get_io_channel(desc); 1387 CU_ASSERT(io_ch != NULL); 1388 1389 bdev->optimal_io_boundary = 16; 1390 bdev->split_on_optimal_io_boundary = false; 1391 1392 g_io_done = false; 1393 1394 /* First test that the I/O does not get split if split_on_optimal_io_boundary == false. */ 1395 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 14, 8, 1); 1396 ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 8 * 512); 1397 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1398 1399 rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 8, io_done, NULL); 1400 CU_ASSERT(rc == 0); 1401 CU_ASSERT(g_io_done == false); 1402 1403 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 1404 stub_complete_io(1); 1405 CU_ASSERT(g_io_done == true); 1406 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 1407 1408 bdev->split_on_optimal_io_boundary = true; 1409 bdev->md_interleave = false; 1410 bdev->md_len = 8; 1411 1412 /* Now test that a single-vector command is split correctly. 1413 * Offset 14, length 8, payload 0xF000 1414 * Child - Offset 14, length 2, payload 0xF000 1415 * Child - Offset 16, length 6, payload 0xF000 + 2 * 512 1416 * 1417 * Set up the expected values before calling spdk_bdev_read_blocks 1418 */ 1419 g_io_done = false; 1420 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 14, 2, 1); 1421 expected_io->md_buf = md_buf; 1422 ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 2 * 512); 1423 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1424 1425 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 16, 6, 1); 1426 expected_io->md_buf = md_buf + 2 * 8; 1427 ut_expected_io_set_iov(expected_io, 0, (void *)(0xF000 + 2 * 512), 6 * 512); 1428 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1429 1430 /* spdk_bdev_read_blocks will submit the first child immediately. */ 1431 rc = spdk_bdev_read_blocks_with_md(desc, io_ch, (void *)0xF000, md_buf, 1432 14, 8, io_done, NULL); 1433 CU_ASSERT(rc == 0); 1434 CU_ASSERT(g_io_done == false); 1435 1436 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 1437 stub_complete_io(2); 1438 CU_ASSERT(g_io_done == true); 1439 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 1440 1441 /* Now set up a more complex, multi-vector command that needs to be split, 1442 * including splitting iovecs. 1443 */ 1444 iov[0].iov_base = (void *)0x10000; 1445 iov[0].iov_len = 512; 1446 iov[1].iov_base = (void *)0x20000; 1447 iov[1].iov_len = 20 * 512; 1448 iov[2].iov_base = (void *)0x30000; 1449 iov[2].iov_len = 11 * 512; 1450 1451 g_io_done = false; 1452 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 14, 2, 2); 1453 expected_io->md_buf = md_buf; 1454 ut_expected_io_set_iov(expected_io, 0, (void *)0x10000, 512); 1455 ut_expected_io_set_iov(expected_io, 1, (void *)0x20000, 512); 1456 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1457 1458 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 16, 16, 1); 1459 expected_io->md_buf = md_buf + 2 * 8; 1460 ut_expected_io_set_iov(expected_io, 0, (void *)(0x20000 + 512), 16 * 512); 1461 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1462 1463 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 32, 14, 2); 1464 expected_io->md_buf = md_buf + 18 * 8; 1465 ut_expected_io_set_iov(expected_io, 0, (void *)(0x20000 + 17 * 512), 3 * 512); 1466 ut_expected_io_set_iov(expected_io, 1, (void *)0x30000, 11 * 512); 1467 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1468 1469 rc = spdk_bdev_writev_blocks_with_md(desc, io_ch, iov, 3, md_buf, 1470 14, 32, io_done, NULL); 1471 CU_ASSERT(rc == 0); 1472 CU_ASSERT(g_io_done == false); 1473 1474 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 3); 1475 stub_complete_io(3); 1476 CU_ASSERT(g_io_done == true); 1477 1478 /* Test multi vector command that needs to be split by strip and then needs to be 1479 * split further due to the capacity of child iovs. 1480 */ 1481 for (i = 0; i < SPDK_BDEV_IO_NUM_CHILD_IOV * 2; i++) { 1482 iov[i].iov_base = (void *)((i + 1) * 0x10000); 1483 iov[i].iov_len = 512; 1484 } 1485 1486 bdev->optimal_io_boundary = SPDK_BDEV_IO_NUM_CHILD_IOV; 1487 g_io_done = false; 1488 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0, SPDK_BDEV_IO_NUM_CHILD_IOV, 1489 SPDK_BDEV_IO_NUM_CHILD_IOV); 1490 expected_io->md_buf = md_buf; 1491 for (i = 0; i < SPDK_BDEV_IO_NUM_CHILD_IOV; i++) { 1492 ut_expected_io_set_iov(expected_io, i, (void *)((i + 1) * 0x10000), 512); 1493 } 1494 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1495 1496 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, SPDK_BDEV_IO_NUM_CHILD_IOV, 1497 SPDK_BDEV_IO_NUM_CHILD_IOV, SPDK_BDEV_IO_NUM_CHILD_IOV); 1498 expected_io->md_buf = md_buf + SPDK_BDEV_IO_NUM_CHILD_IOV * 8; 1499 for (i = 0; i < SPDK_BDEV_IO_NUM_CHILD_IOV; i++) { 1500 ut_expected_io_set_iov(expected_io, i, 1501 (void *)((i + 1 + SPDK_BDEV_IO_NUM_CHILD_IOV) * 0x10000), 512); 1502 } 1503 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1504 1505 rc = spdk_bdev_readv_blocks_with_md(desc, io_ch, iov, SPDK_BDEV_IO_NUM_CHILD_IOV * 2, md_buf, 1506 0, SPDK_BDEV_IO_NUM_CHILD_IOV * 2, io_done, NULL); 1507 CU_ASSERT(rc == 0); 1508 CU_ASSERT(g_io_done == false); 1509 1510 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 1511 stub_complete_io(1); 1512 CU_ASSERT(g_io_done == false); 1513 1514 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 1515 stub_complete_io(1); 1516 CU_ASSERT(g_io_done == true); 1517 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 1518 1519 /* Test multi vector command that needs to be split by strip and then needs to be 1520 * split further due to the capacity of child iovs. In this case, the length of 1521 * the rest of iovec array with an I/O boundary is the multiple of block size. 1522 */ 1523 1524 /* Fill iovec array for exactly one boundary. The iovec cnt for this boundary 1525 * is SPDK_BDEV_IO_NUM_CHILD_IOV + 1, which exceeds the capacity of child iovs. 1526 */ 1527 for (i = 0; i < SPDK_BDEV_IO_NUM_CHILD_IOV - 2; i++) { 1528 iov[i].iov_base = (void *)((i + 1) * 0x10000); 1529 iov[i].iov_len = 512; 1530 } 1531 for (i = SPDK_BDEV_IO_NUM_CHILD_IOV - 2; i < SPDK_BDEV_IO_NUM_CHILD_IOV; i++) { 1532 iov[i].iov_base = (void *)((i + 1) * 0x10000); 1533 iov[i].iov_len = 256; 1534 } 1535 iov[SPDK_BDEV_IO_NUM_CHILD_IOV].iov_base = (void *)((SPDK_BDEV_IO_NUM_CHILD_IOV + 1) * 0x10000); 1536 iov[SPDK_BDEV_IO_NUM_CHILD_IOV].iov_len = 512; 1537 1538 /* Add an extra iovec to trigger split */ 1539 iov[SPDK_BDEV_IO_NUM_CHILD_IOV + 1].iov_base = (void *)((SPDK_BDEV_IO_NUM_CHILD_IOV + 2) * 0x10000); 1540 iov[SPDK_BDEV_IO_NUM_CHILD_IOV + 1].iov_len = 512; 1541 1542 bdev->optimal_io_boundary = SPDK_BDEV_IO_NUM_CHILD_IOV; 1543 g_io_done = false; 1544 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0, 1545 SPDK_BDEV_IO_NUM_CHILD_IOV - 1, SPDK_BDEV_IO_NUM_CHILD_IOV); 1546 expected_io->md_buf = md_buf; 1547 for (i = 0; i < SPDK_BDEV_IO_NUM_CHILD_IOV - 2; i++) { 1548 ut_expected_io_set_iov(expected_io, i, 1549 (void *)((i + 1) * 0x10000), 512); 1550 } 1551 for (i = SPDK_BDEV_IO_NUM_CHILD_IOV - 2; i < SPDK_BDEV_IO_NUM_CHILD_IOV; i++) { 1552 ut_expected_io_set_iov(expected_io, i, 1553 (void *)((i + 1) * 0x10000), 256); 1554 } 1555 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1556 1557 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, SPDK_BDEV_IO_NUM_CHILD_IOV - 1, 1558 1, 1); 1559 expected_io->md_buf = md_buf + (SPDK_BDEV_IO_NUM_CHILD_IOV - 1) * 8; 1560 ut_expected_io_set_iov(expected_io, 0, 1561 (void *)((SPDK_BDEV_IO_NUM_CHILD_IOV + 1) * 0x10000), 512); 1562 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1563 1564 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, SPDK_BDEV_IO_NUM_CHILD_IOV, 1565 1, 1); 1566 expected_io->md_buf = md_buf + SPDK_BDEV_IO_NUM_CHILD_IOV * 8; 1567 ut_expected_io_set_iov(expected_io, 0, 1568 (void *)((SPDK_BDEV_IO_NUM_CHILD_IOV + 2) * 0x10000), 512); 1569 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1570 1571 rc = spdk_bdev_readv_blocks_with_md(desc, io_ch, iov, SPDK_BDEV_IO_NUM_CHILD_IOV + 2, md_buf, 1572 0, SPDK_BDEV_IO_NUM_CHILD_IOV + 1, io_done, NULL); 1573 CU_ASSERT(rc == 0); 1574 CU_ASSERT(g_io_done == false); 1575 1576 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 1577 stub_complete_io(1); 1578 CU_ASSERT(g_io_done == false); 1579 1580 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 1581 stub_complete_io(2); 1582 CU_ASSERT(g_io_done == true); 1583 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 1584 1585 /* Test multi vector command that needs to be split by strip and then needs to be 1586 * split further due to the capacity of child iovs, the child request offset should 1587 * be rewind to last aligned offset and go success without error. 1588 */ 1589 for (i = 0; i < SPDK_BDEV_IO_NUM_CHILD_IOV - 1; i++) { 1590 iov[i].iov_base = (void *)((i + 1) * 0x10000); 1591 iov[i].iov_len = 512; 1592 } 1593 iov[SPDK_BDEV_IO_NUM_CHILD_IOV - 1].iov_base = (void *)(SPDK_BDEV_IO_NUM_CHILD_IOV * 0x10000); 1594 iov[SPDK_BDEV_IO_NUM_CHILD_IOV - 1].iov_len = 256; 1595 1596 iov[SPDK_BDEV_IO_NUM_CHILD_IOV].iov_base = (void *)((SPDK_BDEV_IO_NUM_CHILD_IOV + 1) * 0x10000); 1597 iov[SPDK_BDEV_IO_NUM_CHILD_IOV].iov_len = 256; 1598 1599 iov[SPDK_BDEV_IO_NUM_CHILD_IOV + 1].iov_base = (void *)((SPDK_BDEV_IO_NUM_CHILD_IOV + 2) * 0x10000); 1600 iov[SPDK_BDEV_IO_NUM_CHILD_IOV + 1].iov_len = 512; 1601 1602 bdev->optimal_io_boundary = SPDK_BDEV_IO_NUM_CHILD_IOV; 1603 g_io_done = false; 1604 g_io_status = 0; 1605 /* The first expected io should be start from offset 0 to SPDK_BDEV_IO_NUM_CHILD_IOV - 1 */ 1606 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0, 1607 SPDK_BDEV_IO_NUM_CHILD_IOV - 1, SPDK_BDEV_IO_NUM_CHILD_IOV - 1); 1608 expected_io->md_buf = md_buf; 1609 for (i = 0; i < SPDK_BDEV_IO_NUM_CHILD_IOV - 1; i++) { 1610 ut_expected_io_set_iov(expected_io, i, 1611 (void *)((i + 1) * 0x10000), 512); 1612 } 1613 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1614 /* The second expected io should be start from offset SPDK_BDEV_IO_NUM_CHILD_IOV - 1 to SPDK_BDEV_IO_NUM_CHILD_IOV */ 1615 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, SPDK_BDEV_IO_NUM_CHILD_IOV - 1, 1616 1, 2); 1617 expected_io->md_buf = md_buf + (SPDK_BDEV_IO_NUM_CHILD_IOV - 1) * 8; 1618 ut_expected_io_set_iov(expected_io, 0, 1619 (void *)(SPDK_BDEV_IO_NUM_CHILD_IOV * 0x10000), 256); 1620 ut_expected_io_set_iov(expected_io, 1, 1621 (void *)((SPDK_BDEV_IO_NUM_CHILD_IOV + 1) * 0x10000), 256); 1622 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1623 /* The third expected io should be start from offset SPDK_BDEV_IO_NUM_CHILD_IOV to SPDK_BDEV_IO_NUM_CHILD_IOV + 1 */ 1624 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, SPDK_BDEV_IO_NUM_CHILD_IOV, 1625 1, 1); 1626 expected_io->md_buf = md_buf + SPDK_BDEV_IO_NUM_CHILD_IOV * 8; 1627 ut_expected_io_set_iov(expected_io, 0, 1628 (void *)((SPDK_BDEV_IO_NUM_CHILD_IOV + 2) * 0x10000), 512); 1629 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1630 1631 rc = spdk_bdev_readv_blocks_with_md(desc, io_ch, iov, SPDK_BDEV_IO_NUM_CHILD_IOV * 2, md_buf, 1632 0, SPDK_BDEV_IO_NUM_CHILD_IOV + 1, io_done, NULL); 1633 CU_ASSERT(rc == 0); 1634 CU_ASSERT(g_io_done == false); 1635 1636 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 1637 stub_complete_io(1); 1638 CU_ASSERT(g_io_done == false); 1639 1640 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 1641 stub_complete_io(2); 1642 CU_ASSERT(g_io_done == true); 1643 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 1644 1645 /* Test multi vector command that needs to be split due to the IO boundary and 1646 * the capacity of child iovs. Especially test the case when the command is 1647 * split due to the capacity of child iovs, the tail address is not aligned with 1648 * block size and is rewinded to the aligned address. 1649 * 1650 * The iovecs used in read request is complex but is based on the data 1651 * collected in the real issue. We change the base addresses but keep the lengths 1652 * not to loose the credibility of the test. 1653 */ 1654 bdev->optimal_io_boundary = 128; 1655 g_io_done = false; 1656 g_io_status = 0; 1657 1658 for (i = 0; i < 31; i++) { 1659 iov[i].iov_base = (void *)(0xFEED0000000 + (i << 20)); 1660 iov[i].iov_len = 1024; 1661 } 1662 iov[31].iov_base = (void *)0xFEED1F00000; 1663 iov[31].iov_len = 32768; 1664 iov[32].iov_base = (void *)0xFEED2000000; 1665 iov[32].iov_len = 160; 1666 iov[33].iov_base = (void *)0xFEED2100000; 1667 iov[33].iov_len = 4096; 1668 iov[34].iov_base = (void *)0xFEED2200000; 1669 iov[34].iov_len = 4096; 1670 iov[35].iov_base = (void *)0xFEED2300000; 1671 iov[35].iov_len = 4096; 1672 iov[36].iov_base = (void *)0xFEED2400000; 1673 iov[36].iov_len = 4096; 1674 iov[37].iov_base = (void *)0xFEED2500000; 1675 iov[37].iov_len = 4096; 1676 iov[38].iov_base = (void *)0xFEED2600000; 1677 iov[38].iov_len = 4096; 1678 iov[39].iov_base = (void *)0xFEED2700000; 1679 iov[39].iov_len = 4096; 1680 iov[40].iov_base = (void *)0xFEED2800000; 1681 iov[40].iov_len = 4096; 1682 iov[41].iov_base = (void *)0xFEED2900000; 1683 iov[41].iov_len = 4096; 1684 iov[42].iov_base = (void *)0xFEED2A00000; 1685 iov[42].iov_len = 4096; 1686 iov[43].iov_base = (void *)0xFEED2B00000; 1687 iov[43].iov_len = 12288; 1688 iov[44].iov_base = (void *)0xFEED2C00000; 1689 iov[44].iov_len = 8192; 1690 iov[45].iov_base = (void *)0xFEED2F00000; 1691 iov[45].iov_len = 4096; 1692 iov[46].iov_base = (void *)0xFEED3000000; 1693 iov[46].iov_len = 4096; 1694 iov[47].iov_base = (void *)0xFEED3100000; 1695 iov[47].iov_len = 4096; 1696 iov[48].iov_base = (void *)0xFEED3200000; 1697 iov[48].iov_len = 24576; 1698 iov[49].iov_base = (void *)0xFEED3300000; 1699 iov[49].iov_len = 16384; 1700 iov[50].iov_base = (void *)0xFEED3400000; 1701 iov[50].iov_len = 12288; 1702 iov[51].iov_base = (void *)0xFEED3500000; 1703 iov[51].iov_len = 4096; 1704 iov[52].iov_base = (void *)0xFEED3600000; 1705 iov[52].iov_len = 4096; 1706 iov[53].iov_base = (void *)0xFEED3700000; 1707 iov[53].iov_len = 4096; 1708 iov[54].iov_base = (void *)0xFEED3800000; 1709 iov[54].iov_len = 28672; 1710 iov[55].iov_base = (void *)0xFEED3900000; 1711 iov[55].iov_len = 20480; 1712 iov[56].iov_base = (void *)0xFEED3A00000; 1713 iov[56].iov_len = 4096; 1714 iov[57].iov_base = (void *)0xFEED3B00000; 1715 iov[57].iov_len = 12288; 1716 iov[58].iov_base = (void *)0xFEED3C00000; 1717 iov[58].iov_len = 4096; 1718 iov[59].iov_base = (void *)0xFEED3D00000; 1719 iov[59].iov_len = 4096; 1720 iov[60].iov_base = (void *)0xFEED3E00000; 1721 iov[60].iov_len = 352; 1722 1723 /* The 1st child IO must be from iov[0] to iov[31] split by the capacity 1724 * of child iovs, 1725 */ 1726 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0, 126, 32); 1727 expected_io->md_buf = md_buf; 1728 for (i = 0; i < 32; i++) { 1729 ut_expected_io_set_iov(expected_io, i, iov[i].iov_base, iov[i].iov_len); 1730 } 1731 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1732 1733 /* The 2nd child IO must be from iov[32] to the first 864 bytes of iov[33] 1734 * split by the IO boundary requirement. 1735 */ 1736 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 126, 2, 2); 1737 expected_io->md_buf = md_buf + 126 * 8; 1738 ut_expected_io_set_iov(expected_io, 0, iov[32].iov_base, iov[32].iov_len); 1739 ut_expected_io_set_iov(expected_io, 1, iov[33].iov_base, 864); 1740 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1741 1742 /* The 3rd child IO must be from the remaining 3232 bytes of iov[33] to 1743 * the first 864 bytes of iov[46] split by the IO boundary requirement. 1744 */ 1745 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 128, 128, 14); 1746 expected_io->md_buf = md_buf + 128 * 8; 1747 ut_expected_io_set_iov(expected_io, 0, (void *)((uintptr_t)iov[33].iov_base + 864), 1748 iov[33].iov_len - 864); 1749 ut_expected_io_set_iov(expected_io, 1, iov[34].iov_base, iov[34].iov_len); 1750 ut_expected_io_set_iov(expected_io, 2, iov[35].iov_base, iov[35].iov_len); 1751 ut_expected_io_set_iov(expected_io, 3, iov[36].iov_base, iov[36].iov_len); 1752 ut_expected_io_set_iov(expected_io, 4, iov[37].iov_base, iov[37].iov_len); 1753 ut_expected_io_set_iov(expected_io, 5, iov[38].iov_base, iov[38].iov_len); 1754 ut_expected_io_set_iov(expected_io, 6, iov[39].iov_base, iov[39].iov_len); 1755 ut_expected_io_set_iov(expected_io, 7, iov[40].iov_base, iov[40].iov_len); 1756 ut_expected_io_set_iov(expected_io, 8, iov[41].iov_base, iov[41].iov_len); 1757 ut_expected_io_set_iov(expected_io, 9, iov[42].iov_base, iov[42].iov_len); 1758 ut_expected_io_set_iov(expected_io, 10, iov[43].iov_base, iov[43].iov_len); 1759 ut_expected_io_set_iov(expected_io, 11, iov[44].iov_base, iov[44].iov_len); 1760 ut_expected_io_set_iov(expected_io, 12, iov[45].iov_base, iov[45].iov_len); 1761 ut_expected_io_set_iov(expected_io, 13, iov[46].iov_base, 864); 1762 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1763 1764 /* The 4th child IO must be from the remaining 3232 bytes of iov[46] to the 1765 * first 864 bytes of iov[52] split by the IO boundary requirement. 1766 */ 1767 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 256, 128, 7); 1768 expected_io->md_buf = md_buf + 256 * 8; 1769 ut_expected_io_set_iov(expected_io, 0, (void *)((uintptr_t)iov[46].iov_base + 864), 1770 iov[46].iov_len - 864); 1771 ut_expected_io_set_iov(expected_io, 1, iov[47].iov_base, iov[47].iov_len); 1772 ut_expected_io_set_iov(expected_io, 2, iov[48].iov_base, iov[48].iov_len); 1773 ut_expected_io_set_iov(expected_io, 3, iov[49].iov_base, iov[49].iov_len); 1774 ut_expected_io_set_iov(expected_io, 4, iov[50].iov_base, iov[50].iov_len); 1775 ut_expected_io_set_iov(expected_io, 5, iov[51].iov_base, iov[51].iov_len); 1776 ut_expected_io_set_iov(expected_io, 6, iov[52].iov_base, 864); 1777 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1778 1779 /* The 5th child IO must be from the remaining 3232 bytes of iov[52] to 1780 * the first 4096 bytes of iov[57] split by the IO boundary requirement. 1781 */ 1782 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 384, 128, 6); 1783 expected_io->md_buf = md_buf + 384 * 8; 1784 ut_expected_io_set_iov(expected_io, 0, (void *)((uintptr_t)iov[52].iov_base + 864), 1785 iov[52].iov_len - 864); 1786 ut_expected_io_set_iov(expected_io, 1, iov[53].iov_base, iov[53].iov_len); 1787 ut_expected_io_set_iov(expected_io, 2, iov[54].iov_base, iov[54].iov_len); 1788 ut_expected_io_set_iov(expected_io, 3, iov[55].iov_base, iov[55].iov_len); 1789 ut_expected_io_set_iov(expected_io, 4, iov[56].iov_base, iov[56].iov_len); 1790 ut_expected_io_set_iov(expected_io, 5, iov[57].iov_base, 4960); 1791 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1792 1793 /* The 6th child IO must be from the remaining 7328 bytes of iov[57] 1794 * to the first 3936 bytes of iov[58] split by the capacity of child iovs. 1795 */ 1796 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 512, 30, 3); 1797 expected_io->md_buf = md_buf + 512 * 8; 1798 ut_expected_io_set_iov(expected_io, 0, (void *)((uintptr_t)iov[57].iov_base + 4960), 1799 iov[57].iov_len - 4960); 1800 ut_expected_io_set_iov(expected_io, 1, iov[58].iov_base, iov[58].iov_len); 1801 ut_expected_io_set_iov(expected_io, 2, iov[59].iov_base, 3936); 1802 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1803 1804 /* The 7th child IO is from the remaining 160 bytes of iov[59] and iov[60]. */ 1805 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 542, 1, 2); 1806 expected_io->md_buf = md_buf + 542 * 8; 1807 ut_expected_io_set_iov(expected_io, 0, (void *)((uintptr_t)iov[59].iov_base + 3936), 1808 iov[59].iov_len - 3936); 1809 ut_expected_io_set_iov(expected_io, 1, iov[60].iov_base, iov[60].iov_len); 1810 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1811 1812 rc = spdk_bdev_readv_blocks_with_md(desc, io_ch, iov, 61, md_buf, 1813 0, 543, io_done, NULL); 1814 CU_ASSERT(rc == 0); 1815 CU_ASSERT(g_io_done == false); 1816 1817 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 1818 stub_complete_io(1); 1819 CU_ASSERT(g_io_done == false); 1820 1821 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 5); 1822 stub_complete_io(5); 1823 CU_ASSERT(g_io_done == false); 1824 1825 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 1826 stub_complete_io(1); 1827 CU_ASSERT(g_io_done == true); 1828 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 1829 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS); 1830 1831 /* Test a WRITE_ZEROES that would span an I/O boundary. WRITE_ZEROES should not be 1832 * split, so test that. 1833 */ 1834 bdev->optimal_io_boundary = 15; 1835 g_io_done = false; 1836 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, 9, 36, 0); 1837 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1838 1839 rc = spdk_bdev_write_zeroes_blocks(desc, io_ch, 9, 36, io_done, NULL); 1840 CU_ASSERT(rc == 0); 1841 CU_ASSERT(g_io_done == false); 1842 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 1843 stub_complete_io(1); 1844 CU_ASSERT(g_io_done == true); 1845 1846 /* Test an UNMAP. This should also not be split. */ 1847 bdev->optimal_io_boundary = 16; 1848 g_io_done = false; 1849 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_UNMAP, 15, 2, 0); 1850 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1851 1852 rc = spdk_bdev_unmap_blocks(desc, io_ch, 15, 2, io_done, NULL); 1853 CU_ASSERT(rc == 0); 1854 CU_ASSERT(g_io_done == false); 1855 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 1856 stub_complete_io(1); 1857 CU_ASSERT(g_io_done == true); 1858 1859 /* Test a FLUSH. This should also not be split. */ 1860 bdev->optimal_io_boundary = 16; 1861 g_io_done = false; 1862 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_FLUSH, 15, 2, 0); 1863 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1864 1865 rc = spdk_bdev_flush_blocks(desc, io_ch, 15, 2, io_done, NULL); 1866 CU_ASSERT(rc == 0); 1867 CU_ASSERT(g_io_done == false); 1868 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 1869 stub_complete_io(1); 1870 CU_ASSERT(g_io_done == true); 1871 1872 /* Test a COPY. This should also not be split. */ 1873 bdev->optimal_io_boundary = 15; 1874 g_io_done = false; 1875 expected_io = ut_alloc_expected_copy_io(SPDK_BDEV_IO_TYPE_COPY, 9, 45, 36); 1876 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1877 1878 rc = spdk_bdev_copy_blocks(desc, io_ch, 9, 45, 36, io_done, NULL); 1879 CU_ASSERT(rc == 0); 1880 CU_ASSERT(g_io_done == false); 1881 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 1882 stub_complete_io(1); 1883 CU_ASSERT(g_io_done == true); 1884 1885 CU_ASSERT(TAILQ_EMPTY(&g_bdev_ut_channel->expected_io)); 1886 1887 /* Children requests return an error status */ 1888 bdev->optimal_io_boundary = 16; 1889 iov[0].iov_base = (void *)0x10000; 1890 iov[0].iov_len = 512 * 64; 1891 g_io_exp_status = SPDK_BDEV_IO_STATUS_FAILED; 1892 g_io_done = false; 1893 g_io_status = SPDK_BDEV_IO_STATUS_SUCCESS; 1894 1895 rc = spdk_bdev_readv_blocks(desc, io_ch, iov, 1, 1, 64, io_done, NULL); 1896 CU_ASSERT(rc == 0); 1897 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 5); 1898 stub_complete_io(4); 1899 CU_ASSERT(g_io_done == false); 1900 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS); 1901 stub_complete_io(1); 1902 CU_ASSERT(g_io_done == true); 1903 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED); 1904 1905 /* Test if a multi vector command terminated with failure before continuing 1906 * splitting process when one of child I/O failed. 1907 * The multi vector command is as same as the above that needs to be split by strip 1908 * and then needs to be split further due to the capacity of child iovs. 1909 */ 1910 for (i = 0; i < SPDK_BDEV_IO_NUM_CHILD_IOV - 1; i++) { 1911 iov[i].iov_base = (void *)((i + 1) * 0x10000); 1912 iov[i].iov_len = 512; 1913 } 1914 iov[SPDK_BDEV_IO_NUM_CHILD_IOV - 1].iov_base = (void *)(SPDK_BDEV_IO_NUM_CHILD_IOV * 0x10000); 1915 iov[SPDK_BDEV_IO_NUM_CHILD_IOV - 1].iov_len = 256; 1916 1917 iov[SPDK_BDEV_IO_NUM_CHILD_IOV].iov_base = (void *)((SPDK_BDEV_IO_NUM_CHILD_IOV + 1) * 0x10000); 1918 iov[SPDK_BDEV_IO_NUM_CHILD_IOV].iov_len = 256; 1919 1920 iov[SPDK_BDEV_IO_NUM_CHILD_IOV + 1].iov_base = (void *)((SPDK_BDEV_IO_NUM_CHILD_IOV + 2) * 0x10000); 1921 iov[SPDK_BDEV_IO_NUM_CHILD_IOV + 1].iov_len = 512; 1922 1923 bdev->optimal_io_boundary = SPDK_BDEV_IO_NUM_CHILD_IOV; 1924 1925 g_io_exp_status = SPDK_BDEV_IO_STATUS_FAILED; 1926 g_io_done = false; 1927 g_io_status = SPDK_BDEV_IO_STATUS_SUCCESS; 1928 1929 rc = spdk_bdev_readv_blocks(desc, io_ch, iov, SPDK_BDEV_IO_NUM_CHILD_IOV * 2, 0, 1930 SPDK_BDEV_IO_NUM_CHILD_IOV + 1, io_done, NULL); 1931 CU_ASSERT(rc == 0); 1932 CU_ASSERT(g_io_done == false); 1933 1934 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 1935 stub_complete_io(1); 1936 CU_ASSERT(g_io_done == true); 1937 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED); 1938 1939 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 1940 1941 /* for this test we will create the following conditions to hit the code path where 1942 * we are trying to send and IO following a split that has no iovs because we had to 1943 * trim them for alignment reasons. 1944 * 1945 * - 16K boundary, our IO will start at offset 0 with a length of 0x4200 1946 * - Our IOVs are 0x212 in size so that we run into the 16K boundary at child IOV 1947 * position 30 and overshoot by 0x2e. 1948 * - That means we'll send the IO and loop back to pick up the remaining bytes at 1949 * child IOV index 31. When we do, we find that we have to shorten index 31 by 0x2e 1950 * which eliniates that vector so we just send the first split IO with 30 vectors 1951 * and let the completion pick up the last 2 vectors. 1952 */ 1953 bdev->optimal_io_boundary = 32; 1954 bdev->split_on_optimal_io_boundary = true; 1955 g_io_done = false; 1956 1957 /* Init all parent IOVs to 0x212 */ 1958 for (i = 0; i < SPDK_BDEV_IO_NUM_CHILD_IOV + 2; i++) { 1959 iov[i].iov_base = (void *)((i + 1) * 0x10000); 1960 iov[i].iov_len = 0x212; 1961 } 1962 1963 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0, SPDK_BDEV_IO_NUM_CHILD_IOV, 1964 SPDK_BDEV_IO_NUM_CHILD_IOV - 1); 1965 /* expect 0-29 to be 1:1 with the parent iov */ 1966 for (i = 0; i < SPDK_BDEV_IO_NUM_CHILD_IOV - 2; i++) { 1967 ut_expected_io_set_iov(expected_io, i, iov[i].iov_base, iov[i].iov_len); 1968 } 1969 1970 /* expect index 30 to be shortened to 0x1e4 (0x212 - 0x1e) because of the alignment 1971 * where 0x1e is the amount we overshot the 16K boundary 1972 */ 1973 ut_expected_io_set_iov(expected_io, SPDK_BDEV_IO_NUM_CHILD_IOV - 2, 1974 (void *)(iov[SPDK_BDEV_IO_NUM_CHILD_IOV - 2].iov_base), 0x1e4); 1975 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1976 1977 /* 2nd child IO will have 2 remaining vectors, one to pick up from the one that was 1978 * shortened that take it to the next boundary and then a final one to get us to 1979 * 0x4200 bytes for the IO. 1980 */ 1981 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, SPDK_BDEV_IO_NUM_CHILD_IOV, 1982 SPDK_BDEV_IO_NUM_CHILD_IOV, 2); 1983 /* position 30 picked up the remaining bytes to the next boundary */ 1984 ut_expected_io_set_iov(expected_io, 0, 1985 (void *)(iov[SPDK_BDEV_IO_NUM_CHILD_IOV - 2].iov_base + 0x1e4), 0x2e); 1986 1987 /* position 31 picked the the rest of the transfer to get us to 0x4200 */ 1988 ut_expected_io_set_iov(expected_io, 1, 1989 (void *)(iov[SPDK_BDEV_IO_NUM_CHILD_IOV - 1].iov_base), 0x1d2); 1990 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1991 1992 rc = spdk_bdev_readv_blocks(desc, io_ch, iov, SPDK_BDEV_IO_NUM_CHILD_IOV + 1, 0, 1993 SPDK_BDEV_IO_NUM_CHILD_IOV + 1, io_done, NULL); 1994 CU_ASSERT(rc == 0); 1995 CU_ASSERT(g_io_done == false); 1996 1997 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 1998 stub_complete_io(1); 1999 CU_ASSERT(g_io_done == false); 2000 2001 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 2002 stub_complete_io(1); 2003 CU_ASSERT(g_io_done == true); 2004 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 2005 2006 spdk_put_io_channel(io_ch); 2007 spdk_bdev_close(desc); 2008 free_bdev(bdev); 2009 ut_fini_bdev(); 2010 } 2011 2012 static void 2013 bdev_io_max_size_and_segment_split_test(void) 2014 { 2015 struct spdk_bdev *bdev; 2016 struct spdk_bdev_desc *desc = NULL; 2017 struct spdk_io_channel *io_ch; 2018 struct spdk_bdev_opts bdev_opts = {}; 2019 struct iovec iov[SPDK_BDEV_IO_NUM_CHILD_IOV * 2]; 2020 struct ut_expected_io *expected_io; 2021 uint64_t i; 2022 int rc; 2023 2024 spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts)); 2025 bdev_opts.bdev_io_pool_size = 512; 2026 bdev_opts.bdev_io_cache_size = 64; 2027 bdev_opts.opts_size = sizeof(bdev_opts); 2028 ut_init_bdev(&bdev_opts); 2029 2030 bdev = allocate_bdev("bdev0"); 2031 2032 rc = spdk_bdev_open_ext(bdev->name, true, bdev_ut_event_cb, NULL, &desc); 2033 CU_ASSERT(rc == 0); 2034 SPDK_CU_ASSERT_FATAL(desc != NULL); 2035 io_ch = spdk_bdev_get_io_channel(desc); 2036 CU_ASSERT(io_ch != NULL); 2037 2038 bdev->split_on_optimal_io_boundary = false; 2039 bdev->optimal_io_boundary = 0; 2040 2041 /* Case 0 max_num_segments == 0. 2042 * but segment size 2 * 512 > 512 2043 */ 2044 bdev->max_segment_size = 512; 2045 bdev->max_num_segments = 0; 2046 g_io_done = false; 2047 2048 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 14, 2, 2); 2049 ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 512); 2050 ut_expected_io_set_iov(expected_io, 1, (void *)(0xF000 + 512), 512); 2051 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2052 2053 rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 2, io_done, NULL); 2054 CU_ASSERT(rc == 0); 2055 CU_ASSERT(g_io_done == false); 2056 2057 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 2058 stub_complete_io(1); 2059 CU_ASSERT(g_io_done == true); 2060 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 2061 2062 /* Case 1 max_segment_size == 0 2063 * but iov num 2 > 1. 2064 */ 2065 bdev->max_segment_size = 0; 2066 bdev->max_num_segments = 1; 2067 g_io_done = false; 2068 2069 iov[0].iov_base = (void *)0x10000; 2070 iov[0].iov_len = 512; 2071 iov[1].iov_base = (void *)0x20000; 2072 iov[1].iov_len = 8 * 512; 2073 2074 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 14, 1, 1); 2075 ut_expected_io_set_iov(expected_io, 0, iov[0].iov_base, iov[0].iov_len); 2076 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2077 2078 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 15, 8, 1); 2079 ut_expected_io_set_iov(expected_io, 0, iov[1].iov_base, iov[1].iov_len); 2080 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2081 2082 rc = spdk_bdev_readv_blocks(desc, io_ch, iov, 2, 14, 9, io_done, NULL); 2083 CU_ASSERT(rc == 0); 2084 CU_ASSERT(g_io_done == false); 2085 2086 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 2087 stub_complete_io(2); 2088 CU_ASSERT(g_io_done == true); 2089 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 2090 2091 /* Test that a non-vector command is split correctly. 2092 * Set up the expected values before calling spdk_bdev_read_blocks 2093 */ 2094 bdev->max_segment_size = 512; 2095 bdev->max_num_segments = 1; 2096 g_io_done = false; 2097 2098 /* Child IO 0 */ 2099 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 14, 1, 1); 2100 ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 512); 2101 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2102 2103 /* Child IO 1 */ 2104 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 15, 1, 1); 2105 ut_expected_io_set_iov(expected_io, 0, (void *)(0xF000 + 1 * 512), 512); 2106 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2107 2108 /* spdk_bdev_read_blocks will submit the first child immediately. */ 2109 rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 2, io_done, NULL); 2110 CU_ASSERT(rc == 0); 2111 CU_ASSERT(g_io_done == false); 2112 2113 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 2114 stub_complete_io(2); 2115 CU_ASSERT(g_io_done == true); 2116 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 2117 2118 /* Now set up a more complex, multi-vector command that needs to be split, 2119 * including splitting iovecs. 2120 */ 2121 bdev->max_segment_size = 2 * 512; 2122 bdev->max_num_segments = 1; 2123 g_io_done = false; 2124 2125 iov[0].iov_base = (void *)0x10000; 2126 iov[0].iov_len = 2 * 512; 2127 iov[1].iov_base = (void *)0x20000; 2128 iov[1].iov_len = 4 * 512; 2129 iov[2].iov_base = (void *)0x30000; 2130 iov[2].iov_len = 6 * 512; 2131 2132 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 14, 2, 1); 2133 ut_expected_io_set_iov(expected_io, 0, iov[0].iov_base, 512 * 2); 2134 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2135 2136 /* Split iov[1].size to 2 iov entries then split the segments */ 2137 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 16, 2, 1); 2138 ut_expected_io_set_iov(expected_io, 0, iov[1].iov_base, 512 * 2); 2139 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2140 2141 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 18, 2, 1); 2142 ut_expected_io_set_iov(expected_io, 0, iov[1].iov_base + 512 * 2, 512 * 2); 2143 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2144 2145 /* Split iov[2].size to 3 iov entries then split the segments */ 2146 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 20, 2, 1); 2147 ut_expected_io_set_iov(expected_io, 0, iov[2].iov_base, 512 * 2); 2148 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2149 2150 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 22, 2, 1); 2151 ut_expected_io_set_iov(expected_io, 0, iov[2].iov_base + 512 * 2, 512 * 2); 2152 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2153 2154 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 24, 2, 1); 2155 ut_expected_io_set_iov(expected_io, 0, iov[2].iov_base + 512 * 4, 512 * 2); 2156 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2157 2158 rc = spdk_bdev_writev_blocks(desc, io_ch, iov, 3, 14, 12, io_done, NULL); 2159 CU_ASSERT(rc == 0); 2160 CU_ASSERT(g_io_done == false); 2161 2162 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 6); 2163 stub_complete_io(6); 2164 CU_ASSERT(g_io_done == true); 2165 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 2166 2167 /* Test multi vector command that needs to be split by strip and then needs to be 2168 * split further due to the capacity of parent IO child iovs. 2169 */ 2170 bdev->max_segment_size = 512; 2171 bdev->max_num_segments = 1; 2172 g_io_done = false; 2173 2174 for (i = 0; i < SPDK_BDEV_IO_NUM_CHILD_IOV; i++) { 2175 iov[i].iov_base = (void *)((i + 1) * 0x10000); 2176 iov[i].iov_len = 512 * 2; 2177 } 2178 2179 /* Each input iov.size is split into 2 iovs, 2180 * half of the input iov can fill all child iov entries of a single IO. 2181 */ 2182 for (i = 0; i < SPDK_BDEV_IO_NUM_CHILD_IOV / 2; i++) { 2183 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 2 * i, 1, 1); 2184 ut_expected_io_set_iov(expected_io, 0, iov[i].iov_base, 512); 2185 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2186 2187 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 2 * i + 1, 1, 1); 2188 ut_expected_io_set_iov(expected_io, 0, iov[i].iov_base + 512, 512); 2189 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2190 } 2191 2192 /* The remaining iov is split in the second round */ 2193 for (i = SPDK_BDEV_IO_NUM_CHILD_IOV / 2; i < SPDK_BDEV_IO_NUM_CHILD_IOV; i++) { 2194 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, i * 2, 1, 1); 2195 ut_expected_io_set_iov(expected_io, 0, iov[i].iov_base, 512); 2196 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2197 2198 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, i * 2 + 1, 1, 1); 2199 ut_expected_io_set_iov(expected_io, 0, iov[i].iov_base + 512, 512); 2200 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2201 } 2202 2203 rc = spdk_bdev_readv_blocks(desc, io_ch, iov, SPDK_BDEV_IO_NUM_CHILD_IOV, 0, 2204 SPDK_BDEV_IO_NUM_CHILD_IOV * 2, io_done, NULL); 2205 CU_ASSERT(rc == 0); 2206 CU_ASSERT(g_io_done == false); 2207 2208 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == SPDK_BDEV_IO_NUM_CHILD_IOV); 2209 stub_complete_io(SPDK_BDEV_IO_NUM_CHILD_IOV); 2210 CU_ASSERT(g_io_done == false); 2211 2212 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == SPDK_BDEV_IO_NUM_CHILD_IOV); 2213 stub_complete_io(SPDK_BDEV_IO_NUM_CHILD_IOV); 2214 CU_ASSERT(g_io_done == true); 2215 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 2216 2217 /* A wrong case, a child IO that is divided does 2218 * not meet the principle of multiples of block size, 2219 * and exits with error 2220 */ 2221 bdev->max_segment_size = 512; 2222 bdev->max_num_segments = 1; 2223 g_io_done = false; 2224 2225 iov[0].iov_base = (void *)0x10000; 2226 iov[0].iov_len = 512 + 256; 2227 iov[1].iov_base = (void *)0x20000; 2228 iov[1].iov_len = 256; 2229 2230 /* iov[0] is split to 512 and 256. 2231 * 256 is less than a block size, and it is found 2232 * in the next round of split that it is the first child IO smaller than 2233 * the block size, so the error exit 2234 */ 2235 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0, 1, 1); 2236 ut_expected_io_set_iov(expected_io, 0, iov[0].iov_base, 512); 2237 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2238 2239 rc = spdk_bdev_readv_blocks(desc, io_ch, iov, 2, 0, 2, io_done, NULL); 2240 CU_ASSERT(rc == 0); 2241 CU_ASSERT(g_io_done == false); 2242 2243 /* First child IO is OK */ 2244 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 2245 stub_complete_io(1); 2246 CU_ASSERT(g_io_done == true); 2247 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 2248 2249 /* error exit */ 2250 stub_complete_io(1); 2251 CU_ASSERT(g_io_done == true); 2252 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED); 2253 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 2254 2255 /* Test multi vector command that needs to be split by strip and then needs to be 2256 * split further due to the capacity of child iovs. 2257 * 2258 * In this case, the last two iovs need to be split, but it will exceed the capacity 2259 * of child iovs, so it needs to wait until the first batch completed. 2260 */ 2261 bdev->max_segment_size = 512; 2262 bdev->max_num_segments = SPDK_BDEV_IO_NUM_CHILD_IOV; 2263 g_io_done = false; 2264 2265 for (i = 0; i < SPDK_BDEV_IO_NUM_CHILD_IOV - 2; i++) { 2266 iov[i].iov_base = (void *)((i + 1) * 0x10000); 2267 iov[i].iov_len = 512; 2268 } 2269 for (i = SPDK_BDEV_IO_NUM_CHILD_IOV - 2; i < SPDK_BDEV_IO_NUM_CHILD_IOV; i++) { 2270 iov[i].iov_base = (void *)((i + 1) * 0x10000); 2271 iov[i].iov_len = 512 * 2; 2272 } 2273 2274 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0, 2275 SPDK_BDEV_IO_NUM_CHILD_IOV, SPDK_BDEV_IO_NUM_CHILD_IOV); 2276 /* 0 ~ (SPDK_BDEV_IO_NUM_CHILD_IOV - 2) Will not be split */ 2277 for (i = 0; i < SPDK_BDEV_IO_NUM_CHILD_IOV - 2; i++) { 2278 ut_expected_io_set_iov(expected_io, i, iov[i].iov_base, iov[i].iov_len); 2279 } 2280 /* (SPDK_BDEV_IO_NUM_CHILD_IOV - 2) is split */ 2281 ut_expected_io_set_iov(expected_io, i, iov[i].iov_base, 512); 2282 ut_expected_io_set_iov(expected_io, i + 1, iov[i].iov_base + 512, 512); 2283 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2284 2285 /* Child iov entries exceed the max num of parent IO so split it in next round */ 2286 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, SPDK_BDEV_IO_NUM_CHILD_IOV, 2, 2); 2287 ut_expected_io_set_iov(expected_io, 0, iov[i + 1].iov_base, 512); 2288 ut_expected_io_set_iov(expected_io, 1, iov[i + 1].iov_base + 512, 512); 2289 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2290 2291 rc = spdk_bdev_readv_blocks(desc, io_ch, iov, SPDK_BDEV_IO_NUM_CHILD_IOV, 0, 2292 SPDK_BDEV_IO_NUM_CHILD_IOV + 2, io_done, NULL); 2293 CU_ASSERT(rc == 0); 2294 CU_ASSERT(g_io_done == false); 2295 2296 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 2297 stub_complete_io(1); 2298 CU_ASSERT(g_io_done == false); 2299 2300 /* Next round */ 2301 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 2302 stub_complete_io(1); 2303 CU_ASSERT(g_io_done == true); 2304 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 2305 2306 /* This case is similar to the previous one, but the io composed of 2307 * the last few entries of child iov is not enough for a blocklen, so they 2308 * cannot be put into this IO, but wait until the next time. 2309 */ 2310 bdev->max_segment_size = 512; 2311 bdev->max_num_segments = SPDK_BDEV_IO_NUM_CHILD_IOV; 2312 g_io_done = false; 2313 2314 for (i = 0; i < SPDK_BDEV_IO_NUM_CHILD_IOV - 2; i++) { 2315 iov[i].iov_base = (void *)((i + 1) * 0x10000); 2316 iov[i].iov_len = 512; 2317 } 2318 2319 for (i = SPDK_BDEV_IO_NUM_CHILD_IOV - 2; i < SPDK_BDEV_IO_NUM_CHILD_IOV + 2; i++) { 2320 iov[i].iov_base = (void *)((i + 1) * 0x10000); 2321 iov[i].iov_len = 128; 2322 } 2323 2324 /* First child iovcnt is't SPDK_BDEV_IO_NUM_CHILD_IOV but SPDK_BDEV_IO_NUM_CHILD_IOV - 2. 2325 * Because the left 2 iov is not enough for a blocklen. 2326 */ 2327 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0, 2328 SPDK_BDEV_IO_NUM_CHILD_IOV - 2, SPDK_BDEV_IO_NUM_CHILD_IOV - 2); 2329 for (i = 0; i < SPDK_BDEV_IO_NUM_CHILD_IOV - 2; i++) { 2330 ut_expected_io_set_iov(expected_io, i, iov[i].iov_base, iov[i].iov_len); 2331 } 2332 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2333 2334 /* The second child io waits until the end of the first child io before executing. 2335 * Because the iovcnt of the two IOs exceeds the child iovcnt of the parent IO. 2336 * SPDK_BDEV_IO_NUM_CHILD_IOV - 2 to SPDK_BDEV_IO_NUM_CHILD_IOV + 2 2337 */ 2338 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, SPDK_BDEV_IO_NUM_CHILD_IOV - 2, 2339 1, 4); 2340 ut_expected_io_set_iov(expected_io, 0, iov[i].iov_base, iov[i].iov_len); 2341 ut_expected_io_set_iov(expected_io, 1, iov[i + 1].iov_base, iov[i + 1].iov_len); 2342 ut_expected_io_set_iov(expected_io, 2, iov[i + 2].iov_base, iov[i + 2].iov_len); 2343 ut_expected_io_set_iov(expected_io, 3, iov[i + 3].iov_base, iov[i + 3].iov_len); 2344 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2345 2346 rc = spdk_bdev_readv_blocks(desc, io_ch, iov, SPDK_BDEV_IO_NUM_CHILD_IOV + 2, 0, 2347 SPDK_BDEV_IO_NUM_CHILD_IOV - 1, io_done, NULL); 2348 CU_ASSERT(rc == 0); 2349 CU_ASSERT(g_io_done == false); 2350 2351 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 2352 stub_complete_io(1); 2353 CU_ASSERT(g_io_done == false); 2354 2355 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 2356 stub_complete_io(1); 2357 CU_ASSERT(g_io_done == true); 2358 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 2359 2360 /* A very complicated case. Each sg entry exceeds max_segment_size and 2361 * needs to be split. At the same time, child io must be a multiple of blocklen. 2362 * At the same time, child iovcnt exceeds parent iovcnt. 2363 */ 2364 bdev->max_segment_size = 512 + 128; 2365 bdev->max_num_segments = 3; 2366 g_io_done = false; 2367 2368 for (i = 0; i < SPDK_BDEV_IO_NUM_CHILD_IOV - 2; i++) { 2369 iov[i].iov_base = (void *)((i + 1) * 0x10000); 2370 iov[i].iov_len = 512 + 256; 2371 } 2372 2373 for (i = SPDK_BDEV_IO_NUM_CHILD_IOV - 2; i < SPDK_BDEV_IO_NUM_CHILD_IOV + 2; i++) { 2374 iov[i].iov_base = (void *)((i + 1) * 0x10000); 2375 iov[i].iov_len = 512 + 128; 2376 } 2377 2378 /* Child IOs use 9 entries per for() round and 3 * 9 = 27 child iov entries. 2379 * Consume 4 parent IO iov entries per for() round and 6 block size. 2380 * Generate 9 child IOs. 2381 */ 2382 for (i = 0; i < 3; i++) { 2383 uint32_t j = i * 4; 2384 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, i * 6, 2, 3); 2385 ut_expected_io_set_iov(expected_io, 0, iov[j].iov_base, 640); 2386 ut_expected_io_set_iov(expected_io, 1, iov[j].iov_base + 640, 128); 2387 ut_expected_io_set_iov(expected_io, 2, iov[j + 1].iov_base, 256); 2388 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2389 2390 /* Child io must be a multiple of blocklen 2391 * iov[j + 2] must be split. If the third entry is also added, 2392 * the multiple of blocklen cannot be guaranteed. But it still 2393 * occupies one iov entry of the parent child iov. 2394 */ 2395 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, i * 6 + 2, 2, 2); 2396 ut_expected_io_set_iov(expected_io, 0, iov[j + 1].iov_base + 256, 512); 2397 ut_expected_io_set_iov(expected_io, 1, iov[j + 2].iov_base, 512); 2398 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2399 2400 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, i * 6 + 4, 2, 3); 2401 ut_expected_io_set_iov(expected_io, 0, iov[j + 2].iov_base + 512, 256); 2402 ut_expected_io_set_iov(expected_io, 1, iov[j + 3].iov_base, 640); 2403 ut_expected_io_set_iov(expected_io, 2, iov[j + 3].iov_base + 640, 128); 2404 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2405 } 2406 2407 /* Child iov position at 27, the 10th child IO 2408 * iov entry index is 3 * 4 and offset is 3 * 6 2409 */ 2410 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 18, 2, 3); 2411 ut_expected_io_set_iov(expected_io, 0, iov[12].iov_base, 640); 2412 ut_expected_io_set_iov(expected_io, 1, iov[12].iov_base + 640, 128); 2413 ut_expected_io_set_iov(expected_io, 2, iov[13].iov_base, 256); 2414 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2415 2416 /* Child iov position at 30, the 11th child IO */ 2417 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 20, 2, 2); 2418 ut_expected_io_set_iov(expected_io, 0, iov[13].iov_base + 256, 512); 2419 ut_expected_io_set_iov(expected_io, 1, iov[14].iov_base, 512); 2420 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2421 2422 /* The 2nd split round and iovpos is 0, the 12th child IO */ 2423 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 22, 2, 3); 2424 ut_expected_io_set_iov(expected_io, 0, iov[14].iov_base + 512, 256); 2425 ut_expected_io_set_iov(expected_io, 1, iov[15].iov_base, 640); 2426 ut_expected_io_set_iov(expected_io, 2, iov[15].iov_base + 640, 128); 2427 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2428 2429 /* Consume 9 child IOs and 27 child iov entries. 2430 * Consume 4 parent IO iov entries per for() round and 6 block size. 2431 * Parent IO iov index start from 16 and block offset start from 24 2432 */ 2433 for (i = 0; i < 3; i++) { 2434 uint32_t j = i * 4 + 16; 2435 uint32_t offset = i * 6 + 24; 2436 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, offset, 2, 3); 2437 ut_expected_io_set_iov(expected_io, 0, iov[j].iov_base, 640); 2438 ut_expected_io_set_iov(expected_io, 1, iov[j].iov_base + 640, 128); 2439 ut_expected_io_set_iov(expected_io, 2, iov[j + 1].iov_base, 256); 2440 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2441 2442 /* Child io must be a multiple of blocklen 2443 * iov[j + 2] must be split. If the third entry is also added, 2444 * the multiple of blocklen cannot be guaranteed. But it still 2445 * occupies one iov entry of the parent child iov. 2446 */ 2447 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, offset + 2, 2, 2); 2448 ut_expected_io_set_iov(expected_io, 0, iov[j + 1].iov_base + 256, 512); 2449 ut_expected_io_set_iov(expected_io, 1, iov[j + 2].iov_base, 512); 2450 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2451 2452 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, offset + 4, 2, 3); 2453 ut_expected_io_set_iov(expected_io, 0, iov[j + 2].iov_base + 512, 256); 2454 ut_expected_io_set_iov(expected_io, 1, iov[j + 3].iov_base, 640); 2455 ut_expected_io_set_iov(expected_io, 2, iov[j + 3].iov_base + 640, 128); 2456 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2457 } 2458 2459 /* The 22th child IO, child iov position at 30 */ 2460 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 42, 1, 1); 2461 ut_expected_io_set_iov(expected_io, 0, iov[28].iov_base, 512); 2462 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2463 2464 /* The third round */ 2465 /* Here is the 23nd child IO and child iovpos is 0 */ 2466 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 43, 2, 3); 2467 ut_expected_io_set_iov(expected_io, 0, iov[28].iov_base + 512, 256); 2468 ut_expected_io_set_iov(expected_io, 1, iov[29].iov_base, 640); 2469 ut_expected_io_set_iov(expected_io, 2, iov[29].iov_base + 640, 128); 2470 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2471 2472 /* The 24th child IO */ 2473 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 45, 3, 3); 2474 ut_expected_io_set_iov(expected_io, 0, iov[30].iov_base, 640); 2475 ut_expected_io_set_iov(expected_io, 1, iov[31].iov_base, 640); 2476 ut_expected_io_set_iov(expected_io, 2, iov[32].iov_base, 256); 2477 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2478 2479 /* The 25th child IO */ 2480 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 48, 2, 2); 2481 ut_expected_io_set_iov(expected_io, 0, iov[32].iov_base + 256, 384); 2482 ut_expected_io_set_iov(expected_io, 1, iov[33].iov_base, 640); 2483 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2484 2485 rc = spdk_bdev_readv_blocks(desc, io_ch, iov, SPDK_BDEV_IO_NUM_CHILD_IOV + 2, 0, 2486 50, io_done, NULL); 2487 CU_ASSERT(rc == 0); 2488 CU_ASSERT(g_io_done == false); 2489 2490 /* Parent IO supports up to 32 child iovs, so it is calculated that 2491 * a maximum of 11 IOs can be split at a time, and the 2492 * splitting will continue after the first batch is over. 2493 */ 2494 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 11); 2495 stub_complete_io(11); 2496 CU_ASSERT(g_io_done == false); 2497 2498 /* The 2nd round */ 2499 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 11); 2500 stub_complete_io(11); 2501 CU_ASSERT(g_io_done == false); 2502 2503 /* The last round */ 2504 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 3); 2505 stub_complete_io(3); 2506 CU_ASSERT(g_io_done == true); 2507 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 2508 2509 /* Test an WRITE_ZEROES. This should also not be split. */ 2510 bdev->max_segment_size = 512; 2511 bdev->max_num_segments = 1; 2512 g_io_done = false; 2513 2514 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, 9, 36, 0); 2515 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2516 2517 rc = spdk_bdev_write_zeroes_blocks(desc, io_ch, 9, 36, io_done, NULL); 2518 CU_ASSERT(rc == 0); 2519 CU_ASSERT(g_io_done == false); 2520 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 2521 stub_complete_io(1); 2522 CU_ASSERT(g_io_done == true); 2523 2524 /* Test an UNMAP. This should also not be split. */ 2525 g_io_done = false; 2526 2527 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_UNMAP, 15, 4, 0); 2528 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2529 2530 rc = spdk_bdev_unmap_blocks(desc, io_ch, 15, 4, io_done, NULL); 2531 CU_ASSERT(rc == 0); 2532 CU_ASSERT(g_io_done == false); 2533 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 2534 stub_complete_io(1); 2535 CU_ASSERT(g_io_done == true); 2536 2537 /* Test a FLUSH. This should also not be split. */ 2538 g_io_done = false; 2539 2540 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_FLUSH, 15, 4, 0); 2541 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2542 2543 rc = spdk_bdev_flush_blocks(desc, io_ch, 15, 2, io_done, NULL); 2544 CU_ASSERT(rc == 0); 2545 CU_ASSERT(g_io_done == false); 2546 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 2547 stub_complete_io(1); 2548 CU_ASSERT(g_io_done == true); 2549 2550 /* Test a COPY. This should also not be split. */ 2551 g_io_done = false; 2552 2553 expected_io = ut_alloc_expected_copy_io(SPDK_BDEV_IO_TYPE_COPY, 9, 45, 36); 2554 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2555 2556 rc = spdk_bdev_copy_blocks(desc, io_ch, 9, 45, 36, io_done, NULL); 2557 CU_ASSERT(rc == 0); 2558 CU_ASSERT(g_io_done == false); 2559 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 2560 stub_complete_io(1); 2561 CU_ASSERT(g_io_done == true); 2562 2563 spdk_put_io_channel(io_ch); 2564 spdk_bdev_close(desc); 2565 free_bdev(bdev); 2566 ut_fini_bdev(); 2567 } 2568 2569 static void 2570 bdev_io_mix_split_test(void) 2571 { 2572 struct spdk_bdev *bdev; 2573 struct spdk_bdev_desc *desc = NULL; 2574 struct spdk_io_channel *io_ch; 2575 struct spdk_bdev_opts bdev_opts = {}; 2576 struct iovec iov[SPDK_BDEV_IO_NUM_CHILD_IOV * 2]; 2577 struct ut_expected_io *expected_io; 2578 uint64_t i; 2579 int rc; 2580 2581 spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts)); 2582 bdev_opts.bdev_io_pool_size = 512; 2583 bdev_opts.bdev_io_cache_size = 64; 2584 ut_init_bdev(&bdev_opts); 2585 2586 bdev = allocate_bdev("bdev0"); 2587 2588 rc = spdk_bdev_open_ext(bdev->name, true, bdev_ut_event_cb, NULL, &desc); 2589 CU_ASSERT(rc == 0); 2590 SPDK_CU_ASSERT_FATAL(desc != NULL); 2591 io_ch = spdk_bdev_get_io_channel(desc); 2592 CU_ASSERT(io_ch != NULL); 2593 2594 /* First case optimal_io_boundary == max_segment_size * max_num_segments */ 2595 bdev->split_on_optimal_io_boundary = true; 2596 bdev->optimal_io_boundary = 16; 2597 2598 bdev->max_segment_size = 512; 2599 bdev->max_num_segments = 16; 2600 g_io_done = false; 2601 2602 /* IO crossing the IO boundary requires split 2603 * Total 2 child IOs. 2604 */ 2605 2606 /* The 1st child IO split the segment_size to multiple segment entry */ 2607 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 14, 2, 2); 2608 ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 512); 2609 ut_expected_io_set_iov(expected_io, 1, (void *)(0xF000 + 512), 512); 2610 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2611 2612 /* The 2nd child IO split the segment_size to multiple segment entry */ 2613 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 16, 2, 2); 2614 ut_expected_io_set_iov(expected_io, 0, (void *)(0xF000 + 2 * 512), 512); 2615 ut_expected_io_set_iov(expected_io, 1, (void *)(0xF000 + 3 * 512), 512); 2616 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2617 2618 rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 4, io_done, NULL); 2619 CU_ASSERT(rc == 0); 2620 CU_ASSERT(g_io_done == false); 2621 2622 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 2623 stub_complete_io(2); 2624 CU_ASSERT(g_io_done == true); 2625 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 2626 2627 /* Second case optimal_io_boundary > max_segment_size * max_num_segments */ 2628 bdev->max_segment_size = 15 * 512; 2629 bdev->max_num_segments = 1; 2630 g_io_done = false; 2631 2632 /* IO crossing the IO boundary requires split. 2633 * The 1st child IO segment size exceeds the max_segment_size, 2634 * So 1st child IO will be split to multiple segment entry. 2635 * Then it split to 2 child IOs because of the max_num_segments. 2636 * Total 3 child IOs. 2637 */ 2638 2639 /* The first 2 IOs are in an IO boundary. 2640 * Because the optimal_io_boundary > max_segment_size * max_num_segments 2641 * So it split to the first 2 IOs. 2642 */ 2643 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0, 15, 1); 2644 ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 512 * 15); 2645 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2646 2647 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 15, 1, 1); 2648 ut_expected_io_set_iov(expected_io, 0, (void *)(0xF000 + 512 * 15), 512); 2649 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2650 2651 /* The 3rd Child IO is because of the io boundary */ 2652 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 16, 2, 1); 2653 ut_expected_io_set_iov(expected_io, 0, (void *)(0xF000 + 512 * 16), 512 * 2); 2654 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2655 2656 rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 0, 18, io_done, NULL); 2657 CU_ASSERT(rc == 0); 2658 CU_ASSERT(g_io_done == false); 2659 2660 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 3); 2661 stub_complete_io(3); 2662 CU_ASSERT(g_io_done == true); 2663 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 2664 2665 /* Third case optimal_io_boundary < max_segment_size * max_num_segments */ 2666 bdev->max_segment_size = 17 * 512; 2667 bdev->max_num_segments = 1; 2668 g_io_done = false; 2669 2670 /* IO crossing the IO boundary requires split. 2671 * Child IO does not split. 2672 * Total 2 child IOs. 2673 */ 2674 2675 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0, 16, 1); 2676 ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 512 * 16); 2677 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2678 2679 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 16, 2, 1); 2680 ut_expected_io_set_iov(expected_io, 0, (void *)(0xF000 + 512 * 16), 512 * 2); 2681 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2682 2683 rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 0, 18, io_done, NULL); 2684 CU_ASSERT(rc == 0); 2685 CU_ASSERT(g_io_done == false); 2686 2687 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 2688 stub_complete_io(2); 2689 CU_ASSERT(g_io_done == true); 2690 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 2691 2692 /* Now set up a more complex, multi-vector command that needs to be split, 2693 * including splitting iovecs. 2694 * optimal_io_boundary < max_segment_size * max_num_segments 2695 */ 2696 bdev->max_segment_size = 3 * 512; 2697 bdev->max_num_segments = 6; 2698 g_io_done = false; 2699 2700 iov[0].iov_base = (void *)0x10000; 2701 iov[0].iov_len = 4 * 512; 2702 iov[1].iov_base = (void *)0x20000; 2703 iov[1].iov_len = 4 * 512; 2704 iov[2].iov_base = (void *)0x30000; 2705 iov[2].iov_len = 10 * 512; 2706 2707 /* IO crossing the IO boundary requires split. 2708 * The 1st child IO segment size exceeds the max_segment_size and after 2709 * splitting segment_size, the num_segments exceeds max_num_segments. 2710 * So 1st child IO will be split to 2 child IOs. 2711 * Total 3 child IOs. 2712 */ 2713 2714 /* The first 2 IOs are in an IO boundary. 2715 * After splitting segment size the segment num exceeds. 2716 * So it splits to 2 child IOs. 2717 */ 2718 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 0, 14, 6); 2719 ut_expected_io_set_iov(expected_io, 0, iov[0].iov_base, 512 * 3); 2720 ut_expected_io_set_iov(expected_io, 1, iov[0].iov_base + 512 * 3, 512); 2721 ut_expected_io_set_iov(expected_io, 2, iov[1].iov_base, 512 * 3); 2722 ut_expected_io_set_iov(expected_io, 3, iov[1].iov_base + 512 * 3, 512); 2723 ut_expected_io_set_iov(expected_io, 4, iov[2].iov_base, 512 * 3); 2724 ut_expected_io_set_iov(expected_io, 5, iov[2].iov_base + 512 * 3, 512 * 3); 2725 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2726 2727 /* The 2nd child IO has the left segment entry */ 2728 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 14, 2, 1); 2729 ut_expected_io_set_iov(expected_io, 0, iov[2].iov_base + 512 * 6, 512 * 2); 2730 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2731 2732 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 16, 2, 1); 2733 ut_expected_io_set_iov(expected_io, 0, iov[2].iov_base + 512 * 8, 512 * 2); 2734 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2735 2736 rc = spdk_bdev_writev_blocks(desc, io_ch, iov, 3, 0, 18, io_done, NULL); 2737 CU_ASSERT(rc == 0); 2738 CU_ASSERT(g_io_done == false); 2739 2740 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 3); 2741 stub_complete_io(3); 2742 CU_ASSERT(g_io_done == true); 2743 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 2744 2745 /* A very complicated case. Each sg entry exceeds max_segment_size 2746 * and split on io boundary. 2747 * optimal_io_boundary < max_segment_size * max_num_segments 2748 */ 2749 bdev->max_segment_size = 3 * 512; 2750 bdev->max_num_segments = SPDK_BDEV_IO_NUM_CHILD_IOV; 2751 g_io_done = false; 2752 2753 for (i = 0; i < 20; i++) { 2754 iov[i].iov_base = (void *)((i + 1) * 0x10000); 2755 iov[i].iov_len = 512 * 4; 2756 } 2757 2758 /* IO crossing the IO boundary requires split. 2759 * 80 block length can split 5 child IOs base on offset and IO boundary. 2760 * Each iov entry needs to be split to 2 entries because of max_segment_size 2761 * Total 5 child IOs. 2762 */ 2763 2764 /* 4 iov entries are in an IO boundary and each iov entry splits to 2. 2765 * So each child IO occupies 8 child iov entries. 2766 */ 2767 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 0, 16, 8); 2768 for (i = 0; i < 4; i++) { 2769 int iovcnt = i * 2; 2770 ut_expected_io_set_iov(expected_io, iovcnt, iov[i].iov_base, 512 * 3); 2771 ut_expected_io_set_iov(expected_io, iovcnt + 1, iov[i].iov_base + 512 * 3, 512); 2772 } 2773 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2774 2775 /* 2nd child IO and total 16 child iov entries of parent IO */ 2776 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 16, 16, 8); 2777 for (i = 4; i < 8; i++) { 2778 int iovcnt = (i - 4) * 2; 2779 ut_expected_io_set_iov(expected_io, iovcnt, iov[i].iov_base, 512 * 3); 2780 ut_expected_io_set_iov(expected_io, iovcnt + 1, iov[i].iov_base + 512 * 3, 512); 2781 } 2782 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2783 2784 /* 3rd child IO and total 24 child iov entries of parent IO */ 2785 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 32, 16, 8); 2786 for (i = 8; i < 12; i++) { 2787 int iovcnt = (i - 8) * 2; 2788 ut_expected_io_set_iov(expected_io, iovcnt, iov[i].iov_base, 512 * 3); 2789 ut_expected_io_set_iov(expected_io, iovcnt + 1, iov[i].iov_base + 512 * 3, 512); 2790 } 2791 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2792 2793 /* 4th child IO and total 32 child iov entries of parent IO */ 2794 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 48, 16, 8); 2795 for (i = 12; i < 16; i++) { 2796 int iovcnt = (i - 12) * 2; 2797 ut_expected_io_set_iov(expected_io, iovcnt, iov[i].iov_base, 512 * 3); 2798 ut_expected_io_set_iov(expected_io, iovcnt + 1, iov[i].iov_base + 512 * 3, 512); 2799 } 2800 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2801 2802 /* 5th child IO and because of the child iov entry it should be split 2803 * in next round. 2804 */ 2805 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 64, 16, 8); 2806 for (i = 16; i < 20; i++) { 2807 int iovcnt = (i - 16) * 2; 2808 ut_expected_io_set_iov(expected_io, iovcnt, iov[i].iov_base, 512 * 3); 2809 ut_expected_io_set_iov(expected_io, iovcnt + 1, iov[i].iov_base + 512 * 3, 512); 2810 } 2811 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2812 2813 rc = spdk_bdev_writev_blocks(desc, io_ch, iov, 20, 0, 80, io_done, NULL); 2814 CU_ASSERT(rc == 0); 2815 CU_ASSERT(g_io_done == false); 2816 2817 /* First split round */ 2818 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 4); 2819 stub_complete_io(4); 2820 CU_ASSERT(g_io_done == false); 2821 2822 /* Second split round */ 2823 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 2824 stub_complete_io(1); 2825 CU_ASSERT(g_io_done == true); 2826 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 2827 2828 spdk_put_io_channel(io_ch); 2829 spdk_bdev_close(desc); 2830 free_bdev(bdev); 2831 ut_fini_bdev(); 2832 } 2833 2834 static void 2835 bdev_io_split_with_io_wait(void) 2836 { 2837 struct spdk_bdev *bdev; 2838 struct spdk_bdev_desc *desc = NULL; 2839 struct spdk_io_channel *io_ch; 2840 struct spdk_bdev_channel *channel; 2841 struct spdk_bdev_mgmt_channel *mgmt_ch; 2842 struct spdk_bdev_opts bdev_opts = {}; 2843 struct iovec iov[3]; 2844 struct ut_expected_io *expected_io; 2845 int rc; 2846 2847 spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts)); 2848 bdev_opts.bdev_io_pool_size = 2; 2849 bdev_opts.bdev_io_cache_size = 1; 2850 ut_init_bdev(&bdev_opts); 2851 2852 bdev = allocate_bdev("bdev0"); 2853 2854 rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc); 2855 CU_ASSERT(rc == 0); 2856 CU_ASSERT(desc != NULL); 2857 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 2858 io_ch = spdk_bdev_get_io_channel(desc); 2859 CU_ASSERT(io_ch != NULL); 2860 channel = spdk_io_channel_get_ctx(io_ch); 2861 mgmt_ch = channel->shared_resource->mgmt_ch; 2862 2863 bdev->optimal_io_boundary = 16; 2864 bdev->split_on_optimal_io_boundary = true; 2865 2866 rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, NULL); 2867 CU_ASSERT(rc == 0); 2868 2869 /* Now test that a single-vector command is split correctly. 2870 * Offset 14, length 8, payload 0xF000 2871 * Child - Offset 14, length 2, payload 0xF000 2872 * Child - Offset 16, length 6, payload 0xF000 + 2 * 512 2873 * 2874 * Set up the expected values before calling spdk_bdev_read_blocks 2875 */ 2876 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 14, 2, 1); 2877 ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 2 * 512); 2878 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2879 2880 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 16, 6, 1); 2881 ut_expected_io_set_iov(expected_io, 0, (void *)(0xF000 + 2 * 512), 6 * 512); 2882 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2883 2884 /* The following children will be submitted sequentially due to the capacity of 2885 * spdk_bdev_io. 2886 */ 2887 2888 /* The first child I/O will be queued to wait until an spdk_bdev_io becomes available */ 2889 rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 8, io_done, NULL); 2890 CU_ASSERT(rc == 0); 2891 CU_ASSERT(!TAILQ_EMPTY(&mgmt_ch->io_wait_queue)); 2892 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 2893 2894 /* Completing the first read I/O will submit the first child */ 2895 stub_complete_io(1); 2896 CU_ASSERT(TAILQ_EMPTY(&mgmt_ch->io_wait_queue)); 2897 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 2898 2899 /* Completing the first child will submit the second child */ 2900 stub_complete_io(1); 2901 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 2902 2903 /* Complete the second child I/O. This should result in our callback getting 2904 * invoked since the parent I/O is now complete. 2905 */ 2906 stub_complete_io(1); 2907 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 2908 2909 /* Now set up a more complex, multi-vector command that needs to be split, 2910 * including splitting iovecs. 2911 */ 2912 iov[0].iov_base = (void *)0x10000; 2913 iov[0].iov_len = 512; 2914 iov[1].iov_base = (void *)0x20000; 2915 iov[1].iov_len = 20 * 512; 2916 iov[2].iov_base = (void *)0x30000; 2917 iov[2].iov_len = 11 * 512; 2918 2919 g_io_done = false; 2920 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 14, 2, 2); 2921 ut_expected_io_set_iov(expected_io, 0, (void *)0x10000, 512); 2922 ut_expected_io_set_iov(expected_io, 1, (void *)0x20000, 512); 2923 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2924 2925 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 16, 16, 1); 2926 ut_expected_io_set_iov(expected_io, 0, (void *)(0x20000 + 512), 16 * 512); 2927 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2928 2929 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 32, 14, 2); 2930 ut_expected_io_set_iov(expected_io, 0, (void *)(0x20000 + 17 * 512), 3 * 512); 2931 ut_expected_io_set_iov(expected_io, 1, (void *)0x30000, 11 * 512); 2932 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2933 2934 rc = spdk_bdev_writev_blocks(desc, io_ch, iov, 3, 14, 32, io_done, NULL); 2935 CU_ASSERT(rc == 0); 2936 CU_ASSERT(g_io_done == false); 2937 2938 /* The following children will be submitted sequentially due to the capacity of 2939 * spdk_bdev_io. 2940 */ 2941 2942 /* Completing the first child will submit the second child */ 2943 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 2944 stub_complete_io(1); 2945 CU_ASSERT(g_io_done == false); 2946 2947 /* Completing the second child will submit the third child */ 2948 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 2949 stub_complete_io(1); 2950 CU_ASSERT(g_io_done == false); 2951 2952 /* Completing the third child will result in our callback getting invoked 2953 * since the parent I/O is now complete. 2954 */ 2955 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 2956 stub_complete_io(1); 2957 CU_ASSERT(g_io_done == true); 2958 2959 CU_ASSERT(TAILQ_EMPTY(&g_bdev_ut_channel->expected_io)); 2960 2961 spdk_put_io_channel(io_ch); 2962 spdk_bdev_close(desc); 2963 free_bdev(bdev); 2964 ut_fini_bdev(); 2965 } 2966 2967 static void 2968 bdev_io_write_unit_split_test(void) 2969 { 2970 struct spdk_bdev *bdev; 2971 struct spdk_bdev_desc *desc = NULL; 2972 struct spdk_io_channel *io_ch; 2973 struct spdk_bdev_opts bdev_opts = {}; 2974 struct iovec iov[SPDK_BDEV_IO_NUM_CHILD_IOV * 4]; 2975 struct ut_expected_io *expected_io; 2976 uint64_t i; 2977 int rc; 2978 2979 spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts)); 2980 bdev_opts.bdev_io_pool_size = 512; 2981 bdev_opts.bdev_io_cache_size = 64; 2982 ut_init_bdev(&bdev_opts); 2983 2984 bdev = allocate_bdev("bdev0"); 2985 2986 rc = spdk_bdev_open_ext(bdev->name, true, bdev_ut_event_cb, NULL, &desc); 2987 CU_ASSERT(rc == 0); 2988 SPDK_CU_ASSERT_FATAL(desc != NULL); 2989 io_ch = spdk_bdev_get_io_channel(desc); 2990 CU_ASSERT(io_ch != NULL); 2991 2992 /* Write I/O 2x larger than write_unit_size should get split into 2 I/Os */ 2993 bdev->write_unit_size = 32; 2994 bdev->split_on_write_unit = true; 2995 g_io_done = false; 2996 2997 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 0, 32, 1); 2998 ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 32 * 512); 2999 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 3000 3001 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 32, 32, 1); 3002 ut_expected_io_set_iov(expected_io, 0, (void *)(0xF000 + 32 * 512), 32 * 512); 3003 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 3004 3005 rc = spdk_bdev_write_blocks(desc, io_ch, (void *)0xF000, 0, 64, io_done, NULL); 3006 CU_ASSERT(rc == 0); 3007 CU_ASSERT(g_io_done == false); 3008 3009 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 3010 stub_complete_io(2); 3011 CU_ASSERT(g_io_done == true); 3012 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 3013 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS); 3014 3015 /* Same as above but with optimal_io_boundary < write_unit_size - the I/O should be split 3016 * based on write_unit_size, not optimal_io_boundary */ 3017 bdev->split_on_optimal_io_boundary = true; 3018 bdev->optimal_io_boundary = 16; 3019 g_io_done = false; 3020 3021 rc = spdk_bdev_write_blocks(desc, io_ch, (void *)0xF000, 0, 64, io_done, NULL); 3022 CU_ASSERT(rc == 0); 3023 CU_ASSERT(g_io_done == false); 3024 3025 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 3026 stub_complete_io(2); 3027 CU_ASSERT(g_io_done == true); 3028 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 3029 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS); 3030 3031 /* Write I/O should fail if it is smaller than write_unit_size */ 3032 g_io_done = false; 3033 3034 rc = spdk_bdev_write_blocks(desc, io_ch, (void *)0xF000, 0, 31, io_done, NULL); 3035 CU_ASSERT(rc == 0); 3036 CU_ASSERT(g_io_done == false); 3037 3038 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 3039 poll_threads(); 3040 CU_ASSERT(g_io_done == true); 3041 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 3042 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED); 3043 3044 /* Same for I/O not aligned to write_unit_size */ 3045 g_io_done = false; 3046 3047 rc = spdk_bdev_write_blocks(desc, io_ch, (void *)0xF000, 1, 32, io_done, NULL); 3048 CU_ASSERT(rc == 0); 3049 CU_ASSERT(g_io_done == false); 3050 3051 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 3052 poll_threads(); 3053 CU_ASSERT(g_io_done == true); 3054 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 3055 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED); 3056 3057 /* Write should fail if it needs to be split but there are not enough iovs to submit 3058 * an entire write unit */ 3059 bdev->write_unit_size = SPDK_COUNTOF(iov) / 2; 3060 g_io_done = false; 3061 3062 for (i = 0; i < SPDK_COUNTOF(iov); i++) { 3063 iov[i].iov_base = (void *)(0x1000 + 512 * i); 3064 iov[i].iov_len = 512; 3065 } 3066 3067 rc = spdk_bdev_writev_blocks(desc, io_ch, iov, SPDK_COUNTOF(iov), 0, SPDK_COUNTOF(iov), 3068 io_done, NULL); 3069 CU_ASSERT(rc == 0); 3070 CU_ASSERT(g_io_done == false); 3071 3072 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 3073 poll_threads(); 3074 CU_ASSERT(g_io_done == true); 3075 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 3076 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED); 3077 3078 spdk_put_io_channel(io_ch); 3079 spdk_bdev_close(desc); 3080 free_bdev(bdev); 3081 ut_fini_bdev(); 3082 } 3083 3084 static void 3085 bdev_io_alignment(void) 3086 { 3087 struct spdk_bdev *bdev; 3088 struct spdk_bdev_desc *desc = NULL; 3089 struct spdk_io_channel *io_ch; 3090 struct spdk_bdev_opts bdev_opts = {}; 3091 int rc; 3092 void *buf = NULL; 3093 struct iovec iovs[2]; 3094 int iovcnt; 3095 uint64_t alignment; 3096 3097 spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts)); 3098 bdev_opts.bdev_io_pool_size = 20; 3099 bdev_opts.bdev_io_cache_size = 2; 3100 ut_init_bdev(&bdev_opts); 3101 3102 fn_table.submit_request = stub_submit_request_get_buf; 3103 bdev = allocate_bdev("bdev0"); 3104 3105 rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc); 3106 CU_ASSERT(rc == 0); 3107 CU_ASSERT(desc != NULL); 3108 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 3109 io_ch = spdk_bdev_get_io_channel(desc); 3110 CU_ASSERT(io_ch != NULL); 3111 3112 /* Create aligned buffer */ 3113 rc = posix_memalign(&buf, 4096, 8192); 3114 SPDK_CU_ASSERT_FATAL(rc == 0); 3115 3116 /* Pass aligned single buffer with no alignment required */ 3117 alignment = 1; 3118 bdev->required_alignment = spdk_u32log2(alignment); 3119 3120 rc = spdk_bdev_write_blocks(desc, io_ch, buf, 0, 1, io_done, NULL); 3121 CU_ASSERT(rc == 0); 3122 stub_complete_io(1); 3123 CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt, 3124 alignment)); 3125 3126 rc = spdk_bdev_read_blocks(desc, io_ch, buf, 0, 1, io_done, NULL); 3127 CU_ASSERT(rc == 0); 3128 stub_complete_io(1); 3129 CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt, 3130 alignment)); 3131 3132 /* Pass unaligned single buffer with no alignment required */ 3133 alignment = 1; 3134 bdev->required_alignment = spdk_u32log2(alignment); 3135 3136 rc = spdk_bdev_write_blocks(desc, io_ch, buf + 4, 0, 1, io_done, NULL); 3137 CU_ASSERT(rc == 0); 3138 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0); 3139 CU_ASSERT(g_bdev_io->u.bdev.iovs[0].iov_base == buf + 4); 3140 stub_complete_io(1); 3141 3142 rc = spdk_bdev_read_blocks(desc, io_ch, buf + 4, 0, 1, io_done, NULL); 3143 CU_ASSERT(rc == 0); 3144 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0); 3145 CU_ASSERT(g_bdev_io->u.bdev.iovs[0].iov_base == buf + 4); 3146 stub_complete_io(1); 3147 3148 /* Pass unaligned single buffer with 512 alignment required */ 3149 alignment = 512; 3150 bdev->required_alignment = spdk_u32log2(alignment); 3151 3152 rc = spdk_bdev_write_blocks(desc, io_ch, buf + 4, 0, 1, io_done, NULL); 3153 CU_ASSERT(rc == 0); 3154 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 1); 3155 CU_ASSERT(g_bdev_io->u.bdev.iovs == &g_bdev_io->internal.bounce_iov); 3156 CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt, 3157 alignment)); 3158 stub_complete_io(1); 3159 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0); 3160 3161 rc = spdk_bdev_read_blocks(desc, io_ch, buf + 4, 0, 1, io_done, NULL); 3162 CU_ASSERT(rc == 0); 3163 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 1); 3164 CU_ASSERT(g_bdev_io->u.bdev.iovs == &g_bdev_io->internal.bounce_iov); 3165 CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt, 3166 alignment)); 3167 stub_complete_io(1); 3168 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0); 3169 3170 /* Pass unaligned single buffer with 4096 alignment required */ 3171 alignment = 4096; 3172 bdev->required_alignment = spdk_u32log2(alignment); 3173 3174 rc = spdk_bdev_write_blocks(desc, io_ch, buf + 8, 0, 1, io_done, NULL); 3175 CU_ASSERT(rc == 0); 3176 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 1); 3177 CU_ASSERT(g_bdev_io->u.bdev.iovs == &g_bdev_io->internal.bounce_iov); 3178 CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt, 3179 alignment)); 3180 stub_complete_io(1); 3181 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0); 3182 3183 rc = spdk_bdev_read_blocks(desc, io_ch, buf + 8, 0, 1, io_done, NULL); 3184 CU_ASSERT(rc == 0); 3185 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 1); 3186 CU_ASSERT(g_bdev_io->u.bdev.iovs == &g_bdev_io->internal.bounce_iov); 3187 CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt, 3188 alignment)); 3189 stub_complete_io(1); 3190 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0); 3191 3192 /* Pass aligned iovs with no alignment required */ 3193 alignment = 1; 3194 bdev->required_alignment = spdk_u32log2(alignment); 3195 3196 iovcnt = 1; 3197 iovs[0].iov_base = buf; 3198 iovs[0].iov_len = 512; 3199 3200 rc = spdk_bdev_writev(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL); 3201 CU_ASSERT(rc == 0); 3202 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0); 3203 stub_complete_io(1); 3204 CU_ASSERT(g_bdev_io->u.bdev.iovs[0].iov_base == iovs[0].iov_base); 3205 3206 rc = spdk_bdev_readv(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL); 3207 CU_ASSERT(rc == 0); 3208 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0); 3209 stub_complete_io(1); 3210 CU_ASSERT(g_bdev_io->u.bdev.iovs[0].iov_base == iovs[0].iov_base); 3211 3212 /* Pass unaligned iovs with no alignment required */ 3213 alignment = 1; 3214 bdev->required_alignment = spdk_u32log2(alignment); 3215 3216 iovcnt = 2; 3217 iovs[0].iov_base = buf + 16; 3218 iovs[0].iov_len = 256; 3219 iovs[1].iov_base = buf + 16 + 256 + 32; 3220 iovs[1].iov_len = 256; 3221 3222 rc = spdk_bdev_writev(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL); 3223 CU_ASSERT(rc == 0); 3224 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0); 3225 stub_complete_io(1); 3226 CU_ASSERT(g_bdev_io->u.bdev.iovs[0].iov_base == iovs[0].iov_base); 3227 3228 rc = spdk_bdev_readv(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL); 3229 CU_ASSERT(rc == 0); 3230 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0); 3231 stub_complete_io(1); 3232 CU_ASSERT(g_bdev_io->u.bdev.iovs[0].iov_base == iovs[0].iov_base); 3233 3234 /* Pass unaligned iov with 2048 alignment required */ 3235 alignment = 2048; 3236 bdev->required_alignment = spdk_u32log2(alignment); 3237 3238 iovcnt = 2; 3239 iovs[0].iov_base = buf + 16; 3240 iovs[0].iov_len = 256; 3241 iovs[1].iov_base = buf + 16 + 256 + 32; 3242 iovs[1].iov_len = 256; 3243 3244 rc = spdk_bdev_writev(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL); 3245 CU_ASSERT(rc == 0); 3246 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == iovcnt); 3247 CU_ASSERT(g_bdev_io->u.bdev.iovs == &g_bdev_io->internal.bounce_iov); 3248 CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt, 3249 alignment)); 3250 stub_complete_io(1); 3251 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0); 3252 3253 rc = spdk_bdev_readv(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL); 3254 CU_ASSERT(rc == 0); 3255 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == iovcnt); 3256 CU_ASSERT(g_bdev_io->u.bdev.iovs == &g_bdev_io->internal.bounce_iov); 3257 CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt, 3258 alignment)); 3259 stub_complete_io(1); 3260 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0); 3261 3262 /* Pass iov without allocated buffer without alignment required */ 3263 alignment = 1; 3264 bdev->required_alignment = spdk_u32log2(alignment); 3265 3266 iovcnt = 1; 3267 iovs[0].iov_base = NULL; 3268 iovs[0].iov_len = 0; 3269 3270 rc = spdk_bdev_readv(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL); 3271 CU_ASSERT(rc == 0); 3272 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0); 3273 CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt, 3274 alignment)); 3275 stub_complete_io(1); 3276 3277 /* Pass iov without allocated buffer with 1024 alignment required */ 3278 alignment = 1024; 3279 bdev->required_alignment = spdk_u32log2(alignment); 3280 3281 iovcnt = 1; 3282 iovs[0].iov_base = NULL; 3283 iovs[0].iov_len = 0; 3284 3285 rc = spdk_bdev_readv(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL); 3286 CU_ASSERT(rc == 0); 3287 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0); 3288 CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt, 3289 alignment)); 3290 stub_complete_io(1); 3291 3292 spdk_put_io_channel(io_ch); 3293 spdk_bdev_close(desc); 3294 free_bdev(bdev); 3295 fn_table.submit_request = stub_submit_request; 3296 ut_fini_bdev(); 3297 3298 free(buf); 3299 } 3300 3301 static void 3302 bdev_io_alignment_with_boundary(void) 3303 { 3304 struct spdk_bdev *bdev; 3305 struct spdk_bdev_desc *desc = NULL; 3306 struct spdk_io_channel *io_ch; 3307 struct spdk_bdev_opts bdev_opts = {}; 3308 int rc; 3309 void *buf = NULL; 3310 struct iovec iovs[2]; 3311 int iovcnt; 3312 uint64_t alignment; 3313 3314 spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts)); 3315 bdev_opts.bdev_io_pool_size = 20; 3316 bdev_opts.bdev_io_cache_size = 2; 3317 bdev_opts.opts_size = sizeof(bdev_opts); 3318 ut_init_bdev(&bdev_opts); 3319 3320 fn_table.submit_request = stub_submit_request_get_buf; 3321 bdev = allocate_bdev("bdev0"); 3322 3323 rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc); 3324 CU_ASSERT(rc == 0); 3325 CU_ASSERT(desc != NULL); 3326 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 3327 io_ch = spdk_bdev_get_io_channel(desc); 3328 CU_ASSERT(io_ch != NULL); 3329 3330 /* Create aligned buffer */ 3331 rc = posix_memalign(&buf, 4096, 131072); 3332 SPDK_CU_ASSERT_FATAL(rc == 0); 3333 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 3334 3335 /* 512 * 3 with 2 IO boundary, allocate small data buffer from bdev layer */ 3336 alignment = 512; 3337 bdev->required_alignment = spdk_u32log2(alignment); 3338 bdev->optimal_io_boundary = 2; 3339 bdev->split_on_optimal_io_boundary = true; 3340 3341 iovcnt = 1; 3342 iovs[0].iov_base = NULL; 3343 iovs[0].iov_len = 512 * 3; 3344 3345 rc = spdk_bdev_readv_blocks(desc, io_ch, iovs, iovcnt, 1, 3, io_done, NULL); 3346 CU_ASSERT(rc == 0); 3347 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 3348 stub_complete_io(2); 3349 3350 /* 8KiB with 16 IO boundary, allocate large data buffer from bdev layer */ 3351 alignment = 512; 3352 bdev->required_alignment = spdk_u32log2(alignment); 3353 bdev->optimal_io_boundary = 16; 3354 bdev->split_on_optimal_io_boundary = true; 3355 3356 iovcnt = 1; 3357 iovs[0].iov_base = NULL; 3358 iovs[0].iov_len = 512 * 16; 3359 3360 rc = spdk_bdev_readv_blocks(desc, io_ch, iovs, iovcnt, 1, 16, io_done, NULL); 3361 CU_ASSERT(rc == 0); 3362 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 3363 stub_complete_io(2); 3364 3365 /* 512 * 160 with 128 IO boundary, 63.5KiB + 16.5KiB for the two children requests */ 3366 alignment = 512; 3367 bdev->required_alignment = spdk_u32log2(alignment); 3368 bdev->optimal_io_boundary = 128; 3369 bdev->split_on_optimal_io_boundary = true; 3370 3371 iovcnt = 1; 3372 iovs[0].iov_base = buf + 16; 3373 iovs[0].iov_len = 512 * 160; 3374 rc = spdk_bdev_readv_blocks(desc, io_ch, iovs, iovcnt, 1, 160, io_done, NULL); 3375 CU_ASSERT(rc == 0); 3376 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 3377 stub_complete_io(2); 3378 3379 /* 512 * 3 with 2 IO boundary */ 3380 alignment = 512; 3381 bdev->required_alignment = spdk_u32log2(alignment); 3382 bdev->optimal_io_boundary = 2; 3383 bdev->split_on_optimal_io_boundary = true; 3384 3385 iovcnt = 2; 3386 iovs[0].iov_base = buf + 16; 3387 iovs[0].iov_len = 512; 3388 iovs[1].iov_base = buf + 16 + 512 + 32; 3389 iovs[1].iov_len = 1024; 3390 3391 rc = spdk_bdev_writev_blocks(desc, io_ch, iovs, iovcnt, 1, 3, io_done, NULL); 3392 CU_ASSERT(rc == 0); 3393 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 3394 stub_complete_io(2); 3395 3396 rc = spdk_bdev_readv_blocks(desc, io_ch, iovs, iovcnt, 1, 3, io_done, NULL); 3397 CU_ASSERT(rc == 0); 3398 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 3399 stub_complete_io(2); 3400 3401 /* 512 * 64 with 32 IO boundary */ 3402 bdev->optimal_io_boundary = 32; 3403 iovcnt = 2; 3404 iovs[0].iov_base = buf + 16; 3405 iovs[0].iov_len = 16384; 3406 iovs[1].iov_base = buf + 16 + 16384 + 32; 3407 iovs[1].iov_len = 16384; 3408 3409 rc = spdk_bdev_writev_blocks(desc, io_ch, iovs, iovcnt, 1, 64, io_done, NULL); 3410 CU_ASSERT(rc == 0); 3411 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 3); 3412 stub_complete_io(3); 3413 3414 rc = spdk_bdev_readv_blocks(desc, io_ch, iovs, iovcnt, 1, 64, io_done, NULL); 3415 CU_ASSERT(rc == 0); 3416 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 3); 3417 stub_complete_io(3); 3418 3419 /* 512 * 160 with 32 IO boundary */ 3420 iovcnt = 1; 3421 iovs[0].iov_base = buf + 16; 3422 iovs[0].iov_len = 16384 + 65536; 3423 3424 rc = spdk_bdev_writev_blocks(desc, io_ch, iovs, iovcnt, 1, 160, io_done, NULL); 3425 CU_ASSERT(rc == 0); 3426 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 6); 3427 stub_complete_io(6); 3428 3429 spdk_put_io_channel(io_ch); 3430 spdk_bdev_close(desc); 3431 free_bdev(bdev); 3432 fn_table.submit_request = stub_submit_request; 3433 ut_fini_bdev(); 3434 3435 free(buf); 3436 } 3437 3438 static void 3439 histogram_status_cb(void *cb_arg, int status) 3440 { 3441 g_status = status; 3442 } 3443 3444 static void 3445 histogram_data_cb(void *cb_arg, int status, struct spdk_histogram_data *histogram) 3446 { 3447 g_status = status; 3448 g_histogram = histogram; 3449 } 3450 3451 static void 3452 histogram_io_count(void *ctx, uint64_t start, uint64_t end, uint64_t count, 3453 uint64_t total, uint64_t so_far) 3454 { 3455 g_count += count; 3456 } 3457 3458 static void 3459 histogram_channel_data_cb(void *cb_arg, int status, struct spdk_histogram_data *histogram) 3460 { 3461 spdk_histogram_data_fn cb_fn = cb_arg; 3462 3463 g_status = status; 3464 3465 if (status == 0) { 3466 spdk_histogram_data_iterate(histogram, cb_fn, NULL); 3467 } 3468 } 3469 3470 static void 3471 bdev_histograms(void) 3472 { 3473 struct spdk_bdev *bdev; 3474 struct spdk_bdev_desc *desc = NULL; 3475 struct spdk_io_channel *ch; 3476 struct spdk_histogram_data *histogram; 3477 uint8_t buf[4096]; 3478 int rc; 3479 3480 ut_init_bdev(NULL); 3481 3482 bdev = allocate_bdev("bdev"); 3483 3484 rc = spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc); 3485 CU_ASSERT(rc == 0); 3486 CU_ASSERT(desc != NULL); 3487 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 3488 3489 ch = spdk_bdev_get_io_channel(desc); 3490 CU_ASSERT(ch != NULL); 3491 3492 /* Enable histogram */ 3493 g_status = -1; 3494 spdk_bdev_histogram_enable(bdev, histogram_status_cb, NULL, true); 3495 poll_threads(); 3496 CU_ASSERT(g_status == 0); 3497 CU_ASSERT(bdev->internal.histogram_enabled == true); 3498 3499 /* Allocate histogram */ 3500 histogram = spdk_histogram_data_alloc(); 3501 SPDK_CU_ASSERT_FATAL(histogram != NULL); 3502 3503 /* Check if histogram is zeroed */ 3504 spdk_bdev_histogram_get(bdev, histogram, histogram_data_cb, NULL); 3505 poll_threads(); 3506 CU_ASSERT(g_status == 0); 3507 SPDK_CU_ASSERT_FATAL(g_histogram != NULL); 3508 3509 g_count = 0; 3510 spdk_histogram_data_iterate(g_histogram, histogram_io_count, NULL); 3511 3512 CU_ASSERT(g_count == 0); 3513 3514 rc = spdk_bdev_write_blocks(desc, ch, buf, 0, 1, io_done, NULL); 3515 CU_ASSERT(rc == 0); 3516 3517 spdk_delay_us(10); 3518 stub_complete_io(1); 3519 poll_threads(); 3520 3521 rc = spdk_bdev_read_blocks(desc, ch, buf, 0, 1, io_done, NULL); 3522 CU_ASSERT(rc == 0); 3523 3524 spdk_delay_us(10); 3525 stub_complete_io(1); 3526 poll_threads(); 3527 3528 /* Check if histogram gathered data from all I/O channels */ 3529 g_histogram = NULL; 3530 spdk_bdev_histogram_get(bdev, histogram, histogram_data_cb, NULL); 3531 poll_threads(); 3532 CU_ASSERT(g_status == 0); 3533 CU_ASSERT(bdev->internal.histogram_enabled == true); 3534 SPDK_CU_ASSERT_FATAL(g_histogram != NULL); 3535 3536 g_count = 0; 3537 spdk_histogram_data_iterate(g_histogram, histogram_io_count, NULL); 3538 CU_ASSERT(g_count == 2); 3539 3540 g_count = 0; 3541 spdk_bdev_channel_get_histogram(ch, histogram_channel_data_cb, histogram_io_count); 3542 CU_ASSERT(g_status == 0); 3543 CU_ASSERT(g_count == 2); 3544 3545 /* Disable histogram */ 3546 spdk_bdev_histogram_enable(bdev, histogram_status_cb, NULL, false); 3547 poll_threads(); 3548 CU_ASSERT(g_status == 0); 3549 CU_ASSERT(bdev->internal.histogram_enabled == false); 3550 3551 /* Try to run histogram commands on disabled bdev */ 3552 spdk_bdev_histogram_get(bdev, histogram, histogram_data_cb, NULL); 3553 poll_threads(); 3554 CU_ASSERT(g_status == -EFAULT); 3555 3556 spdk_bdev_channel_get_histogram(ch, histogram_channel_data_cb, NULL); 3557 CU_ASSERT(g_status == -EFAULT); 3558 3559 spdk_histogram_data_free(histogram); 3560 spdk_put_io_channel(ch); 3561 spdk_bdev_close(desc); 3562 free_bdev(bdev); 3563 ut_fini_bdev(); 3564 } 3565 3566 static void 3567 _bdev_compare(bool emulated) 3568 { 3569 struct spdk_bdev *bdev; 3570 struct spdk_bdev_desc *desc = NULL; 3571 struct spdk_io_channel *ioch; 3572 struct ut_expected_io *expected_io; 3573 uint64_t offset, num_blocks; 3574 uint32_t num_completed; 3575 char aa_buf[512]; 3576 char bb_buf[512]; 3577 struct iovec compare_iov; 3578 uint8_t expected_io_type; 3579 int rc; 3580 3581 if (emulated) { 3582 expected_io_type = SPDK_BDEV_IO_TYPE_READ; 3583 } else { 3584 expected_io_type = SPDK_BDEV_IO_TYPE_COMPARE; 3585 } 3586 3587 memset(aa_buf, 0xaa, sizeof(aa_buf)); 3588 memset(bb_buf, 0xbb, sizeof(bb_buf)); 3589 3590 g_io_types_supported[SPDK_BDEV_IO_TYPE_COMPARE] = !emulated; 3591 3592 ut_init_bdev(NULL); 3593 fn_table.submit_request = stub_submit_request_get_buf; 3594 bdev = allocate_bdev("bdev"); 3595 3596 rc = spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc); 3597 CU_ASSERT_EQUAL(rc, 0); 3598 SPDK_CU_ASSERT_FATAL(desc != NULL); 3599 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 3600 ioch = spdk_bdev_get_io_channel(desc); 3601 SPDK_CU_ASSERT_FATAL(ioch != NULL); 3602 3603 fn_table.submit_request = stub_submit_request_get_buf; 3604 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 3605 3606 offset = 50; 3607 num_blocks = 1; 3608 compare_iov.iov_base = aa_buf; 3609 compare_iov.iov_len = sizeof(aa_buf); 3610 3611 /* 1. successful comparev */ 3612 expected_io = ut_alloc_expected_io(expected_io_type, offset, num_blocks, 0); 3613 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 3614 3615 g_io_done = false; 3616 g_compare_read_buf = aa_buf; 3617 g_compare_read_buf_len = sizeof(aa_buf); 3618 rc = spdk_bdev_comparev_blocks(desc, ioch, &compare_iov, 1, offset, num_blocks, io_done, NULL); 3619 CU_ASSERT_EQUAL(rc, 0); 3620 num_completed = stub_complete_io(1); 3621 CU_ASSERT_EQUAL(num_completed, 1); 3622 CU_ASSERT(g_io_done == true); 3623 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS); 3624 3625 /* 2. miscompare comparev */ 3626 expected_io = ut_alloc_expected_io(expected_io_type, offset, num_blocks, 0); 3627 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 3628 3629 g_io_done = false; 3630 g_compare_read_buf = bb_buf; 3631 g_compare_read_buf_len = sizeof(bb_buf); 3632 rc = spdk_bdev_comparev_blocks(desc, ioch, &compare_iov, 1, offset, num_blocks, io_done, NULL); 3633 CU_ASSERT_EQUAL(rc, 0); 3634 num_completed = stub_complete_io(1); 3635 CU_ASSERT_EQUAL(num_completed, 1); 3636 CU_ASSERT(g_io_done == true); 3637 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_MISCOMPARE); 3638 3639 /* 3. successful compare */ 3640 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 3641 expected_io = ut_alloc_expected_io(expected_io_type, offset, num_blocks, 0); 3642 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 3643 3644 g_io_done = false; 3645 g_compare_read_buf = aa_buf; 3646 g_compare_read_buf_len = sizeof(aa_buf); 3647 rc = spdk_bdev_compare_blocks(desc, ioch, aa_buf, offset, num_blocks, io_done, NULL); 3648 CU_ASSERT_EQUAL(rc, 0); 3649 num_completed = stub_complete_io(1); 3650 CU_ASSERT_EQUAL(num_completed, 1); 3651 CU_ASSERT(g_io_done == true); 3652 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS); 3653 3654 /* 4. miscompare compare */ 3655 expected_io = ut_alloc_expected_io(expected_io_type, offset, num_blocks, 0); 3656 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 3657 3658 g_io_done = false; 3659 g_compare_read_buf = bb_buf; 3660 g_compare_read_buf_len = sizeof(bb_buf); 3661 rc = spdk_bdev_compare_blocks(desc, ioch, aa_buf, offset, num_blocks, io_done, NULL); 3662 CU_ASSERT_EQUAL(rc, 0); 3663 num_completed = stub_complete_io(1); 3664 CU_ASSERT_EQUAL(num_completed, 1); 3665 CU_ASSERT(g_io_done == true); 3666 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_MISCOMPARE); 3667 3668 spdk_put_io_channel(ioch); 3669 spdk_bdev_close(desc); 3670 free_bdev(bdev); 3671 fn_table.submit_request = stub_submit_request; 3672 ut_fini_bdev(); 3673 3674 g_io_types_supported[SPDK_BDEV_IO_TYPE_COMPARE] = true; 3675 3676 g_compare_read_buf = NULL; 3677 } 3678 3679 static void 3680 _bdev_compare_with_md(bool emulated) 3681 { 3682 struct spdk_bdev *bdev; 3683 struct spdk_bdev_desc *desc = NULL; 3684 struct spdk_io_channel *ioch; 3685 struct ut_expected_io *expected_io; 3686 uint64_t offset, num_blocks; 3687 uint32_t num_completed; 3688 char buf[1024 + 16 /* 2 * blocklen + 2 * mdlen */]; 3689 char buf_interleaved_miscompare[1024 + 16 /* 2 * blocklen + 2 * mdlen */]; 3690 char buf_miscompare[1024 /* 2 * blocklen */]; 3691 char md_buf[16]; 3692 char md_buf_miscompare[16]; 3693 struct iovec compare_iov; 3694 uint8_t expected_io_type; 3695 int rc; 3696 3697 if (emulated) { 3698 expected_io_type = SPDK_BDEV_IO_TYPE_READ; 3699 } else { 3700 expected_io_type = SPDK_BDEV_IO_TYPE_COMPARE; 3701 } 3702 3703 memset(buf, 0xaa, sizeof(buf)); 3704 memset(buf_interleaved_miscompare, 0xaa, sizeof(buf_interleaved_miscompare)); 3705 /* make last md different */ 3706 memset(buf_interleaved_miscompare + 1024 + 8, 0xbb, 8); 3707 memset(buf_miscompare, 0xbb, sizeof(buf_miscompare)); 3708 memset(md_buf, 0xaa, 16); 3709 memset(md_buf_miscompare, 0xbb, 16); 3710 3711 g_io_types_supported[SPDK_BDEV_IO_TYPE_COMPARE] = !emulated; 3712 3713 ut_init_bdev(NULL); 3714 fn_table.submit_request = stub_submit_request_get_buf; 3715 bdev = allocate_bdev("bdev"); 3716 3717 rc = spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc); 3718 CU_ASSERT_EQUAL(rc, 0); 3719 SPDK_CU_ASSERT_FATAL(desc != NULL); 3720 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 3721 ioch = spdk_bdev_get_io_channel(desc); 3722 SPDK_CU_ASSERT_FATAL(ioch != NULL); 3723 3724 fn_table.submit_request = stub_submit_request_get_buf; 3725 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 3726 3727 offset = 50; 3728 num_blocks = 2; 3729 3730 /* interleaved md & data */ 3731 bdev->md_interleave = true; 3732 bdev->md_len = 8; 3733 bdev->blocklen = 512 + 8; 3734 compare_iov.iov_base = buf; 3735 compare_iov.iov_len = sizeof(buf); 3736 3737 /* 1. successful compare with md interleaved */ 3738 expected_io = ut_alloc_expected_io(expected_io_type, offset, num_blocks, 0); 3739 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 3740 3741 g_io_done = false; 3742 g_compare_read_buf = buf; 3743 g_compare_read_buf_len = sizeof(buf); 3744 rc = spdk_bdev_comparev_blocks(desc, ioch, &compare_iov, 1, offset, num_blocks, io_done, NULL); 3745 CU_ASSERT_EQUAL(rc, 0); 3746 num_completed = stub_complete_io(1); 3747 CU_ASSERT_EQUAL(num_completed, 1); 3748 CU_ASSERT(g_io_done == true); 3749 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS); 3750 3751 /* 2. miscompare with md interleaved */ 3752 expected_io = ut_alloc_expected_io(expected_io_type, offset, num_blocks, 0); 3753 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 3754 3755 g_io_done = false; 3756 g_compare_read_buf = buf_interleaved_miscompare; 3757 g_compare_read_buf_len = sizeof(buf_interleaved_miscompare); 3758 rc = spdk_bdev_comparev_blocks(desc, ioch, &compare_iov, 1, offset, num_blocks, io_done, NULL); 3759 CU_ASSERT_EQUAL(rc, 0); 3760 num_completed = stub_complete_io(1); 3761 CU_ASSERT_EQUAL(num_completed, 1); 3762 CU_ASSERT(g_io_done == true); 3763 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_MISCOMPARE); 3764 3765 /* Separate data & md buffers */ 3766 bdev->md_interleave = false; 3767 bdev->blocklen = 512; 3768 compare_iov.iov_base = buf; 3769 compare_iov.iov_len = 1024; 3770 3771 /* 3. successful compare with md separated */ 3772 expected_io = ut_alloc_expected_io(expected_io_type, offset, num_blocks, 0); 3773 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 3774 3775 g_io_done = false; 3776 g_compare_read_buf = buf; 3777 g_compare_read_buf_len = 1024; 3778 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 3779 g_compare_md_buf = md_buf; 3780 rc = spdk_bdev_comparev_blocks_with_md(desc, ioch, &compare_iov, 1, md_buf, 3781 offset, num_blocks, io_done, NULL); 3782 CU_ASSERT_EQUAL(rc, 0); 3783 num_completed = stub_complete_io(1); 3784 CU_ASSERT_EQUAL(num_completed, 1); 3785 CU_ASSERT(g_io_done == true); 3786 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS); 3787 3788 /* 4. miscompare with md separated where md buf is different */ 3789 expected_io = ut_alloc_expected_io(expected_io_type, offset, num_blocks, 0); 3790 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 3791 3792 g_io_done = false; 3793 g_compare_read_buf = buf; 3794 g_compare_read_buf_len = 1024; 3795 g_compare_md_buf = md_buf_miscompare; 3796 rc = spdk_bdev_comparev_blocks_with_md(desc, ioch, &compare_iov, 1, md_buf, 3797 offset, num_blocks, io_done, NULL); 3798 CU_ASSERT_EQUAL(rc, 0); 3799 num_completed = stub_complete_io(1); 3800 CU_ASSERT_EQUAL(num_completed, 1); 3801 CU_ASSERT(g_io_done == true); 3802 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_MISCOMPARE); 3803 3804 /* 5. miscompare with md separated where buf is different */ 3805 expected_io = ut_alloc_expected_io(expected_io_type, offset, num_blocks, 0); 3806 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 3807 3808 g_io_done = false; 3809 g_compare_read_buf = buf_miscompare; 3810 g_compare_read_buf_len = sizeof(buf_miscompare); 3811 g_compare_md_buf = md_buf; 3812 rc = spdk_bdev_comparev_blocks_with_md(desc, ioch, &compare_iov, 1, md_buf, 3813 offset, num_blocks, io_done, NULL); 3814 CU_ASSERT_EQUAL(rc, 0); 3815 num_completed = stub_complete_io(1); 3816 CU_ASSERT_EQUAL(num_completed, 1); 3817 CU_ASSERT(g_io_done == true); 3818 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_MISCOMPARE); 3819 3820 bdev->md_len = 0; 3821 g_compare_md_buf = NULL; 3822 3823 spdk_put_io_channel(ioch); 3824 spdk_bdev_close(desc); 3825 free_bdev(bdev); 3826 fn_table.submit_request = stub_submit_request; 3827 ut_fini_bdev(); 3828 3829 g_io_types_supported[SPDK_BDEV_IO_TYPE_COMPARE] = true; 3830 3831 g_compare_read_buf = NULL; 3832 } 3833 3834 static void 3835 bdev_compare(void) 3836 { 3837 _bdev_compare(false); 3838 _bdev_compare_with_md(false); 3839 } 3840 3841 static void 3842 bdev_compare_emulated(void) 3843 { 3844 _bdev_compare(true); 3845 _bdev_compare_with_md(true); 3846 } 3847 3848 static void 3849 bdev_compare_and_write(void) 3850 { 3851 struct spdk_bdev *bdev; 3852 struct spdk_bdev_desc *desc = NULL; 3853 struct spdk_io_channel *ioch; 3854 struct ut_expected_io *expected_io; 3855 uint64_t offset, num_blocks; 3856 uint32_t num_completed; 3857 char aa_buf[512]; 3858 char bb_buf[512]; 3859 char cc_buf[512]; 3860 char write_buf[512]; 3861 struct iovec compare_iov; 3862 struct iovec write_iov; 3863 int rc; 3864 3865 memset(aa_buf, 0xaa, sizeof(aa_buf)); 3866 memset(bb_buf, 0xbb, sizeof(bb_buf)); 3867 memset(cc_buf, 0xcc, sizeof(cc_buf)); 3868 3869 g_io_types_supported[SPDK_BDEV_IO_TYPE_COMPARE] = false; 3870 3871 ut_init_bdev(NULL); 3872 fn_table.submit_request = stub_submit_request_get_buf; 3873 bdev = allocate_bdev("bdev"); 3874 3875 rc = spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc); 3876 CU_ASSERT_EQUAL(rc, 0); 3877 SPDK_CU_ASSERT_FATAL(desc != NULL); 3878 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 3879 ioch = spdk_bdev_get_io_channel(desc); 3880 SPDK_CU_ASSERT_FATAL(ioch != NULL); 3881 3882 fn_table.submit_request = stub_submit_request_get_buf; 3883 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 3884 3885 offset = 50; 3886 num_blocks = 1; 3887 compare_iov.iov_base = aa_buf; 3888 compare_iov.iov_len = sizeof(aa_buf); 3889 write_iov.iov_base = bb_buf; 3890 write_iov.iov_len = sizeof(bb_buf); 3891 3892 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, offset, num_blocks, 0); 3893 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 3894 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, offset, num_blocks, 0); 3895 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 3896 3897 g_io_done = false; 3898 g_compare_read_buf = aa_buf; 3899 g_compare_read_buf_len = sizeof(aa_buf); 3900 memset(write_buf, 0, sizeof(write_buf)); 3901 g_compare_write_buf = write_buf; 3902 g_compare_write_buf_len = sizeof(write_buf); 3903 rc = spdk_bdev_comparev_and_writev_blocks(desc, ioch, &compare_iov, 1, &write_iov, 1, 3904 offset, num_blocks, io_done, NULL); 3905 /* Trigger range locking */ 3906 poll_threads(); 3907 CU_ASSERT_EQUAL(rc, 0); 3908 num_completed = stub_complete_io(1); 3909 CU_ASSERT_EQUAL(num_completed, 1); 3910 CU_ASSERT(g_io_done == false); 3911 num_completed = stub_complete_io(1); 3912 /* Trigger range unlocking */ 3913 poll_threads(); 3914 CU_ASSERT_EQUAL(num_completed, 1); 3915 CU_ASSERT(g_io_done == true); 3916 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS); 3917 CU_ASSERT(memcmp(write_buf, bb_buf, sizeof(write_buf)) == 0); 3918 3919 /* Test miscompare */ 3920 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, offset, num_blocks, 0); 3921 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 3922 3923 g_io_done = false; 3924 g_compare_read_buf = cc_buf; 3925 g_compare_read_buf_len = sizeof(cc_buf); 3926 memset(write_buf, 0, sizeof(write_buf)); 3927 g_compare_write_buf = write_buf; 3928 g_compare_write_buf_len = sizeof(write_buf); 3929 rc = spdk_bdev_comparev_and_writev_blocks(desc, ioch, &compare_iov, 1, &write_iov, 1, 3930 offset, num_blocks, io_done, NULL); 3931 /* Trigger range locking */ 3932 poll_threads(); 3933 CU_ASSERT_EQUAL(rc, 0); 3934 num_completed = stub_complete_io(1); 3935 /* Trigger range unlocking earlier because we expect error here */ 3936 poll_threads(); 3937 CU_ASSERT_EQUAL(num_completed, 1); 3938 CU_ASSERT(g_io_done == true); 3939 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_MISCOMPARE); 3940 num_completed = stub_complete_io(1); 3941 CU_ASSERT_EQUAL(num_completed, 0); 3942 3943 spdk_put_io_channel(ioch); 3944 spdk_bdev_close(desc); 3945 free_bdev(bdev); 3946 fn_table.submit_request = stub_submit_request; 3947 ut_fini_bdev(); 3948 3949 g_io_types_supported[SPDK_BDEV_IO_TYPE_COMPARE] = true; 3950 3951 g_compare_read_buf = NULL; 3952 g_compare_write_buf = NULL; 3953 } 3954 3955 static void 3956 bdev_write_zeroes(void) 3957 { 3958 struct spdk_bdev *bdev; 3959 struct spdk_bdev_desc *desc = NULL; 3960 struct spdk_io_channel *ioch; 3961 struct ut_expected_io *expected_io; 3962 uint64_t offset, num_io_blocks, num_blocks; 3963 uint32_t num_completed, num_requests; 3964 int rc; 3965 3966 ut_init_bdev(NULL); 3967 bdev = allocate_bdev("bdev"); 3968 3969 rc = spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc); 3970 CU_ASSERT_EQUAL(rc, 0); 3971 SPDK_CU_ASSERT_FATAL(desc != NULL); 3972 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 3973 ioch = spdk_bdev_get_io_channel(desc); 3974 SPDK_CU_ASSERT_FATAL(ioch != NULL); 3975 3976 fn_table.submit_request = stub_submit_request; 3977 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 3978 3979 /* First test that if the bdev supports write_zeroes, the request won't be split */ 3980 bdev->md_len = 0; 3981 bdev->blocklen = 4096; 3982 num_blocks = (ZERO_BUFFER_SIZE / bdev->blocklen) * 2; 3983 3984 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, 0, num_blocks, 0); 3985 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 3986 rc = spdk_bdev_write_zeroes_blocks(desc, ioch, 0, num_blocks, io_done, NULL); 3987 CU_ASSERT_EQUAL(rc, 0); 3988 num_completed = stub_complete_io(1); 3989 CU_ASSERT_EQUAL(num_completed, 1); 3990 3991 /* Check that if write zeroes is not supported it'll be replaced by regular writes */ 3992 ut_enable_io_type(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, false); 3993 num_io_blocks = ZERO_BUFFER_SIZE / bdev->blocklen; 3994 num_requests = 2; 3995 num_blocks = (ZERO_BUFFER_SIZE / bdev->blocklen) * num_requests; 3996 3997 for (offset = 0; offset < num_requests; ++offset) { 3998 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 3999 offset * num_io_blocks, num_io_blocks, 0); 4000 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 4001 } 4002 4003 rc = spdk_bdev_write_zeroes_blocks(desc, ioch, 0, num_blocks, io_done, NULL); 4004 CU_ASSERT_EQUAL(rc, 0); 4005 num_completed = stub_complete_io(num_requests); 4006 CU_ASSERT_EQUAL(num_completed, num_requests); 4007 4008 /* Check that the splitting is correct if bdev has interleaved metadata */ 4009 bdev->md_interleave = true; 4010 bdev->md_len = 64; 4011 bdev->blocklen = 4096 + 64; 4012 num_blocks = (ZERO_BUFFER_SIZE / bdev->blocklen) * 2; 4013 4014 num_requests = offset = 0; 4015 while (offset < num_blocks) { 4016 num_io_blocks = spdk_min(ZERO_BUFFER_SIZE / bdev->blocklen, num_blocks - offset); 4017 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 4018 offset, num_io_blocks, 0); 4019 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 4020 offset += num_io_blocks; 4021 num_requests++; 4022 } 4023 4024 rc = spdk_bdev_write_zeroes_blocks(desc, ioch, 0, num_blocks, io_done, NULL); 4025 CU_ASSERT_EQUAL(rc, 0); 4026 num_completed = stub_complete_io(num_requests); 4027 CU_ASSERT_EQUAL(num_completed, num_requests); 4028 num_completed = stub_complete_io(num_requests); 4029 assert(num_completed == 0); 4030 4031 /* Check the the same for separate metadata buffer */ 4032 bdev->md_interleave = false; 4033 bdev->md_len = 64; 4034 bdev->blocklen = 4096; 4035 4036 num_requests = offset = 0; 4037 while (offset < num_blocks) { 4038 num_io_blocks = spdk_min(ZERO_BUFFER_SIZE / (bdev->blocklen + bdev->md_len), num_blocks); 4039 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 4040 offset, num_io_blocks, 0); 4041 expected_io->md_buf = (char *)g_bdev_mgr.zero_buffer + num_io_blocks * bdev->blocklen; 4042 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 4043 offset += num_io_blocks; 4044 num_requests++; 4045 } 4046 4047 rc = spdk_bdev_write_zeroes_blocks(desc, ioch, 0, num_blocks, io_done, NULL); 4048 CU_ASSERT_EQUAL(rc, 0); 4049 num_completed = stub_complete_io(num_requests); 4050 CU_ASSERT_EQUAL(num_completed, num_requests); 4051 4052 ut_enable_io_type(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, true); 4053 spdk_put_io_channel(ioch); 4054 spdk_bdev_close(desc); 4055 free_bdev(bdev); 4056 ut_fini_bdev(); 4057 } 4058 4059 static void 4060 bdev_zcopy_write(void) 4061 { 4062 struct spdk_bdev *bdev; 4063 struct spdk_bdev_desc *desc = NULL; 4064 struct spdk_io_channel *ioch; 4065 struct ut_expected_io *expected_io; 4066 uint64_t offset, num_blocks; 4067 uint32_t num_completed; 4068 char aa_buf[512]; 4069 struct iovec iov; 4070 int rc; 4071 const bool populate = false; 4072 const bool commit = true; 4073 4074 memset(aa_buf, 0xaa, sizeof(aa_buf)); 4075 4076 ut_init_bdev(NULL); 4077 bdev = allocate_bdev("bdev"); 4078 4079 rc = spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc); 4080 CU_ASSERT_EQUAL(rc, 0); 4081 SPDK_CU_ASSERT_FATAL(desc != NULL); 4082 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 4083 ioch = spdk_bdev_get_io_channel(desc); 4084 SPDK_CU_ASSERT_FATAL(ioch != NULL); 4085 4086 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 4087 4088 offset = 50; 4089 num_blocks = 1; 4090 iov.iov_base = NULL; 4091 iov.iov_len = 0; 4092 4093 g_zcopy_read_buf = (void *) 0x1122334455667788UL; 4094 g_zcopy_read_buf_len = (uint32_t) -1; 4095 /* Do a zcopy start for a write (populate=false) */ 4096 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_ZCOPY, offset, num_blocks, 0); 4097 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 4098 g_io_done = false; 4099 g_zcopy_write_buf = aa_buf; 4100 g_zcopy_write_buf_len = sizeof(aa_buf); 4101 g_zcopy_bdev_io = NULL; 4102 rc = spdk_bdev_zcopy_start(desc, ioch, &iov, 1, offset, num_blocks, populate, io_done, NULL); 4103 CU_ASSERT_EQUAL(rc, 0); 4104 num_completed = stub_complete_io(1); 4105 CU_ASSERT_EQUAL(num_completed, 1); 4106 CU_ASSERT(g_io_done == true); 4107 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS); 4108 /* Check that the iov has been set up */ 4109 CU_ASSERT(iov.iov_base == g_zcopy_write_buf); 4110 CU_ASSERT(iov.iov_len == g_zcopy_write_buf_len); 4111 /* Check that the bdev_io has been saved */ 4112 CU_ASSERT(g_zcopy_bdev_io != NULL); 4113 /* Now do the zcopy end for a write (commit=true) */ 4114 g_io_done = false; 4115 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_ZCOPY, offset, num_blocks, 0); 4116 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 4117 rc = spdk_bdev_zcopy_end(g_zcopy_bdev_io, commit, io_done, NULL); 4118 CU_ASSERT_EQUAL(rc, 0); 4119 num_completed = stub_complete_io(1); 4120 CU_ASSERT_EQUAL(num_completed, 1); 4121 CU_ASSERT(g_io_done == true); 4122 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS); 4123 /* Check the g_zcopy are reset by io_done */ 4124 CU_ASSERT(g_zcopy_write_buf == NULL); 4125 CU_ASSERT(g_zcopy_write_buf_len == 0); 4126 /* Check that io_done has freed the g_zcopy_bdev_io */ 4127 CU_ASSERT(g_zcopy_bdev_io == NULL); 4128 4129 /* Check the zcopy read buffer has not been touched which 4130 * ensures that the correct buffers were used. 4131 */ 4132 CU_ASSERT(g_zcopy_read_buf == (void *) 0x1122334455667788UL); 4133 CU_ASSERT(g_zcopy_read_buf_len == (uint32_t) -1); 4134 4135 spdk_put_io_channel(ioch); 4136 spdk_bdev_close(desc); 4137 free_bdev(bdev); 4138 ut_fini_bdev(); 4139 } 4140 4141 static void 4142 bdev_zcopy_read(void) 4143 { 4144 struct spdk_bdev *bdev; 4145 struct spdk_bdev_desc *desc = NULL; 4146 struct spdk_io_channel *ioch; 4147 struct ut_expected_io *expected_io; 4148 uint64_t offset, num_blocks; 4149 uint32_t num_completed; 4150 char aa_buf[512]; 4151 struct iovec iov; 4152 int rc; 4153 const bool populate = true; 4154 const bool commit = false; 4155 4156 memset(aa_buf, 0xaa, sizeof(aa_buf)); 4157 4158 ut_init_bdev(NULL); 4159 bdev = allocate_bdev("bdev"); 4160 4161 rc = spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc); 4162 CU_ASSERT_EQUAL(rc, 0); 4163 SPDK_CU_ASSERT_FATAL(desc != NULL); 4164 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 4165 ioch = spdk_bdev_get_io_channel(desc); 4166 SPDK_CU_ASSERT_FATAL(ioch != NULL); 4167 4168 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 4169 4170 offset = 50; 4171 num_blocks = 1; 4172 iov.iov_base = NULL; 4173 iov.iov_len = 0; 4174 4175 g_zcopy_write_buf = (void *) 0x1122334455667788UL; 4176 g_zcopy_write_buf_len = (uint32_t) -1; 4177 4178 /* Do a zcopy start for a read (populate=true) */ 4179 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_ZCOPY, offset, num_blocks, 0); 4180 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 4181 g_io_done = false; 4182 g_zcopy_read_buf = aa_buf; 4183 g_zcopy_read_buf_len = sizeof(aa_buf); 4184 g_zcopy_bdev_io = NULL; 4185 rc = spdk_bdev_zcopy_start(desc, ioch, &iov, 1, offset, num_blocks, populate, io_done, NULL); 4186 CU_ASSERT_EQUAL(rc, 0); 4187 num_completed = stub_complete_io(1); 4188 CU_ASSERT_EQUAL(num_completed, 1); 4189 CU_ASSERT(g_io_done == true); 4190 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS); 4191 /* Check that the iov has been set up */ 4192 CU_ASSERT(iov.iov_base == g_zcopy_read_buf); 4193 CU_ASSERT(iov.iov_len == g_zcopy_read_buf_len); 4194 /* Check that the bdev_io has been saved */ 4195 CU_ASSERT(g_zcopy_bdev_io != NULL); 4196 4197 /* Now do the zcopy end for a read (commit=false) */ 4198 g_io_done = false; 4199 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_ZCOPY, offset, num_blocks, 0); 4200 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 4201 rc = spdk_bdev_zcopy_end(g_zcopy_bdev_io, commit, io_done, NULL); 4202 CU_ASSERT_EQUAL(rc, 0); 4203 num_completed = stub_complete_io(1); 4204 CU_ASSERT_EQUAL(num_completed, 1); 4205 CU_ASSERT(g_io_done == true); 4206 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS); 4207 /* Check the g_zcopy are reset by io_done */ 4208 CU_ASSERT(g_zcopy_read_buf == NULL); 4209 CU_ASSERT(g_zcopy_read_buf_len == 0); 4210 /* Check that io_done has freed the g_zcopy_bdev_io */ 4211 CU_ASSERT(g_zcopy_bdev_io == NULL); 4212 4213 /* Check the zcopy write buffer has not been touched which 4214 * ensures that the correct buffers were used. 4215 */ 4216 CU_ASSERT(g_zcopy_write_buf == (void *) 0x1122334455667788UL); 4217 CU_ASSERT(g_zcopy_write_buf_len == (uint32_t) -1); 4218 4219 spdk_put_io_channel(ioch); 4220 spdk_bdev_close(desc); 4221 free_bdev(bdev); 4222 ut_fini_bdev(); 4223 } 4224 4225 static void 4226 bdev_open_while_hotremove(void) 4227 { 4228 struct spdk_bdev *bdev; 4229 struct spdk_bdev_desc *desc[2] = {}; 4230 int rc; 4231 4232 bdev = allocate_bdev("bdev"); 4233 4234 rc = spdk_bdev_open_ext("bdev", false, bdev_ut_event_cb, NULL, &desc[0]); 4235 CU_ASSERT(rc == 0); 4236 SPDK_CU_ASSERT_FATAL(desc[0] != NULL); 4237 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc[0])); 4238 4239 spdk_bdev_unregister(bdev, NULL, NULL); 4240 /* Bdev unregister is handled asynchronously. Poll thread to complete. */ 4241 poll_threads(); 4242 4243 rc = spdk_bdev_open_ext("bdev", false, bdev_ut_event_cb, NULL, &desc[1]); 4244 CU_ASSERT(rc == -ENODEV); 4245 SPDK_CU_ASSERT_FATAL(desc[1] == NULL); 4246 4247 spdk_bdev_close(desc[0]); 4248 free_bdev(bdev); 4249 } 4250 4251 static void 4252 bdev_close_while_hotremove(void) 4253 { 4254 struct spdk_bdev *bdev; 4255 struct spdk_bdev_desc *desc = NULL; 4256 int rc = 0; 4257 4258 bdev = allocate_bdev("bdev"); 4259 4260 rc = spdk_bdev_open_ext("bdev", true, bdev_open_cb1, &desc, &desc); 4261 CU_ASSERT_EQUAL(rc, 0); 4262 SPDK_CU_ASSERT_FATAL(desc != NULL); 4263 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 4264 4265 /* Simulate hot-unplug by unregistering bdev */ 4266 g_event_type1 = 0xFF; 4267 g_unregister_arg = NULL; 4268 g_unregister_rc = -1; 4269 spdk_bdev_unregister(bdev, bdev_unregister_cb, (void *)0x12345678); 4270 /* Close device while remove event is in flight */ 4271 spdk_bdev_close(desc); 4272 4273 /* Ensure that unregister callback is delayed */ 4274 CU_ASSERT_EQUAL(g_unregister_arg, NULL); 4275 CU_ASSERT_EQUAL(g_unregister_rc, -1); 4276 4277 poll_threads(); 4278 4279 /* Event callback shall not be issued because device was closed */ 4280 CU_ASSERT_EQUAL(g_event_type1, 0xFF); 4281 /* Unregister callback is issued */ 4282 CU_ASSERT_EQUAL(g_unregister_arg, (void *)0x12345678); 4283 CU_ASSERT_EQUAL(g_unregister_rc, 0); 4284 4285 free_bdev(bdev); 4286 } 4287 4288 static void 4289 bdev_open_ext(void) 4290 { 4291 struct spdk_bdev *bdev; 4292 struct spdk_bdev_desc *desc1 = NULL; 4293 struct spdk_bdev_desc *desc2 = NULL; 4294 int rc = 0; 4295 4296 bdev = allocate_bdev("bdev"); 4297 4298 rc = spdk_bdev_open_ext("bdev", true, NULL, NULL, &desc1); 4299 CU_ASSERT_EQUAL(rc, -EINVAL); 4300 4301 rc = spdk_bdev_open_ext("bdev", true, bdev_open_cb1, &desc1, &desc1); 4302 CU_ASSERT_EQUAL(rc, 0); 4303 4304 rc = spdk_bdev_open_ext("bdev", true, bdev_open_cb2, &desc2, &desc2); 4305 CU_ASSERT_EQUAL(rc, 0); 4306 4307 g_event_type1 = 0xFF; 4308 g_event_type2 = 0xFF; 4309 4310 /* Simulate hot-unplug by unregistering bdev */ 4311 spdk_bdev_unregister(bdev, NULL, NULL); 4312 poll_threads(); 4313 4314 /* Check if correct events have been triggered in event callback fn */ 4315 CU_ASSERT_EQUAL(g_event_type1, SPDK_BDEV_EVENT_REMOVE); 4316 CU_ASSERT_EQUAL(g_event_type2, SPDK_BDEV_EVENT_REMOVE); 4317 4318 free_bdev(bdev); 4319 poll_threads(); 4320 } 4321 4322 static void 4323 bdev_open_ext_unregister(void) 4324 { 4325 struct spdk_bdev *bdev; 4326 struct spdk_bdev_desc *desc1 = NULL; 4327 struct spdk_bdev_desc *desc2 = NULL; 4328 struct spdk_bdev_desc *desc3 = NULL; 4329 struct spdk_bdev_desc *desc4 = NULL; 4330 int rc = 0; 4331 4332 bdev = allocate_bdev("bdev"); 4333 4334 rc = spdk_bdev_open_ext("bdev", true, NULL, NULL, &desc1); 4335 CU_ASSERT_EQUAL(rc, -EINVAL); 4336 4337 rc = spdk_bdev_open_ext("bdev", true, bdev_open_cb1, &desc1, &desc1); 4338 CU_ASSERT_EQUAL(rc, 0); 4339 4340 rc = spdk_bdev_open_ext("bdev", true, bdev_open_cb2, &desc2, &desc2); 4341 CU_ASSERT_EQUAL(rc, 0); 4342 4343 rc = spdk_bdev_open_ext("bdev", true, bdev_open_cb3, &desc3, &desc3); 4344 CU_ASSERT_EQUAL(rc, 0); 4345 4346 rc = spdk_bdev_open_ext("bdev", true, bdev_open_cb4, &desc4, &desc4); 4347 CU_ASSERT_EQUAL(rc, 0); 4348 4349 g_event_type1 = 0xFF; 4350 g_event_type2 = 0xFF; 4351 g_event_type3 = 0xFF; 4352 g_event_type4 = 0xFF; 4353 4354 g_unregister_arg = NULL; 4355 g_unregister_rc = -1; 4356 4357 /* Simulate hot-unplug by unregistering bdev */ 4358 spdk_bdev_unregister(bdev, bdev_unregister_cb, (void *)0x12345678); 4359 4360 /* 4361 * Unregister is handled asynchronously and event callback 4362 * (i.e., above bdev_open_cbN) will be called. 4363 * For bdev_open_cb3 and bdev_open_cb4, it is intended to not 4364 * close the desc3 and desc4 so that the bdev is not closed. 4365 */ 4366 poll_threads(); 4367 4368 /* Check if correct events have been triggered in event callback fn */ 4369 CU_ASSERT_EQUAL(g_event_type1, SPDK_BDEV_EVENT_REMOVE); 4370 CU_ASSERT_EQUAL(g_event_type2, SPDK_BDEV_EVENT_REMOVE); 4371 CU_ASSERT_EQUAL(g_event_type3, SPDK_BDEV_EVENT_REMOVE); 4372 CU_ASSERT_EQUAL(g_event_type4, SPDK_BDEV_EVENT_REMOVE); 4373 4374 /* Check that unregister callback is delayed */ 4375 CU_ASSERT(g_unregister_arg == NULL); 4376 CU_ASSERT(g_unregister_rc == -1); 4377 4378 /* 4379 * Explicitly close desc3. As desc4 is still opened there, the 4380 * unergister callback is still delayed to execute. 4381 */ 4382 spdk_bdev_close(desc3); 4383 CU_ASSERT(g_unregister_arg == NULL); 4384 CU_ASSERT(g_unregister_rc == -1); 4385 4386 /* 4387 * Explicitly close desc4 to trigger the ongoing bdev unregister 4388 * operation after last desc is closed. 4389 */ 4390 spdk_bdev_close(desc4); 4391 4392 /* Poll the thread for the async unregister operation */ 4393 poll_threads(); 4394 4395 /* Check that unregister callback is executed */ 4396 CU_ASSERT(g_unregister_arg == (void *)0x12345678); 4397 CU_ASSERT(g_unregister_rc == 0); 4398 4399 free_bdev(bdev); 4400 poll_threads(); 4401 } 4402 4403 struct timeout_io_cb_arg { 4404 struct iovec iov; 4405 uint8_t type; 4406 }; 4407 4408 static int 4409 bdev_channel_count_submitted_io(struct spdk_bdev_channel *ch) 4410 { 4411 struct spdk_bdev_io *bdev_io; 4412 int n = 0; 4413 4414 if (!ch) { 4415 return -1; 4416 } 4417 4418 TAILQ_FOREACH(bdev_io, &ch->io_submitted, internal.ch_link) { 4419 n++; 4420 } 4421 4422 return n; 4423 } 4424 4425 static void 4426 bdev_channel_io_timeout_cb(void *cb_arg, struct spdk_bdev_io *bdev_io) 4427 { 4428 struct timeout_io_cb_arg *ctx = cb_arg; 4429 4430 ctx->type = bdev_io->type; 4431 ctx->iov.iov_base = bdev_io->iov.iov_base; 4432 ctx->iov.iov_len = bdev_io->iov.iov_len; 4433 } 4434 4435 static void 4436 bdev_set_io_timeout(void) 4437 { 4438 struct spdk_bdev *bdev; 4439 struct spdk_bdev_desc *desc = NULL; 4440 struct spdk_io_channel *io_ch = NULL; 4441 struct spdk_bdev_channel *bdev_ch = NULL; 4442 struct timeout_io_cb_arg cb_arg; 4443 4444 ut_init_bdev(NULL); 4445 bdev = allocate_bdev("bdev"); 4446 4447 CU_ASSERT(spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc) == 0); 4448 SPDK_CU_ASSERT_FATAL(desc != NULL); 4449 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 4450 4451 io_ch = spdk_bdev_get_io_channel(desc); 4452 CU_ASSERT(io_ch != NULL); 4453 4454 bdev_ch = spdk_io_channel_get_ctx(io_ch); 4455 CU_ASSERT(TAILQ_EMPTY(&bdev_ch->io_submitted)); 4456 4457 /* This is the part1. 4458 * We will check the bdev_ch->io_submitted list 4459 * TO make sure that it can link IOs and only the user submitted IOs 4460 */ 4461 CU_ASSERT(spdk_bdev_read(desc, io_ch, (void *)0x1000, 0, 4096, io_done, NULL) == 0); 4462 CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 1); 4463 CU_ASSERT(spdk_bdev_write(desc, io_ch, (void *)0x2000, 0, 4096, io_done, NULL) == 0); 4464 CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 2); 4465 stub_complete_io(1); 4466 CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 1); 4467 stub_complete_io(1); 4468 CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 0); 4469 4470 /* Split IO */ 4471 bdev->optimal_io_boundary = 16; 4472 bdev->split_on_optimal_io_boundary = true; 4473 4474 /* Now test that a single-vector command is split correctly. 4475 * Offset 14, length 8, payload 0xF000 4476 * Child - Offset 14, length 2, payload 0xF000 4477 * Child - Offset 16, length 6, payload 0xF000 + 2 * 512 4478 * 4479 * Set up the expected values before calling spdk_bdev_read_blocks 4480 */ 4481 CU_ASSERT(spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 8, io_done, NULL) == 0); 4482 /* We count all submitted IOs including IO that are generated by splitting. */ 4483 CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 3); 4484 stub_complete_io(1); 4485 CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 2); 4486 stub_complete_io(1); 4487 CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 0); 4488 4489 /* Also include the reset IO */ 4490 CU_ASSERT(spdk_bdev_reset(desc, io_ch, io_done, NULL) == 0); 4491 CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 1); 4492 poll_threads(); 4493 stub_complete_io(1); 4494 poll_threads(); 4495 CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 0); 4496 4497 /* This is part2 4498 * Test the desc timeout poller register 4499 */ 4500 4501 /* Successfully set the timeout */ 4502 CU_ASSERT(spdk_bdev_set_timeout(desc, 30, bdev_channel_io_timeout_cb, &cb_arg) == 0); 4503 CU_ASSERT(desc->io_timeout_poller != NULL); 4504 CU_ASSERT(desc->timeout_in_sec == 30); 4505 CU_ASSERT(desc->cb_fn == bdev_channel_io_timeout_cb); 4506 CU_ASSERT(desc->cb_arg == &cb_arg); 4507 4508 /* Change the timeout limit */ 4509 CU_ASSERT(spdk_bdev_set_timeout(desc, 20, bdev_channel_io_timeout_cb, &cb_arg) == 0); 4510 CU_ASSERT(desc->io_timeout_poller != NULL); 4511 CU_ASSERT(desc->timeout_in_sec == 20); 4512 CU_ASSERT(desc->cb_fn == bdev_channel_io_timeout_cb); 4513 CU_ASSERT(desc->cb_arg == &cb_arg); 4514 4515 /* Disable the timeout */ 4516 CU_ASSERT(spdk_bdev_set_timeout(desc, 0, NULL, NULL) == 0); 4517 CU_ASSERT(desc->io_timeout_poller == NULL); 4518 4519 /* This the part3 4520 * We will test to catch timeout IO and check whether the IO is 4521 * the submitted one. 4522 */ 4523 memset(&cb_arg, 0, sizeof(cb_arg)); 4524 CU_ASSERT(spdk_bdev_set_timeout(desc, 30, bdev_channel_io_timeout_cb, &cb_arg) == 0); 4525 CU_ASSERT(spdk_bdev_write_blocks(desc, io_ch, (void *)0x1000, 0, 1, io_done, NULL) == 0); 4526 4527 /* Don't reach the limit */ 4528 spdk_delay_us(15 * spdk_get_ticks_hz()); 4529 poll_threads(); 4530 CU_ASSERT(cb_arg.type == 0); 4531 CU_ASSERT(cb_arg.iov.iov_base == (void *)0x0); 4532 CU_ASSERT(cb_arg.iov.iov_len == 0); 4533 4534 /* 15 + 15 = 30 reach the limit */ 4535 spdk_delay_us(15 * spdk_get_ticks_hz()); 4536 poll_threads(); 4537 CU_ASSERT(cb_arg.type == SPDK_BDEV_IO_TYPE_WRITE); 4538 CU_ASSERT(cb_arg.iov.iov_base == (void *)0x1000); 4539 CU_ASSERT(cb_arg.iov.iov_len == 1 * bdev->blocklen); 4540 stub_complete_io(1); 4541 4542 /* Use the same split IO above and check the IO */ 4543 memset(&cb_arg, 0, sizeof(cb_arg)); 4544 CU_ASSERT(spdk_bdev_write_blocks(desc, io_ch, (void *)0xF000, 14, 8, io_done, NULL) == 0); 4545 4546 /* The first child complete in time */ 4547 spdk_delay_us(15 * spdk_get_ticks_hz()); 4548 poll_threads(); 4549 stub_complete_io(1); 4550 CU_ASSERT(cb_arg.type == 0); 4551 CU_ASSERT(cb_arg.iov.iov_base == (void *)0x0); 4552 CU_ASSERT(cb_arg.iov.iov_len == 0); 4553 4554 /* The second child reach the limit */ 4555 spdk_delay_us(15 * spdk_get_ticks_hz()); 4556 poll_threads(); 4557 CU_ASSERT(cb_arg.type == SPDK_BDEV_IO_TYPE_WRITE); 4558 CU_ASSERT(cb_arg.iov.iov_base == (void *)0xF000); 4559 CU_ASSERT(cb_arg.iov.iov_len == 8 * bdev->blocklen); 4560 stub_complete_io(1); 4561 4562 /* Also include the reset IO */ 4563 memset(&cb_arg, 0, sizeof(cb_arg)); 4564 CU_ASSERT(spdk_bdev_reset(desc, io_ch, io_done, NULL) == 0); 4565 spdk_delay_us(30 * spdk_get_ticks_hz()); 4566 poll_threads(); 4567 CU_ASSERT(cb_arg.type == SPDK_BDEV_IO_TYPE_RESET); 4568 stub_complete_io(1); 4569 poll_threads(); 4570 4571 spdk_put_io_channel(io_ch); 4572 spdk_bdev_close(desc); 4573 free_bdev(bdev); 4574 ut_fini_bdev(); 4575 } 4576 4577 static void 4578 bdev_set_qd_sampling(void) 4579 { 4580 struct spdk_bdev *bdev; 4581 struct spdk_bdev_desc *desc = NULL; 4582 struct spdk_io_channel *io_ch = NULL; 4583 struct spdk_bdev_channel *bdev_ch = NULL; 4584 struct timeout_io_cb_arg cb_arg; 4585 4586 ut_init_bdev(NULL); 4587 bdev = allocate_bdev("bdev"); 4588 4589 CU_ASSERT(spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc) == 0); 4590 SPDK_CU_ASSERT_FATAL(desc != NULL); 4591 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 4592 4593 io_ch = spdk_bdev_get_io_channel(desc); 4594 CU_ASSERT(io_ch != NULL); 4595 4596 bdev_ch = spdk_io_channel_get_ctx(io_ch); 4597 CU_ASSERT(TAILQ_EMPTY(&bdev_ch->io_submitted)); 4598 4599 /* This is the part1. 4600 * We will check the bdev_ch->io_submitted list 4601 * TO make sure that it can link IOs and only the user submitted IOs 4602 */ 4603 CU_ASSERT(spdk_bdev_read(desc, io_ch, (void *)0x1000, 0, 4096, io_done, NULL) == 0); 4604 CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 1); 4605 CU_ASSERT(spdk_bdev_write(desc, io_ch, (void *)0x2000, 0, 4096, io_done, NULL) == 0); 4606 CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 2); 4607 stub_complete_io(1); 4608 CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 1); 4609 stub_complete_io(1); 4610 CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 0); 4611 4612 /* This is the part2. 4613 * Test the bdev's qd poller register 4614 */ 4615 /* 1st Successfully set the qd sampling period */ 4616 spdk_bdev_set_qd_sampling_period(bdev, 10); 4617 CU_ASSERT(bdev->internal.new_period == 10); 4618 CU_ASSERT(bdev->internal.period == 10); 4619 CU_ASSERT(bdev->internal.qd_desc != NULL); 4620 poll_threads(); 4621 CU_ASSERT(bdev->internal.qd_poller != NULL); 4622 4623 /* 2nd Change the qd sampling period */ 4624 spdk_bdev_set_qd_sampling_period(bdev, 20); 4625 CU_ASSERT(bdev->internal.new_period == 20); 4626 CU_ASSERT(bdev->internal.period == 10); 4627 CU_ASSERT(bdev->internal.qd_desc != NULL); 4628 poll_threads(); 4629 CU_ASSERT(bdev->internal.qd_poller != NULL); 4630 CU_ASSERT(bdev->internal.period == bdev->internal.new_period); 4631 4632 /* 3rd Change the qd sampling period and verify qd_poll_in_progress */ 4633 spdk_delay_us(20); 4634 poll_thread_times(0, 1); 4635 CU_ASSERT(bdev->internal.qd_poll_in_progress == true); 4636 spdk_bdev_set_qd_sampling_period(bdev, 30); 4637 CU_ASSERT(bdev->internal.new_period == 30); 4638 CU_ASSERT(bdev->internal.period == 20); 4639 poll_threads(); 4640 CU_ASSERT(bdev->internal.qd_poll_in_progress == false); 4641 CU_ASSERT(bdev->internal.period == bdev->internal.new_period); 4642 4643 /* 4th Disable the qd sampling period */ 4644 spdk_bdev_set_qd_sampling_period(bdev, 0); 4645 CU_ASSERT(bdev->internal.new_period == 0); 4646 CU_ASSERT(bdev->internal.period == 30); 4647 poll_threads(); 4648 CU_ASSERT(bdev->internal.qd_poller == NULL); 4649 CU_ASSERT(bdev->internal.period == bdev->internal.new_period); 4650 CU_ASSERT(bdev->internal.qd_desc == NULL); 4651 4652 /* This is the part3. 4653 * We will test the submitted IO and reset works 4654 * properly with the qd sampling. 4655 */ 4656 memset(&cb_arg, 0, sizeof(cb_arg)); 4657 spdk_bdev_set_qd_sampling_period(bdev, 1); 4658 poll_threads(); 4659 4660 CU_ASSERT(spdk_bdev_write(desc, io_ch, (void *)0x2000, 0, 4096, io_done, NULL) == 0); 4661 CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 1); 4662 4663 /* Also include the reset IO */ 4664 memset(&cb_arg, 0, sizeof(cb_arg)); 4665 CU_ASSERT(spdk_bdev_reset(desc, io_ch, io_done, NULL) == 0); 4666 poll_threads(); 4667 4668 /* Close the desc */ 4669 spdk_put_io_channel(io_ch); 4670 spdk_bdev_close(desc); 4671 4672 /* Complete the submitted IO and reset */ 4673 stub_complete_io(2); 4674 poll_threads(); 4675 4676 free_bdev(bdev); 4677 ut_fini_bdev(); 4678 } 4679 4680 static void 4681 lba_range_overlap(void) 4682 { 4683 struct lba_range r1, r2; 4684 4685 r1.offset = 100; 4686 r1.length = 50; 4687 4688 r2.offset = 0; 4689 r2.length = 1; 4690 CU_ASSERT(!bdev_lba_range_overlapped(&r1, &r2)); 4691 4692 r2.offset = 0; 4693 r2.length = 100; 4694 CU_ASSERT(!bdev_lba_range_overlapped(&r1, &r2)); 4695 4696 r2.offset = 0; 4697 r2.length = 110; 4698 CU_ASSERT(bdev_lba_range_overlapped(&r1, &r2)); 4699 4700 r2.offset = 100; 4701 r2.length = 10; 4702 CU_ASSERT(bdev_lba_range_overlapped(&r1, &r2)); 4703 4704 r2.offset = 110; 4705 r2.length = 20; 4706 CU_ASSERT(bdev_lba_range_overlapped(&r1, &r2)); 4707 4708 r2.offset = 140; 4709 r2.length = 150; 4710 CU_ASSERT(bdev_lba_range_overlapped(&r1, &r2)); 4711 4712 r2.offset = 130; 4713 r2.length = 200; 4714 CU_ASSERT(bdev_lba_range_overlapped(&r1, &r2)); 4715 4716 r2.offset = 150; 4717 r2.length = 100; 4718 CU_ASSERT(!bdev_lba_range_overlapped(&r1, &r2)); 4719 4720 r2.offset = 110; 4721 r2.length = 0; 4722 CU_ASSERT(!bdev_lba_range_overlapped(&r1, &r2)); 4723 } 4724 4725 static bool g_lock_lba_range_done; 4726 static bool g_unlock_lba_range_done; 4727 4728 static void 4729 lock_lba_range_done(void *ctx, int status) 4730 { 4731 g_lock_lba_range_done = true; 4732 } 4733 4734 static void 4735 unlock_lba_range_done(void *ctx, int status) 4736 { 4737 g_unlock_lba_range_done = true; 4738 } 4739 4740 static void 4741 lock_lba_range_check_ranges(void) 4742 { 4743 struct spdk_bdev *bdev; 4744 struct spdk_bdev_desc *desc = NULL; 4745 struct spdk_io_channel *io_ch; 4746 struct spdk_bdev_channel *channel; 4747 struct lba_range *range; 4748 int ctx1; 4749 int rc; 4750 4751 ut_init_bdev(NULL); 4752 bdev = allocate_bdev("bdev0"); 4753 4754 rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc); 4755 CU_ASSERT(rc == 0); 4756 CU_ASSERT(desc != NULL); 4757 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 4758 io_ch = spdk_bdev_get_io_channel(desc); 4759 CU_ASSERT(io_ch != NULL); 4760 channel = spdk_io_channel_get_ctx(io_ch); 4761 4762 g_lock_lba_range_done = false; 4763 rc = bdev_lock_lba_range(desc, io_ch, 20, 10, lock_lba_range_done, &ctx1); 4764 CU_ASSERT(rc == 0); 4765 poll_threads(); 4766 4767 CU_ASSERT(g_lock_lba_range_done == true); 4768 range = TAILQ_FIRST(&channel->locked_ranges); 4769 SPDK_CU_ASSERT_FATAL(range != NULL); 4770 CU_ASSERT(range->offset == 20); 4771 CU_ASSERT(range->length == 10); 4772 CU_ASSERT(range->owner_ch == channel); 4773 4774 /* Unlocks must exactly match a lock. */ 4775 g_unlock_lba_range_done = false; 4776 rc = bdev_unlock_lba_range(desc, io_ch, 20, 1, unlock_lba_range_done, &ctx1); 4777 CU_ASSERT(rc == -EINVAL); 4778 CU_ASSERT(g_unlock_lba_range_done == false); 4779 4780 rc = bdev_unlock_lba_range(desc, io_ch, 20, 10, unlock_lba_range_done, &ctx1); 4781 CU_ASSERT(rc == 0); 4782 spdk_delay_us(100); 4783 poll_threads(); 4784 4785 CU_ASSERT(g_unlock_lba_range_done == true); 4786 CU_ASSERT(TAILQ_EMPTY(&channel->locked_ranges)); 4787 4788 spdk_put_io_channel(io_ch); 4789 spdk_bdev_close(desc); 4790 free_bdev(bdev); 4791 ut_fini_bdev(); 4792 } 4793 4794 static void 4795 lock_lba_range_with_io_outstanding(void) 4796 { 4797 struct spdk_bdev *bdev; 4798 struct spdk_bdev_desc *desc = NULL; 4799 struct spdk_io_channel *io_ch; 4800 struct spdk_bdev_channel *channel; 4801 struct lba_range *range; 4802 char buf[4096]; 4803 int ctx1; 4804 int rc; 4805 4806 ut_init_bdev(NULL); 4807 bdev = allocate_bdev("bdev0"); 4808 4809 rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc); 4810 CU_ASSERT(rc == 0); 4811 CU_ASSERT(desc != NULL); 4812 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 4813 io_ch = spdk_bdev_get_io_channel(desc); 4814 CU_ASSERT(io_ch != NULL); 4815 channel = spdk_io_channel_get_ctx(io_ch); 4816 4817 g_io_done = false; 4818 rc = spdk_bdev_read_blocks(desc, io_ch, buf, 20, 1, io_done, &ctx1); 4819 CU_ASSERT(rc == 0); 4820 4821 g_lock_lba_range_done = false; 4822 rc = bdev_lock_lba_range(desc, io_ch, 20, 10, lock_lba_range_done, &ctx1); 4823 CU_ASSERT(rc == 0); 4824 poll_threads(); 4825 4826 /* The lock should immediately become valid, since there are no outstanding 4827 * write I/O. 4828 */ 4829 CU_ASSERT(g_io_done == false); 4830 CU_ASSERT(g_lock_lba_range_done == true); 4831 range = TAILQ_FIRST(&channel->locked_ranges); 4832 SPDK_CU_ASSERT_FATAL(range != NULL); 4833 CU_ASSERT(range->offset == 20); 4834 CU_ASSERT(range->length == 10); 4835 CU_ASSERT(range->owner_ch == channel); 4836 CU_ASSERT(range->locked_ctx == &ctx1); 4837 4838 rc = bdev_unlock_lba_range(desc, io_ch, 20, 10, lock_lba_range_done, &ctx1); 4839 CU_ASSERT(rc == 0); 4840 stub_complete_io(1); 4841 spdk_delay_us(100); 4842 poll_threads(); 4843 4844 CU_ASSERT(TAILQ_EMPTY(&channel->locked_ranges)); 4845 4846 /* Now try again, but with a write I/O. */ 4847 g_io_done = false; 4848 rc = spdk_bdev_write_blocks(desc, io_ch, buf, 20, 1, io_done, &ctx1); 4849 CU_ASSERT(rc == 0); 4850 4851 g_lock_lba_range_done = false; 4852 rc = bdev_lock_lba_range(desc, io_ch, 20, 10, lock_lba_range_done, &ctx1); 4853 CU_ASSERT(rc == 0); 4854 poll_threads(); 4855 4856 /* The lock should not be fully valid yet, since a write I/O is outstanding. 4857 * But note that the range should be on the channel's locked_list, to make sure no 4858 * new write I/O are started. 4859 */ 4860 CU_ASSERT(g_io_done == false); 4861 CU_ASSERT(g_lock_lba_range_done == false); 4862 range = TAILQ_FIRST(&channel->locked_ranges); 4863 SPDK_CU_ASSERT_FATAL(range != NULL); 4864 CU_ASSERT(range->offset == 20); 4865 CU_ASSERT(range->length == 10); 4866 4867 /* Complete the write I/O. This should make the lock valid (checked by confirming 4868 * our callback was invoked). 4869 */ 4870 stub_complete_io(1); 4871 spdk_delay_us(100); 4872 poll_threads(); 4873 CU_ASSERT(g_io_done == true); 4874 CU_ASSERT(g_lock_lba_range_done == true); 4875 4876 rc = bdev_unlock_lba_range(desc, io_ch, 20, 10, unlock_lba_range_done, &ctx1); 4877 CU_ASSERT(rc == 0); 4878 poll_threads(); 4879 4880 CU_ASSERT(TAILQ_EMPTY(&channel->locked_ranges)); 4881 4882 spdk_put_io_channel(io_ch); 4883 spdk_bdev_close(desc); 4884 free_bdev(bdev); 4885 ut_fini_bdev(); 4886 } 4887 4888 static void 4889 lock_lba_range_overlapped(void) 4890 { 4891 struct spdk_bdev *bdev; 4892 struct spdk_bdev_desc *desc = NULL; 4893 struct spdk_io_channel *io_ch; 4894 struct spdk_bdev_channel *channel; 4895 struct lba_range *range; 4896 int ctx1; 4897 int rc; 4898 4899 ut_init_bdev(NULL); 4900 bdev = allocate_bdev("bdev0"); 4901 4902 rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc); 4903 CU_ASSERT(rc == 0); 4904 CU_ASSERT(desc != NULL); 4905 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 4906 io_ch = spdk_bdev_get_io_channel(desc); 4907 CU_ASSERT(io_ch != NULL); 4908 channel = spdk_io_channel_get_ctx(io_ch); 4909 4910 /* Lock range 20-29. */ 4911 g_lock_lba_range_done = false; 4912 rc = bdev_lock_lba_range(desc, io_ch, 20, 10, lock_lba_range_done, &ctx1); 4913 CU_ASSERT(rc == 0); 4914 poll_threads(); 4915 4916 CU_ASSERT(g_lock_lba_range_done == true); 4917 range = TAILQ_FIRST(&channel->locked_ranges); 4918 SPDK_CU_ASSERT_FATAL(range != NULL); 4919 CU_ASSERT(range->offset == 20); 4920 CU_ASSERT(range->length == 10); 4921 4922 /* Try to lock range 25-39. It should not lock immediately, since it overlaps with 4923 * 20-29. 4924 */ 4925 g_lock_lba_range_done = false; 4926 rc = bdev_lock_lba_range(desc, io_ch, 25, 15, lock_lba_range_done, &ctx1); 4927 CU_ASSERT(rc == 0); 4928 poll_threads(); 4929 4930 CU_ASSERT(g_lock_lba_range_done == false); 4931 range = TAILQ_FIRST(&bdev->internal.pending_locked_ranges); 4932 SPDK_CU_ASSERT_FATAL(range != NULL); 4933 CU_ASSERT(range->offset == 25); 4934 CU_ASSERT(range->length == 15); 4935 4936 /* Unlock 20-29. This should result in range 25-39 now getting locked since it 4937 * no longer overlaps with an active lock. 4938 */ 4939 g_unlock_lba_range_done = false; 4940 rc = bdev_unlock_lba_range(desc, io_ch, 20, 10, unlock_lba_range_done, &ctx1); 4941 CU_ASSERT(rc == 0); 4942 poll_threads(); 4943 4944 CU_ASSERT(g_unlock_lba_range_done == true); 4945 CU_ASSERT(TAILQ_EMPTY(&bdev->internal.pending_locked_ranges)); 4946 range = TAILQ_FIRST(&channel->locked_ranges); 4947 SPDK_CU_ASSERT_FATAL(range != NULL); 4948 CU_ASSERT(range->offset == 25); 4949 CU_ASSERT(range->length == 15); 4950 4951 /* Lock 40-59. This should immediately lock since it does not overlap with the 4952 * currently active 25-39 lock. 4953 */ 4954 g_lock_lba_range_done = false; 4955 rc = bdev_lock_lba_range(desc, io_ch, 40, 20, lock_lba_range_done, &ctx1); 4956 CU_ASSERT(rc == 0); 4957 poll_threads(); 4958 4959 CU_ASSERT(g_lock_lba_range_done == true); 4960 range = TAILQ_FIRST(&bdev->internal.locked_ranges); 4961 SPDK_CU_ASSERT_FATAL(range != NULL); 4962 range = TAILQ_NEXT(range, tailq); 4963 SPDK_CU_ASSERT_FATAL(range != NULL); 4964 CU_ASSERT(range->offset == 40); 4965 CU_ASSERT(range->length == 20); 4966 4967 /* Try to lock 35-44. Note that this overlaps with both 25-39 and 40-59. */ 4968 g_lock_lba_range_done = false; 4969 rc = bdev_lock_lba_range(desc, io_ch, 35, 10, lock_lba_range_done, &ctx1); 4970 CU_ASSERT(rc == 0); 4971 poll_threads(); 4972 4973 CU_ASSERT(g_lock_lba_range_done == false); 4974 range = TAILQ_FIRST(&bdev->internal.pending_locked_ranges); 4975 SPDK_CU_ASSERT_FATAL(range != NULL); 4976 CU_ASSERT(range->offset == 35); 4977 CU_ASSERT(range->length == 10); 4978 4979 /* Unlock 25-39. Make sure that 35-44 is still in the pending list, since 4980 * the 40-59 lock is still active. 4981 */ 4982 g_unlock_lba_range_done = false; 4983 rc = bdev_unlock_lba_range(desc, io_ch, 25, 15, unlock_lba_range_done, &ctx1); 4984 CU_ASSERT(rc == 0); 4985 poll_threads(); 4986 4987 CU_ASSERT(g_unlock_lba_range_done == true); 4988 CU_ASSERT(g_lock_lba_range_done == false); 4989 range = TAILQ_FIRST(&bdev->internal.pending_locked_ranges); 4990 SPDK_CU_ASSERT_FATAL(range != NULL); 4991 CU_ASSERT(range->offset == 35); 4992 CU_ASSERT(range->length == 10); 4993 4994 /* Unlock 40-59. This should result in 35-44 now getting locked, since there are 4995 * no longer any active overlapping locks. 4996 */ 4997 g_unlock_lba_range_done = false; 4998 rc = bdev_unlock_lba_range(desc, io_ch, 40, 20, unlock_lba_range_done, &ctx1); 4999 CU_ASSERT(rc == 0); 5000 poll_threads(); 5001 5002 CU_ASSERT(g_unlock_lba_range_done == true); 5003 CU_ASSERT(g_lock_lba_range_done == true); 5004 CU_ASSERT(TAILQ_EMPTY(&bdev->internal.pending_locked_ranges)); 5005 range = TAILQ_FIRST(&bdev->internal.locked_ranges); 5006 SPDK_CU_ASSERT_FATAL(range != NULL); 5007 CU_ASSERT(range->offset == 35); 5008 CU_ASSERT(range->length == 10); 5009 5010 /* Finally, unlock 35-44. */ 5011 g_unlock_lba_range_done = false; 5012 rc = bdev_unlock_lba_range(desc, io_ch, 35, 10, unlock_lba_range_done, &ctx1); 5013 CU_ASSERT(rc == 0); 5014 poll_threads(); 5015 5016 CU_ASSERT(g_unlock_lba_range_done == true); 5017 CU_ASSERT(TAILQ_EMPTY(&bdev->internal.locked_ranges)); 5018 5019 spdk_put_io_channel(io_ch); 5020 spdk_bdev_close(desc); 5021 free_bdev(bdev); 5022 ut_fini_bdev(); 5023 } 5024 5025 static void 5026 abort_done(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg) 5027 { 5028 g_abort_done = true; 5029 g_abort_status = bdev_io->internal.status; 5030 spdk_bdev_free_io(bdev_io); 5031 } 5032 5033 static void 5034 bdev_io_abort(void) 5035 { 5036 struct spdk_bdev *bdev; 5037 struct spdk_bdev_desc *desc = NULL; 5038 struct spdk_io_channel *io_ch; 5039 struct spdk_bdev_channel *channel; 5040 struct spdk_bdev_mgmt_channel *mgmt_ch; 5041 struct spdk_bdev_opts bdev_opts = {}; 5042 struct iovec iov[SPDK_BDEV_IO_NUM_CHILD_IOV * 2]; 5043 uint64_t io_ctx1 = 0, io_ctx2 = 0, i; 5044 int rc; 5045 5046 spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts)); 5047 bdev_opts.bdev_io_pool_size = 7; 5048 bdev_opts.bdev_io_cache_size = 2; 5049 ut_init_bdev(&bdev_opts); 5050 5051 bdev = allocate_bdev("bdev0"); 5052 5053 rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc); 5054 CU_ASSERT(rc == 0); 5055 CU_ASSERT(desc != NULL); 5056 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 5057 io_ch = spdk_bdev_get_io_channel(desc); 5058 CU_ASSERT(io_ch != NULL); 5059 channel = spdk_io_channel_get_ctx(io_ch); 5060 mgmt_ch = channel->shared_resource->mgmt_ch; 5061 5062 g_abort_done = false; 5063 5064 ut_enable_io_type(SPDK_BDEV_IO_TYPE_ABORT, false); 5065 5066 rc = spdk_bdev_abort(desc, io_ch, &io_ctx1, abort_done, NULL); 5067 CU_ASSERT(rc == -ENOTSUP); 5068 5069 ut_enable_io_type(SPDK_BDEV_IO_TYPE_ABORT, true); 5070 5071 rc = spdk_bdev_abort(desc, io_ch, &io_ctx2, abort_done, NULL); 5072 CU_ASSERT(rc == 0); 5073 CU_ASSERT(g_abort_done == true); 5074 CU_ASSERT(g_abort_status == SPDK_BDEV_IO_STATUS_FAILED); 5075 5076 /* Test the case that the target I/O was successfully aborted. */ 5077 g_io_done = false; 5078 5079 rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, &io_ctx1); 5080 CU_ASSERT(rc == 0); 5081 CU_ASSERT(g_io_done == false); 5082 5083 g_abort_done = false; 5084 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 5085 5086 rc = spdk_bdev_abort(desc, io_ch, &io_ctx1, abort_done, NULL); 5087 CU_ASSERT(rc == 0); 5088 CU_ASSERT(g_io_done == true); 5089 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED); 5090 stub_complete_io(1); 5091 CU_ASSERT(g_abort_done == true); 5092 CU_ASSERT(g_abort_status == SPDK_BDEV_IO_STATUS_SUCCESS); 5093 5094 /* Test the case that the target I/O was not aborted because it completed 5095 * in the middle of execution of the abort. 5096 */ 5097 g_io_done = false; 5098 5099 rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, &io_ctx1); 5100 CU_ASSERT(rc == 0); 5101 CU_ASSERT(g_io_done == false); 5102 5103 g_abort_done = false; 5104 g_io_exp_status = SPDK_BDEV_IO_STATUS_FAILED; 5105 5106 rc = spdk_bdev_abort(desc, io_ch, &io_ctx1, abort_done, NULL); 5107 CU_ASSERT(rc == 0); 5108 CU_ASSERT(g_io_done == false); 5109 5110 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 5111 stub_complete_io(1); 5112 CU_ASSERT(g_io_done == true); 5113 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS); 5114 5115 g_io_exp_status = SPDK_BDEV_IO_STATUS_FAILED; 5116 stub_complete_io(1); 5117 CU_ASSERT(g_abort_done == true); 5118 CU_ASSERT(g_abort_status == SPDK_BDEV_IO_STATUS_SUCCESS); 5119 5120 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 5121 5122 bdev->optimal_io_boundary = 16; 5123 bdev->split_on_optimal_io_boundary = true; 5124 5125 /* Test that a single-vector command which is split is aborted correctly. 5126 * Offset 14, length 8, payload 0xF000 5127 * Child - Offset 14, length 2, payload 0xF000 5128 * Child - Offset 16, length 6, payload 0xF000 + 2 * 512 5129 */ 5130 g_io_done = false; 5131 5132 rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 8, io_done, &io_ctx1); 5133 CU_ASSERT(rc == 0); 5134 CU_ASSERT(g_io_done == false); 5135 5136 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 5137 5138 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 5139 5140 rc = spdk_bdev_abort(desc, io_ch, &io_ctx1, abort_done, NULL); 5141 CU_ASSERT(rc == 0); 5142 CU_ASSERT(g_io_done == true); 5143 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED); 5144 stub_complete_io(2); 5145 CU_ASSERT(g_abort_done == true); 5146 CU_ASSERT(g_abort_status == SPDK_BDEV_IO_STATUS_SUCCESS); 5147 5148 /* Test that a multi-vector command that needs to be split by strip and then 5149 * needs to be split is aborted correctly. Abort is requested before the second 5150 * child I/O was submitted. The parent I/O should complete with failure without 5151 * submitting the second child I/O. 5152 */ 5153 for (i = 0; i < SPDK_BDEV_IO_NUM_CHILD_IOV * 2; i++) { 5154 iov[i].iov_base = (void *)((i + 1) * 0x10000); 5155 iov[i].iov_len = 512; 5156 } 5157 5158 bdev->optimal_io_boundary = SPDK_BDEV_IO_NUM_CHILD_IOV; 5159 g_io_done = false; 5160 rc = spdk_bdev_readv_blocks(desc, io_ch, iov, SPDK_BDEV_IO_NUM_CHILD_IOV * 2, 0, 5161 SPDK_BDEV_IO_NUM_CHILD_IOV * 2, io_done, &io_ctx1); 5162 CU_ASSERT(rc == 0); 5163 CU_ASSERT(g_io_done == false); 5164 5165 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 5166 5167 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 5168 5169 rc = spdk_bdev_abort(desc, io_ch, &io_ctx1, abort_done, NULL); 5170 CU_ASSERT(rc == 0); 5171 CU_ASSERT(g_io_done == true); 5172 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED); 5173 stub_complete_io(1); 5174 CU_ASSERT(g_abort_done == true); 5175 CU_ASSERT(g_abort_status == SPDK_BDEV_IO_STATUS_SUCCESS); 5176 5177 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 5178 5179 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 5180 5181 bdev->optimal_io_boundary = 16; 5182 g_io_done = false; 5183 5184 /* Test that a ingle-vector command which is split is aborted correctly. 5185 * Differently from the above, the child abort request will be submitted 5186 * sequentially due to the capacity of spdk_bdev_io. 5187 */ 5188 rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 50, io_done, &io_ctx1); 5189 CU_ASSERT(rc == 0); 5190 CU_ASSERT(g_io_done == false); 5191 5192 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 4); 5193 5194 g_abort_done = false; 5195 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 5196 5197 rc = spdk_bdev_abort(desc, io_ch, &io_ctx1, abort_done, NULL); 5198 CU_ASSERT(rc == 0); 5199 CU_ASSERT(!TAILQ_EMPTY(&mgmt_ch->io_wait_queue)); 5200 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 4); 5201 5202 stub_complete_io(1); 5203 CU_ASSERT(g_io_done == true); 5204 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED); 5205 stub_complete_io(3); 5206 CU_ASSERT(g_abort_done == true); 5207 CU_ASSERT(g_abort_status == SPDK_BDEV_IO_STATUS_SUCCESS); 5208 5209 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 5210 5211 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 5212 5213 spdk_put_io_channel(io_ch); 5214 spdk_bdev_close(desc); 5215 free_bdev(bdev); 5216 ut_fini_bdev(); 5217 } 5218 5219 static void 5220 bdev_unmap(void) 5221 { 5222 struct spdk_bdev *bdev; 5223 struct spdk_bdev_desc *desc = NULL; 5224 struct spdk_io_channel *ioch; 5225 struct spdk_bdev_channel *bdev_ch; 5226 struct ut_expected_io *expected_io; 5227 struct spdk_bdev_opts bdev_opts = {}; 5228 uint32_t i, num_outstanding; 5229 uint64_t offset, num_blocks, max_unmap_blocks, num_children; 5230 int rc; 5231 5232 spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts)); 5233 bdev_opts.bdev_io_pool_size = 512; 5234 bdev_opts.bdev_io_cache_size = 64; 5235 ut_init_bdev(&bdev_opts); 5236 5237 bdev = allocate_bdev("bdev"); 5238 5239 rc = spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc); 5240 CU_ASSERT_EQUAL(rc, 0); 5241 SPDK_CU_ASSERT_FATAL(desc != NULL); 5242 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 5243 ioch = spdk_bdev_get_io_channel(desc); 5244 SPDK_CU_ASSERT_FATAL(ioch != NULL); 5245 bdev_ch = spdk_io_channel_get_ctx(ioch); 5246 CU_ASSERT(TAILQ_EMPTY(&bdev_ch->io_submitted)); 5247 5248 fn_table.submit_request = stub_submit_request; 5249 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 5250 5251 /* Case 1: First test the request won't be split */ 5252 num_blocks = 32; 5253 5254 g_io_done = false; 5255 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_UNMAP, 0, num_blocks, 0); 5256 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 5257 rc = spdk_bdev_unmap_blocks(desc, ioch, 0, num_blocks, io_done, NULL); 5258 CU_ASSERT_EQUAL(rc, 0); 5259 CU_ASSERT(g_io_done == false); 5260 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 5261 stub_complete_io(1); 5262 CU_ASSERT(g_io_done == true); 5263 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 5264 5265 /* Case 2: Test the split with 2 children requests */ 5266 bdev->max_unmap = 8; 5267 bdev->max_unmap_segments = 2; 5268 max_unmap_blocks = bdev->max_unmap * bdev->max_unmap_segments; 5269 num_blocks = max_unmap_blocks * 2; 5270 offset = 0; 5271 5272 g_io_done = false; 5273 for (i = 0; i < 2; i++) { 5274 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_UNMAP, offset, max_unmap_blocks, 0); 5275 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 5276 offset += max_unmap_blocks; 5277 } 5278 5279 rc = spdk_bdev_unmap_blocks(desc, ioch, 0, num_blocks, io_done, NULL); 5280 CU_ASSERT_EQUAL(rc, 0); 5281 CU_ASSERT(g_io_done == false); 5282 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 5283 stub_complete_io(2); 5284 CU_ASSERT(g_io_done == true); 5285 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 5286 5287 /* Case 3: Test the split with 15 children requests, will finish 8 requests first */ 5288 num_children = 15; 5289 num_blocks = max_unmap_blocks * num_children; 5290 g_io_done = false; 5291 offset = 0; 5292 for (i = 0; i < num_children; i++) { 5293 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_UNMAP, offset, max_unmap_blocks, 0); 5294 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 5295 offset += max_unmap_blocks; 5296 } 5297 5298 rc = spdk_bdev_unmap_blocks(desc, ioch, 0, num_blocks, io_done, NULL); 5299 CU_ASSERT_EQUAL(rc, 0); 5300 CU_ASSERT(g_io_done == false); 5301 5302 while (num_children > 0) { 5303 num_outstanding = spdk_min(num_children, SPDK_BDEV_MAX_CHILDREN_UNMAP_WRITE_ZEROES_REQS); 5304 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == num_outstanding); 5305 stub_complete_io(num_outstanding); 5306 num_children -= num_outstanding; 5307 } 5308 CU_ASSERT(g_io_done == true); 5309 5310 spdk_put_io_channel(ioch); 5311 spdk_bdev_close(desc); 5312 free_bdev(bdev); 5313 ut_fini_bdev(); 5314 } 5315 5316 static void 5317 bdev_write_zeroes_split_test(void) 5318 { 5319 struct spdk_bdev *bdev; 5320 struct spdk_bdev_desc *desc = NULL; 5321 struct spdk_io_channel *ioch; 5322 struct spdk_bdev_channel *bdev_ch; 5323 struct ut_expected_io *expected_io; 5324 struct spdk_bdev_opts bdev_opts = {}; 5325 uint32_t i, num_outstanding; 5326 uint64_t offset, num_blocks, max_write_zeroes_blocks, num_children; 5327 int rc; 5328 5329 spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts)); 5330 bdev_opts.bdev_io_pool_size = 512; 5331 bdev_opts.bdev_io_cache_size = 64; 5332 ut_init_bdev(&bdev_opts); 5333 5334 bdev = allocate_bdev("bdev"); 5335 5336 rc = spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc); 5337 CU_ASSERT_EQUAL(rc, 0); 5338 SPDK_CU_ASSERT_FATAL(desc != NULL); 5339 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 5340 ioch = spdk_bdev_get_io_channel(desc); 5341 SPDK_CU_ASSERT_FATAL(ioch != NULL); 5342 bdev_ch = spdk_io_channel_get_ctx(ioch); 5343 CU_ASSERT(TAILQ_EMPTY(&bdev_ch->io_submitted)); 5344 5345 fn_table.submit_request = stub_submit_request; 5346 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 5347 5348 /* Case 1: First test the request won't be split */ 5349 num_blocks = 32; 5350 5351 g_io_done = false; 5352 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, 0, num_blocks, 0); 5353 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 5354 rc = spdk_bdev_write_zeroes_blocks(desc, ioch, 0, num_blocks, io_done, NULL); 5355 CU_ASSERT_EQUAL(rc, 0); 5356 CU_ASSERT(g_io_done == false); 5357 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 5358 stub_complete_io(1); 5359 CU_ASSERT(g_io_done == true); 5360 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 5361 5362 /* Case 2: Test the split with 2 children requests */ 5363 max_write_zeroes_blocks = 8; 5364 bdev->max_write_zeroes = max_write_zeroes_blocks; 5365 num_blocks = max_write_zeroes_blocks * 2; 5366 offset = 0; 5367 5368 g_io_done = false; 5369 for (i = 0; i < 2; i++) { 5370 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, offset, max_write_zeroes_blocks, 5371 0); 5372 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 5373 offset += max_write_zeroes_blocks; 5374 } 5375 5376 rc = spdk_bdev_write_zeroes_blocks(desc, ioch, 0, num_blocks, io_done, NULL); 5377 CU_ASSERT_EQUAL(rc, 0); 5378 CU_ASSERT(g_io_done == false); 5379 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 5380 stub_complete_io(2); 5381 CU_ASSERT(g_io_done == true); 5382 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 5383 5384 /* Case 3: Test the split with 15 children requests, will finish 8 requests first */ 5385 num_children = 15; 5386 num_blocks = max_write_zeroes_blocks * num_children; 5387 g_io_done = false; 5388 offset = 0; 5389 for (i = 0; i < num_children; i++) { 5390 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, offset, max_write_zeroes_blocks, 5391 0); 5392 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 5393 offset += max_write_zeroes_blocks; 5394 } 5395 5396 rc = spdk_bdev_write_zeroes_blocks(desc, ioch, 0, num_blocks, io_done, NULL); 5397 CU_ASSERT_EQUAL(rc, 0); 5398 CU_ASSERT(g_io_done == false); 5399 5400 while (num_children > 0) { 5401 num_outstanding = spdk_min(num_children, SPDK_BDEV_MAX_CHILDREN_UNMAP_WRITE_ZEROES_REQS); 5402 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == num_outstanding); 5403 stub_complete_io(num_outstanding); 5404 num_children -= num_outstanding; 5405 } 5406 CU_ASSERT(g_io_done == true); 5407 5408 spdk_put_io_channel(ioch); 5409 spdk_bdev_close(desc); 5410 free_bdev(bdev); 5411 ut_fini_bdev(); 5412 } 5413 5414 static void 5415 bdev_set_options_test(void) 5416 { 5417 struct spdk_bdev_opts bdev_opts = {}; 5418 int rc; 5419 5420 /* Case1: Do not set opts_size */ 5421 rc = spdk_bdev_set_opts(&bdev_opts); 5422 CU_ASSERT(rc == -1); 5423 5424 spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts)); 5425 bdev_opts.bdev_io_pool_size = 4; 5426 bdev_opts.bdev_io_cache_size = 2; 5427 bdev_opts.small_buf_pool_size = 4; 5428 5429 /* Case 2: Do not set valid small_buf_pool_size and large_buf_pool_size */ 5430 rc = spdk_bdev_set_opts(&bdev_opts); 5431 CU_ASSERT(rc == -1); 5432 5433 /* Case 3: Do not set valid large_buf_pool_size */ 5434 bdev_opts.small_buf_pool_size = BUF_SMALL_POOL_SIZE; 5435 bdev_opts.large_buf_pool_size = BUF_LARGE_POOL_SIZE - 1; 5436 rc = spdk_bdev_set_opts(&bdev_opts); 5437 CU_ASSERT(rc == -1); 5438 5439 /* Case4: set valid large buf_pool_size */ 5440 bdev_opts.large_buf_pool_size = BUF_LARGE_POOL_SIZE; 5441 rc = spdk_bdev_set_opts(&bdev_opts); 5442 CU_ASSERT(rc == 0); 5443 5444 /* Case5: Set different valid value for small and large buf pool */ 5445 bdev_opts.large_buf_pool_size = BUF_SMALL_POOL_SIZE + 3; 5446 bdev_opts.large_buf_pool_size = BUF_LARGE_POOL_SIZE + 3; 5447 rc = spdk_bdev_set_opts(&bdev_opts); 5448 CU_ASSERT(rc == 0); 5449 } 5450 5451 static uint64_t 5452 get_ns_time(void) 5453 { 5454 int rc; 5455 struct timespec ts; 5456 5457 rc = clock_gettime(CLOCK_MONOTONIC, &ts); 5458 CU_ASSERT(rc == 0); 5459 return ts.tv_sec * 1000 * 1000 * 1000 + ts.tv_nsec; 5460 } 5461 5462 static int 5463 rb_tree_get_height(struct spdk_bdev_name *bdev_name) 5464 { 5465 int h1, h2; 5466 5467 if (bdev_name == NULL) { 5468 return -1; 5469 } else { 5470 h1 = rb_tree_get_height(RB_LEFT(bdev_name, node)); 5471 h2 = rb_tree_get_height(RB_RIGHT(bdev_name, node)); 5472 5473 return spdk_max(h1, h2) + 1; 5474 } 5475 } 5476 5477 static void 5478 bdev_multi_allocation(void) 5479 { 5480 const int max_bdev_num = 1024 * 16; 5481 char name[max_bdev_num][16]; 5482 char noexist_name[] = "invalid_bdev"; 5483 struct spdk_bdev *bdev[max_bdev_num]; 5484 int i, j; 5485 uint64_t last_time; 5486 int bdev_num; 5487 int height; 5488 5489 for (j = 0; j < max_bdev_num; j++) { 5490 snprintf(name[j], sizeof(name[j]), "bdev%d", j); 5491 } 5492 5493 for (i = 0; i < 16; i++) { 5494 last_time = get_ns_time(); 5495 bdev_num = 1024 * (i + 1); 5496 for (j = 0; j < bdev_num; j++) { 5497 bdev[j] = allocate_bdev(name[j]); 5498 height = rb_tree_get_height(&bdev[j]->internal.bdev_name); 5499 CU_ASSERT(height <= (int)(spdk_u32log2(2 * j + 2))); 5500 } 5501 SPDK_NOTICELOG("alloc bdev num %d takes %" PRIu64 " ms\n", bdev_num, 5502 (get_ns_time() - last_time) / 1000 / 1000); 5503 for (j = 0; j < bdev_num; j++) { 5504 CU_ASSERT(spdk_bdev_get_by_name(name[j]) != NULL); 5505 } 5506 CU_ASSERT(spdk_bdev_get_by_name(noexist_name) == NULL); 5507 5508 for (j = 0; j < bdev_num; j++) { 5509 free_bdev(bdev[j]); 5510 } 5511 for (j = 0; j < bdev_num; j++) { 5512 CU_ASSERT(spdk_bdev_get_by_name(name[j]) == NULL); 5513 } 5514 } 5515 } 5516 5517 static struct spdk_memory_domain *g_bdev_memory_domain = (struct spdk_memory_domain *) 0xf00df00d; 5518 5519 static int 5520 test_bdev_get_supported_dma_device_types_op(void *ctx, struct spdk_memory_domain **domains, 5521 int array_size) 5522 { 5523 if (array_size > 0 && domains) { 5524 domains[0] = g_bdev_memory_domain; 5525 } 5526 5527 return 1; 5528 } 5529 5530 static void 5531 bdev_get_memory_domains(void) 5532 { 5533 struct spdk_bdev_fn_table fn_table = { 5534 .get_memory_domains = test_bdev_get_supported_dma_device_types_op 5535 }; 5536 struct spdk_bdev bdev = { .fn_table = &fn_table }; 5537 struct spdk_memory_domain *domains[2] = {}; 5538 int rc; 5539 5540 /* bdev is NULL */ 5541 rc = spdk_bdev_get_memory_domains(NULL, domains, 2); 5542 CU_ASSERT(rc == -EINVAL); 5543 5544 /* domains is NULL */ 5545 rc = spdk_bdev_get_memory_domains(&bdev, NULL, 2); 5546 CU_ASSERT(rc == 1); 5547 5548 /* array size is 0 */ 5549 rc = spdk_bdev_get_memory_domains(&bdev, domains, 0); 5550 CU_ASSERT(rc == 1); 5551 5552 /* get_supported_dma_device_types op is set */ 5553 rc = spdk_bdev_get_memory_domains(&bdev, domains, 2); 5554 CU_ASSERT(rc == 1); 5555 CU_ASSERT(domains[0] == g_bdev_memory_domain); 5556 5557 /* get_supported_dma_device_types op is not set */ 5558 fn_table.get_memory_domains = NULL; 5559 rc = spdk_bdev_get_memory_domains(&bdev, domains, 2); 5560 CU_ASSERT(rc == 0); 5561 } 5562 5563 static void 5564 _bdev_io_ext(struct spdk_bdev_ext_io_opts *ext_io_opts) 5565 { 5566 struct spdk_bdev *bdev; 5567 struct spdk_bdev_desc *desc = NULL; 5568 struct spdk_io_channel *io_ch; 5569 char io_buf[512]; 5570 struct iovec iov = { .iov_base = io_buf, .iov_len = 512 }; 5571 struct ut_expected_io *expected_io; 5572 int rc; 5573 5574 ut_init_bdev(NULL); 5575 5576 bdev = allocate_bdev("bdev0"); 5577 bdev->md_interleave = false; 5578 bdev->md_len = 8; 5579 5580 rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc); 5581 CU_ASSERT(rc == 0); 5582 SPDK_CU_ASSERT_FATAL(desc != NULL); 5583 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 5584 io_ch = spdk_bdev_get_io_channel(desc); 5585 CU_ASSERT(io_ch != NULL); 5586 5587 /* read */ 5588 g_io_done = false; 5589 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 32, 14, 1); 5590 if (ext_io_opts) { 5591 expected_io->md_buf = ext_io_opts->metadata; 5592 } 5593 ut_expected_io_set_iov(expected_io, 0, iov.iov_base, iov.iov_len); 5594 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 5595 5596 rc = spdk_bdev_readv_blocks_ext(desc, io_ch, &iov, 1, 32, 14, io_done, NULL, ext_io_opts); 5597 5598 CU_ASSERT(rc == 0); 5599 CU_ASSERT(g_io_done == false); 5600 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 5601 stub_complete_io(1); 5602 CU_ASSERT(g_io_done == true); 5603 5604 /* write */ 5605 g_io_done = false; 5606 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 32, 14, 1); 5607 if (ext_io_opts) { 5608 expected_io->md_buf = ext_io_opts->metadata; 5609 } 5610 ut_expected_io_set_iov(expected_io, 0, iov.iov_base, iov.iov_len); 5611 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 5612 5613 rc = spdk_bdev_writev_blocks_ext(desc, io_ch, &iov, 1, 32, 14, io_done, NULL, ext_io_opts); 5614 5615 CU_ASSERT(rc == 0); 5616 CU_ASSERT(g_io_done == false); 5617 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 5618 stub_complete_io(1); 5619 CU_ASSERT(g_io_done == true); 5620 5621 spdk_put_io_channel(io_ch); 5622 spdk_bdev_close(desc); 5623 free_bdev(bdev); 5624 ut_fini_bdev(); 5625 5626 } 5627 5628 static void 5629 bdev_io_ext(void) 5630 { 5631 struct spdk_bdev_ext_io_opts ext_io_opts = { 5632 .metadata = (void *)0xFF000000, 5633 .size = sizeof(ext_io_opts) 5634 }; 5635 5636 _bdev_io_ext(&ext_io_opts); 5637 } 5638 5639 static void 5640 bdev_io_ext_no_opts(void) 5641 { 5642 _bdev_io_ext(NULL); 5643 } 5644 5645 static void 5646 bdev_io_ext_invalid_opts(void) 5647 { 5648 struct spdk_bdev *bdev; 5649 struct spdk_bdev_desc *desc = NULL; 5650 struct spdk_io_channel *io_ch; 5651 char io_buf[512]; 5652 struct iovec iov = { .iov_base = io_buf, .iov_len = 512 }; 5653 struct spdk_bdev_ext_io_opts ext_io_opts = { 5654 .metadata = (void *)0xFF000000, 5655 .size = sizeof(ext_io_opts) 5656 }; 5657 int rc; 5658 5659 ut_init_bdev(NULL); 5660 5661 bdev = allocate_bdev("bdev0"); 5662 bdev->md_interleave = false; 5663 bdev->md_len = 8; 5664 5665 rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc); 5666 CU_ASSERT(rc == 0); 5667 SPDK_CU_ASSERT_FATAL(desc != NULL); 5668 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 5669 io_ch = spdk_bdev_get_io_channel(desc); 5670 CU_ASSERT(io_ch != NULL); 5671 5672 /* Test invalid ext_opts size */ 5673 ext_io_opts.size = 0; 5674 rc = spdk_bdev_readv_blocks_ext(desc, io_ch, &iov, 1, 32, 14, io_done, NULL, &ext_io_opts); 5675 CU_ASSERT(rc == -EINVAL); 5676 rc = spdk_bdev_writev_blocks_ext(desc, io_ch, &iov, 1, 32, 14, io_done, NULL, &ext_io_opts); 5677 CU_ASSERT(rc == -EINVAL); 5678 5679 ext_io_opts.size = sizeof(ext_io_opts) * 2; 5680 rc = spdk_bdev_readv_blocks_ext(desc, io_ch, &iov, 1, 32, 14, io_done, NULL, &ext_io_opts); 5681 CU_ASSERT(rc == -EINVAL); 5682 rc = spdk_bdev_writev_blocks_ext(desc, io_ch, &iov, 1, 32, 14, io_done, NULL, &ext_io_opts); 5683 CU_ASSERT(rc == -EINVAL); 5684 5685 ext_io_opts.size = offsetof(struct spdk_bdev_ext_io_opts, metadata) + 5686 sizeof(ext_io_opts.metadata) - 1; 5687 rc = spdk_bdev_readv_blocks_ext(desc, io_ch, &iov, 1, 32, 14, io_done, NULL, &ext_io_opts); 5688 CU_ASSERT(rc == -EINVAL); 5689 rc = spdk_bdev_writev_blocks_ext(desc, io_ch, &iov, 1, 32, 14, io_done, NULL, &ext_io_opts); 5690 CU_ASSERT(rc == -EINVAL); 5691 5692 spdk_put_io_channel(io_ch); 5693 spdk_bdev_close(desc); 5694 free_bdev(bdev); 5695 ut_fini_bdev(); 5696 } 5697 5698 static void 5699 bdev_io_ext_split(void) 5700 { 5701 struct spdk_bdev *bdev; 5702 struct spdk_bdev_desc *desc = NULL; 5703 struct spdk_io_channel *io_ch; 5704 char io_buf[512]; 5705 struct iovec iov = { .iov_base = io_buf, .iov_len = 512 }; 5706 struct ut_expected_io *expected_io; 5707 struct spdk_bdev_ext_io_opts ext_io_opts = { 5708 .metadata = (void *)0xFF000000, 5709 .size = sizeof(ext_io_opts) 5710 }; 5711 int rc; 5712 5713 ut_init_bdev(NULL); 5714 5715 bdev = allocate_bdev("bdev0"); 5716 bdev->md_interleave = false; 5717 bdev->md_len = 8; 5718 5719 rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc); 5720 CU_ASSERT(rc == 0); 5721 SPDK_CU_ASSERT_FATAL(desc != NULL); 5722 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 5723 io_ch = spdk_bdev_get_io_channel(desc); 5724 CU_ASSERT(io_ch != NULL); 5725 5726 /* Check that IO request with ext_opts and metadata is split correctly 5727 * Offset 14, length 8, payload 0xF000 5728 * Child - Offset 14, length 2, payload 0xF000 5729 * Child - Offset 16, length 6, payload 0xF000 + 2 * 512 5730 */ 5731 bdev->optimal_io_boundary = 16; 5732 bdev->split_on_optimal_io_boundary = true; 5733 bdev->md_interleave = false; 5734 bdev->md_len = 8; 5735 5736 iov.iov_base = (void *)0xF000; 5737 iov.iov_len = 4096; 5738 memset(&ext_io_opts, 0, sizeof(ext_io_opts)); 5739 ext_io_opts.metadata = (void *)0xFF000000; 5740 ext_io_opts.size = sizeof(ext_io_opts); 5741 g_io_done = false; 5742 5743 /* read */ 5744 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 14, 2, 1); 5745 expected_io->md_buf = ext_io_opts.metadata; 5746 ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 2 * 512); 5747 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 5748 5749 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 16, 6, 1); 5750 expected_io->md_buf = ext_io_opts.metadata + 2 * 8; 5751 ut_expected_io_set_iov(expected_io, 0, (void *)(0xF000 + 2 * 512), 6 * 512); 5752 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 5753 5754 rc = spdk_bdev_readv_blocks_ext(desc, io_ch, &iov, 1, 14, 8, io_done, NULL, &ext_io_opts); 5755 CU_ASSERT(rc == 0); 5756 CU_ASSERT(g_io_done == false); 5757 5758 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 5759 stub_complete_io(2); 5760 CU_ASSERT(g_io_done == true); 5761 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 5762 5763 /* write */ 5764 g_io_done = false; 5765 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 14, 2, 1); 5766 expected_io->md_buf = ext_io_opts.metadata; 5767 ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 2 * 512); 5768 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 5769 5770 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 16, 6, 1); 5771 expected_io->md_buf = ext_io_opts.metadata + 2 * 8; 5772 ut_expected_io_set_iov(expected_io, 0, (void *)(0xF000 + 2 * 512), 6 * 512); 5773 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 5774 5775 rc = spdk_bdev_writev_blocks_ext(desc, io_ch, &iov, 1, 14, 8, io_done, NULL, &ext_io_opts); 5776 CU_ASSERT(rc == 0); 5777 CU_ASSERT(g_io_done == false); 5778 5779 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 5780 stub_complete_io(2); 5781 CU_ASSERT(g_io_done == true); 5782 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 5783 5784 spdk_put_io_channel(io_ch); 5785 spdk_bdev_close(desc); 5786 free_bdev(bdev); 5787 ut_fini_bdev(); 5788 } 5789 5790 static void 5791 bdev_io_ext_bounce_buffer(void) 5792 { 5793 struct spdk_bdev *bdev; 5794 struct spdk_bdev_desc *desc = NULL; 5795 struct spdk_io_channel *io_ch; 5796 char io_buf[512]; 5797 struct iovec iov = { .iov_base = io_buf, .iov_len = 512 }; 5798 struct ut_expected_io *expected_io; 5799 struct spdk_bdev_ext_io_opts ext_io_opts = { 5800 .metadata = (void *)0xFF000000, 5801 .size = sizeof(ext_io_opts) 5802 }; 5803 int rc; 5804 5805 ut_init_bdev(NULL); 5806 5807 bdev = allocate_bdev("bdev0"); 5808 bdev->md_interleave = false; 5809 bdev->md_len = 8; 5810 5811 rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc); 5812 CU_ASSERT(rc == 0); 5813 SPDK_CU_ASSERT_FATAL(desc != NULL); 5814 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 5815 io_ch = spdk_bdev_get_io_channel(desc); 5816 CU_ASSERT(io_ch != NULL); 5817 5818 /* Verify data pull/push 5819 * bdev doesn't support memory domains, so buffers from bdev memory pool will be used */ 5820 ext_io_opts.memory_domain = (struct spdk_memory_domain *)0xdeadbeef; 5821 5822 /* read */ 5823 g_io_done = false; 5824 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 32, 14, 1); 5825 ut_expected_io_set_iov(expected_io, 0, iov.iov_base, iov.iov_len); 5826 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 5827 5828 rc = spdk_bdev_readv_blocks_ext(desc, io_ch, &iov, 1, 32, 14, io_done, NULL, &ext_io_opts); 5829 5830 CU_ASSERT(rc == 0); 5831 CU_ASSERT(g_io_done == false); 5832 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 5833 stub_complete_io(1); 5834 CU_ASSERT(g_memory_domain_push_data_called == true); 5835 CU_ASSERT(g_io_done == true); 5836 5837 /* write */ 5838 g_io_done = false; 5839 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 32, 14, 1); 5840 ut_expected_io_set_iov(expected_io, 0, iov.iov_base, iov.iov_len); 5841 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 5842 5843 rc = spdk_bdev_writev_blocks_ext(desc, io_ch, &iov, 1, 32, 14, io_done, NULL, &ext_io_opts); 5844 5845 CU_ASSERT(rc == 0); 5846 CU_ASSERT(g_memory_domain_pull_data_called == true); 5847 CU_ASSERT(g_io_done == false); 5848 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 5849 stub_complete_io(1); 5850 CU_ASSERT(g_io_done == true); 5851 5852 spdk_put_io_channel(io_ch); 5853 spdk_bdev_close(desc); 5854 free_bdev(bdev); 5855 ut_fini_bdev(); 5856 } 5857 5858 static void 5859 bdev_register_uuid_alias(void) 5860 { 5861 struct spdk_bdev *bdev, *second; 5862 char uuid[SPDK_UUID_STRING_LEN]; 5863 int rc; 5864 5865 ut_init_bdev(NULL); 5866 bdev = allocate_bdev("bdev0"); 5867 5868 /* Make sure an UUID was generated */ 5869 CU_ASSERT_FALSE(spdk_mem_all_zero(&bdev->uuid, sizeof(bdev->uuid))); 5870 5871 /* Check that an UUID alias was registered */ 5872 spdk_uuid_fmt_lower(uuid, sizeof(uuid), &bdev->uuid); 5873 CU_ASSERT_EQUAL(spdk_bdev_get_by_name(uuid), bdev); 5874 5875 /* Unregister the bdev */ 5876 spdk_bdev_unregister(bdev, NULL, NULL); 5877 poll_threads(); 5878 CU_ASSERT_PTR_NULL(spdk_bdev_get_by_name(uuid)); 5879 5880 /* Check the same, but this time register the bdev with non-zero UUID */ 5881 rc = spdk_bdev_register(bdev); 5882 CU_ASSERT_EQUAL(rc, 0); 5883 CU_ASSERT_EQUAL(spdk_bdev_get_by_name(uuid), bdev); 5884 5885 /* Unregister the bdev */ 5886 spdk_bdev_unregister(bdev, NULL, NULL); 5887 poll_threads(); 5888 CU_ASSERT_PTR_NULL(spdk_bdev_get_by_name(uuid)); 5889 5890 /* Regiser the bdev using UUID as the name */ 5891 bdev->name = uuid; 5892 rc = spdk_bdev_register(bdev); 5893 CU_ASSERT_EQUAL(rc, 0); 5894 CU_ASSERT_EQUAL(spdk_bdev_get_by_name(uuid), bdev); 5895 5896 /* Unregister the bdev */ 5897 spdk_bdev_unregister(bdev, NULL, NULL); 5898 poll_threads(); 5899 CU_ASSERT_PTR_NULL(spdk_bdev_get_by_name(uuid)); 5900 5901 /* Check that it's not possible to register two bdevs with the same UUIDs */ 5902 bdev->name = "bdev0"; 5903 second = allocate_bdev("bdev1"); 5904 spdk_uuid_copy(&bdev->uuid, &second->uuid); 5905 rc = spdk_bdev_register(bdev); 5906 CU_ASSERT_EQUAL(rc, -EEXIST); 5907 5908 /* Regenerate the UUID and re-check */ 5909 spdk_uuid_generate(&bdev->uuid); 5910 rc = spdk_bdev_register(bdev); 5911 CU_ASSERT_EQUAL(rc, 0); 5912 5913 /* And check that both bdevs can be retrieved through their UUIDs */ 5914 spdk_uuid_fmt_lower(uuid, sizeof(uuid), &bdev->uuid); 5915 CU_ASSERT_EQUAL(spdk_bdev_get_by_name(uuid), bdev); 5916 spdk_uuid_fmt_lower(uuid, sizeof(uuid), &second->uuid); 5917 CU_ASSERT_EQUAL(spdk_bdev_get_by_name(uuid), second); 5918 5919 free_bdev(second); 5920 free_bdev(bdev); 5921 ut_fini_bdev(); 5922 } 5923 5924 static void 5925 bdev_unregister_by_name(void) 5926 { 5927 struct spdk_bdev *bdev; 5928 int rc; 5929 5930 bdev = allocate_bdev("bdev"); 5931 5932 g_event_type1 = 0xFF; 5933 g_unregister_arg = NULL; 5934 g_unregister_rc = -1; 5935 5936 rc = spdk_bdev_unregister_by_name("bdev1", &bdev_ut_if, bdev_unregister_cb, (void *)0x12345678); 5937 CU_ASSERT(rc == -ENODEV); 5938 5939 rc = spdk_bdev_unregister_by_name("bdev", &vbdev_ut_if, bdev_unregister_cb, (void *)0x12345678); 5940 CU_ASSERT(rc == -ENODEV); 5941 5942 rc = spdk_bdev_unregister_by_name("bdev", &bdev_ut_if, bdev_unregister_cb, (void *)0x12345678); 5943 CU_ASSERT(rc == 0); 5944 5945 /* Check that unregister callback is delayed */ 5946 CU_ASSERT(g_unregister_arg == NULL); 5947 CU_ASSERT(g_unregister_rc == -1); 5948 5949 poll_threads(); 5950 5951 /* Event callback shall not be issued because device was closed */ 5952 CU_ASSERT(g_event_type1 == 0xFF); 5953 /* Unregister callback is issued */ 5954 CU_ASSERT(g_unregister_arg == (void *)0x12345678); 5955 CU_ASSERT(g_unregister_rc == 0); 5956 5957 free_bdev(bdev); 5958 } 5959 5960 static int 5961 count_bdevs(void *ctx, struct spdk_bdev *bdev) 5962 { 5963 int *count = ctx; 5964 5965 (*count)++; 5966 5967 return 0; 5968 } 5969 5970 static void 5971 for_each_bdev_test(void) 5972 { 5973 struct spdk_bdev *bdev[8]; 5974 int rc, count; 5975 5976 bdev[0] = allocate_bdev("bdev0"); 5977 bdev[0]->internal.status = SPDK_BDEV_STATUS_REMOVING; 5978 5979 bdev[1] = allocate_bdev("bdev1"); 5980 rc = spdk_bdev_module_claim_bdev(bdev[1], NULL, &bdev_ut_if); 5981 CU_ASSERT(rc == 0); 5982 5983 bdev[2] = allocate_bdev("bdev2"); 5984 5985 bdev[3] = allocate_bdev("bdev3"); 5986 rc = spdk_bdev_module_claim_bdev(bdev[3], NULL, &bdev_ut_if); 5987 CU_ASSERT(rc == 0); 5988 5989 bdev[4] = allocate_bdev("bdev4"); 5990 5991 bdev[5] = allocate_bdev("bdev5"); 5992 rc = spdk_bdev_module_claim_bdev(bdev[5], NULL, &bdev_ut_if); 5993 CU_ASSERT(rc == 0); 5994 5995 bdev[6] = allocate_bdev("bdev6"); 5996 5997 bdev[7] = allocate_bdev("bdev7"); 5998 5999 count = 0; 6000 rc = spdk_for_each_bdev(&count, count_bdevs); 6001 CU_ASSERT(rc == 0); 6002 CU_ASSERT(count == 7); 6003 6004 count = 0; 6005 rc = spdk_for_each_bdev_leaf(&count, count_bdevs); 6006 CU_ASSERT(rc == 0); 6007 CU_ASSERT(count == 4); 6008 6009 bdev[0]->internal.status = SPDK_BDEV_STATUS_READY; 6010 free_bdev(bdev[0]); 6011 free_bdev(bdev[1]); 6012 free_bdev(bdev[2]); 6013 free_bdev(bdev[3]); 6014 free_bdev(bdev[4]); 6015 free_bdev(bdev[5]); 6016 free_bdev(bdev[6]); 6017 free_bdev(bdev[7]); 6018 } 6019 6020 static void 6021 bdev_seek_test(void) 6022 { 6023 struct spdk_bdev *bdev; 6024 struct spdk_bdev_desc *desc = NULL; 6025 struct spdk_io_channel *io_ch; 6026 int rc; 6027 6028 ut_init_bdev(NULL); 6029 poll_threads(); 6030 6031 bdev = allocate_bdev("bdev0"); 6032 6033 rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc); 6034 CU_ASSERT(rc == 0); 6035 poll_threads(); 6036 SPDK_CU_ASSERT_FATAL(desc != NULL); 6037 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 6038 io_ch = spdk_bdev_get_io_channel(desc); 6039 CU_ASSERT(io_ch != NULL); 6040 6041 /* Seek data not supported */ 6042 ut_enable_io_type(SPDK_BDEV_IO_TYPE_SEEK_DATA, false); 6043 rc = spdk_bdev_seek_data(desc, io_ch, 0, bdev_seek_cb, NULL); 6044 CU_ASSERT(rc == 0); 6045 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 6046 poll_threads(); 6047 CU_ASSERT(g_seek_offset == 0); 6048 6049 /* Seek hole not supported */ 6050 ut_enable_io_type(SPDK_BDEV_IO_TYPE_SEEK_HOLE, false); 6051 rc = spdk_bdev_seek_hole(desc, io_ch, 0, bdev_seek_cb, NULL); 6052 CU_ASSERT(rc == 0); 6053 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 6054 poll_threads(); 6055 CU_ASSERT(g_seek_offset == UINT64_MAX); 6056 6057 /* Seek data supported */ 6058 g_seek_data_offset = 12345; 6059 ut_enable_io_type(SPDK_BDEV_IO_TYPE_SEEK_DATA, true); 6060 rc = spdk_bdev_seek_data(desc, io_ch, 0, bdev_seek_cb, NULL); 6061 CU_ASSERT(rc == 0); 6062 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 6063 stub_complete_io(1); 6064 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 6065 CU_ASSERT(g_seek_offset == 12345); 6066 6067 /* Seek hole supported */ 6068 g_seek_hole_offset = 67890; 6069 ut_enable_io_type(SPDK_BDEV_IO_TYPE_SEEK_HOLE, true); 6070 rc = spdk_bdev_seek_hole(desc, io_ch, 0, bdev_seek_cb, NULL); 6071 CU_ASSERT(rc == 0); 6072 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 6073 stub_complete_io(1); 6074 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 6075 CU_ASSERT(g_seek_offset == 67890); 6076 6077 spdk_put_io_channel(io_ch); 6078 spdk_bdev_close(desc); 6079 free_bdev(bdev); 6080 ut_fini_bdev(); 6081 } 6082 6083 static void 6084 bdev_copy(void) 6085 { 6086 struct spdk_bdev *bdev; 6087 struct spdk_bdev_desc *desc = NULL; 6088 struct spdk_io_channel *ioch; 6089 struct ut_expected_io *expected_io; 6090 uint64_t src_offset, num_blocks; 6091 uint32_t num_completed; 6092 int rc; 6093 6094 ut_init_bdev(NULL); 6095 bdev = allocate_bdev("bdev"); 6096 6097 rc = spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc); 6098 CU_ASSERT_EQUAL(rc, 0); 6099 SPDK_CU_ASSERT_FATAL(desc != NULL); 6100 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 6101 ioch = spdk_bdev_get_io_channel(desc); 6102 SPDK_CU_ASSERT_FATAL(ioch != NULL); 6103 6104 fn_table.submit_request = stub_submit_request; 6105 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 6106 6107 /* First test that if the bdev supports copy, the request won't be split */ 6108 bdev->md_len = 0; 6109 bdev->blocklen = 512; 6110 num_blocks = 128; 6111 src_offset = bdev->blockcnt - num_blocks; 6112 6113 expected_io = ut_alloc_expected_copy_io(SPDK_BDEV_IO_TYPE_COPY, 0, src_offset, num_blocks); 6114 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 6115 6116 rc = spdk_bdev_copy_blocks(desc, ioch, 0, src_offset, num_blocks, io_done, NULL); 6117 CU_ASSERT_EQUAL(rc, 0); 6118 num_completed = stub_complete_io(1); 6119 CU_ASSERT_EQUAL(num_completed, 1); 6120 6121 /* Check that if copy is not supported it'll still work */ 6122 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, src_offset, num_blocks, 0); 6123 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 6124 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 0, num_blocks, 0); 6125 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 6126 6127 ut_enable_io_type(SPDK_BDEV_IO_TYPE_COPY, false); 6128 6129 rc = spdk_bdev_copy_blocks(desc, ioch, 0, src_offset, num_blocks, io_done, NULL); 6130 CU_ASSERT_EQUAL(rc, 0); 6131 num_completed = stub_complete_io(1); 6132 CU_ASSERT_EQUAL(num_completed, 1); 6133 num_completed = stub_complete_io(1); 6134 CU_ASSERT_EQUAL(num_completed, 1); 6135 6136 ut_enable_io_type(SPDK_BDEV_IO_TYPE_COPY, true); 6137 spdk_put_io_channel(ioch); 6138 spdk_bdev_close(desc); 6139 free_bdev(bdev); 6140 ut_fini_bdev(); 6141 } 6142 6143 static void 6144 bdev_copy_split_test(void) 6145 { 6146 struct spdk_bdev *bdev; 6147 struct spdk_bdev_desc *desc = NULL; 6148 struct spdk_io_channel *ioch; 6149 struct spdk_bdev_channel *bdev_ch; 6150 struct ut_expected_io *expected_io; 6151 struct spdk_bdev_opts bdev_opts = {}; 6152 uint32_t i, num_outstanding; 6153 uint64_t offset, src_offset, num_blocks, max_copy_blocks, num_children; 6154 int rc; 6155 6156 spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts)); 6157 bdev_opts.bdev_io_pool_size = 512; 6158 bdev_opts.bdev_io_cache_size = 64; 6159 rc = spdk_bdev_set_opts(&bdev_opts); 6160 CU_ASSERT(rc == 0); 6161 6162 ut_init_bdev(NULL); 6163 bdev = allocate_bdev("bdev"); 6164 6165 rc = spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc); 6166 CU_ASSERT_EQUAL(rc, 0); 6167 SPDK_CU_ASSERT_FATAL(desc != NULL); 6168 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 6169 ioch = spdk_bdev_get_io_channel(desc); 6170 SPDK_CU_ASSERT_FATAL(ioch != NULL); 6171 bdev_ch = spdk_io_channel_get_ctx(ioch); 6172 CU_ASSERT(TAILQ_EMPTY(&bdev_ch->io_submitted)); 6173 6174 fn_table.submit_request = stub_submit_request; 6175 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 6176 6177 /* Case 1: First test the request won't be split */ 6178 num_blocks = 32; 6179 src_offset = bdev->blockcnt - num_blocks; 6180 6181 g_io_done = false; 6182 expected_io = ut_alloc_expected_copy_io(SPDK_BDEV_IO_TYPE_COPY, 0, src_offset, num_blocks); 6183 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 6184 rc = spdk_bdev_copy_blocks(desc, ioch, 0, src_offset, num_blocks, io_done, NULL); 6185 CU_ASSERT_EQUAL(rc, 0); 6186 CU_ASSERT(g_io_done == false); 6187 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 6188 stub_complete_io(1); 6189 CU_ASSERT(g_io_done == true); 6190 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 6191 6192 /* Case 2: Test the split with 2 children requests */ 6193 max_copy_blocks = 8; 6194 bdev->max_copy = max_copy_blocks; 6195 num_children = 2; 6196 num_blocks = max_copy_blocks * num_children; 6197 offset = 0; 6198 src_offset = bdev->blockcnt - num_blocks; 6199 6200 g_io_done = false; 6201 for (i = 0; i < num_children; i++) { 6202 expected_io = ut_alloc_expected_copy_io(SPDK_BDEV_IO_TYPE_COPY, offset, 6203 src_offset + offset, max_copy_blocks); 6204 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 6205 offset += max_copy_blocks; 6206 } 6207 6208 rc = spdk_bdev_copy_blocks(desc, ioch, 0, src_offset, num_blocks, io_done, NULL); 6209 CU_ASSERT_EQUAL(rc, 0); 6210 CU_ASSERT(g_io_done == false); 6211 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == num_children); 6212 stub_complete_io(num_children); 6213 CU_ASSERT(g_io_done == true); 6214 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 6215 6216 /* Case 3: Test the split with 15 children requests, will finish 8 requests first */ 6217 num_children = 15; 6218 num_blocks = max_copy_blocks * num_children; 6219 offset = 0; 6220 src_offset = bdev->blockcnt - num_blocks; 6221 6222 g_io_done = false; 6223 for (i = 0; i < num_children; i++) { 6224 expected_io = ut_alloc_expected_copy_io(SPDK_BDEV_IO_TYPE_COPY, offset, 6225 src_offset + offset, max_copy_blocks); 6226 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 6227 offset += max_copy_blocks; 6228 } 6229 6230 rc = spdk_bdev_copy_blocks(desc, ioch, 0, src_offset, num_blocks, io_done, NULL); 6231 CU_ASSERT_EQUAL(rc, 0); 6232 CU_ASSERT(g_io_done == false); 6233 6234 while (num_children > 0) { 6235 num_outstanding = spdk_min(num_children, SPDK_BDEV_MAX_CHILDREN_COPY_REQS); 6236 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == num_outstanding); 6237 stub_complete_io(num_outstanding); 6238 num_children -= num_outstanding; 6239 } 6240 CU_ASSERT(g_io_done == true); 6241 6242 spdk_put_io_channel(ioch); 6243 spdk_bdev_close(desc); 6244 free_bdev(bdev); 6245 ut_fini_bdev(); 6246 } 6247 6248 static void 6249 examine_claim_v1(struct spdk_bdev *bdev) 6250 { 6251 int rc; 6252 6253 rc = spdk_bdev_module_claim_bdev(bdev, NULL, &vbdev_ut_if); 6254 CU_ASSERT(rc == 0); 6255 } 6256 6257 static void 6258 examine_no_lock_held(struct spdk_bdev *bdev) 6259 { 6260 CU_ASSERT(!spdk_spin_held(&g_bdev_mgr.spinlock)); 6261 CU_ASSERT(!spdk_spin_held(&bdev->internal.spinlock)); 6262 } 6263 6264 struct examine_claim_v2_ctx { 6265 struct ut_examine_ctx examine_ctx; 6266 enum spdk_bdev_claim_type claim_type; 6267 struct spdk_bdev_desc *desc; 6268 }; 6269 6270 static void 6271 examine_claim_v2(struct spdk_bdev *bdev) 6272 { 6273 struct examine_claim_v2_ctx *ctx = bdev->ctxt; 6274 int rc; 6275 6276 rc = spdk_bdev_open_ext(bdev->name, false, bdev_ut_event_cb, NULL, &ctx->desc); 6277 CU_ASSERT(rc == 0); 6278 6279 rc = spdk_bdev_module_claim_bdev_desc(ctx->desc, ctx->claim_type, NULL, &vbdev_ut_if); 6280 CU_ASSERT(rc == 0); 6281 } 6282 6283 static void 6284 examine_locks(void) 6285 { 6286 struct spdk_bdev *bdev; 6287 struct ut_examine_ctx ctx = { 0 }; 6288 struct examine_claim_v2_ctx v2_ctx; 6289 6290 /* Without any claims, one code path is taken */ 6291 ctx.examine_config = examine_no_lock_held; 6292 ctx.examine_disk = examine_no_lock_held; 6293 bdev = allocate_bdev_ctx("bdev0", &ctx); 6294 CU_ASSERT(ctx.examine_config_count == 1); 6295 CU_ASSERT(ctx.examine_disk_count == 1); 6296 CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_NONE); 6297 CU_ASSERT(bdev->internal.claim.v1.module == NULL); 6298 free_bdev(bdev); 6299 6300 /* Exercise another path that is taken when examine_config() takes a v1 claim. */ 6301 memset(&ctx, 0, sizeof(ctx)); 6302 ctx.examine_config = examine_claim_v1; 6303 ctx.examine_disk = examine_no_lock_held; 6304 bdev = allocate_bdev_ctx("bdev0", &ctx); 6305 CU_ASSERT(ctx.examine_config_count == 1); 6306 CU_ASSERT(ctx.examine_disk_count == 1); 6307 CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_EXCL_WRITE); 6308 CU_ASSERT(bdev->internal.claim.v1.module == &vbdev_ut_if); 6309 spdk_bdev_module_release_bdev(bdev); 6310 CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_NONE); 6311 CU_ASSERT(bdev->internal.claim.v1.module == NULL); 6312 free_bdev(bdev); 6313 6314 /* Exercise the final path that comes with v2 claims. */ 6315 memset(&v2_ctx, 0, sizeof(v2_ctx)); 6316 v2_ctx.examine_ctx.examine_config = examine_claim_v2; 6317 v2_ctx.examine_ctx.examine_disk = examine_no_lock_held; 6318 v2_ctx.claim_type = SPDK_BDEV_CLAIM_READ_MANY_WRITE_NONE; 6319 bdev = allocate_bdev_ctx("bdev0", &v2_ctx); 6320 CU_ASSERT(v2_ctx.examine_ctx.examine_config_count == 1); 6321 CU_ASSERT(v2_ctx.examine_ctx.examine_disk_count == 1); 6322 CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_READ_MANY_WRITE_NONE); 6323 spdk_bdev_close(v2_ctx.desc); 6324 CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_NONE); 6325 free_bdev(bdev); 6326 } 6327 6328 #define UT_ASSERT_CLAIM_V2_COUNT(bdev, expect) \ 6329 do { \ 6330 uint32_t len = 0; \ 6331 struct spdk_bdev_module_claim *claim; \ 6332 TAILQ_FOREACH(claim, &bdev->internal.claim.v2.claims, link) { \ 6333 len++; \ 6334 } \ 6335 CU_ASSERT(len == expect); \ 6336 } while (0) 6337 6338 static void 6339 claim_v2_rwo(void) 6340 { 6341 struct spdk_bdev *bdev; 6342 struct spdk_bdev_desc *desc; 6343 struct spdk_bdev_desc *desc2; 6344 struct spdk_bdev_claim_opts opts; 6345 int rc; 6346 6347 bdev = allocate_bdev("bdev0"); 6348 6349 /* Claim without options */ 6350 desc = NULL; 6351 rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc); 6352 CU_ASSERT(rc == 0); 6353 SPDK_CU_ASSERT_FATAL(desc != NULL); 6354 rc = spdk_bdev_module_claim_bdev_desc(desc, SPDK_BDEV_CLAIM_READ_MANY_WRITE_ONE, NULL, 6355 &bdev_ut_if); 6356 CU_ASSERT(rc == 0); 6357 CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_READ_MANY_WRITE_ONE); 6358 CU_ASSERT(desc->claim != NULL); 6359 CU_ASSERT(desc->claim->module == &bdev_ut_if); 6360 CU_ASSERT(strcmp(desc->claim->name, "") == 0); 6361 CU_ASSERT(TAILQ_FIRST(&bdev->internal.claim.v2.claims) == desc->claim); 6362 UT_ASSERT_CLAIM_V2_COUNT(bdev, 1); 6363 6364 /* Release the claim by closing the descriptor */ 6365 spdk_bdev_close(desc); 6366 CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_NONE); 6367 CU_ASSERT(TAILQ_EMPTY(&bdev->internal.open_descs)); 6368 UT_ASSERT_CLAIM_V2_COUNT(bdev, 0); 6369 6370 /* Claim with options */ 6371 spdk_bdev_claim_opts_init(&opts, sizeof(opts)); 6372 snprintf(opts.name, sizeof(opts.name), "%s", "claim with options"); 6373 desc = NULL; 6374 rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc); 6375 CU_ASSERT(rc == 0); 6376 SPDK_CU_ASSERT_FATAL(desc != NULL); 6377 rc = spdk_bdev_module_claim_bdev_desc(desc, SPDK_BDEV_CLAIM_READ_MANY_WRITE_ONE, &opts, 6378 &bdev_ut_if); 6379 CU_ASSERT(rc == 0); 6380 CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_READ_MANY_WRITE_ONE); 6381 CU_ASSERT(desc->claim != NULL); 6382 CU_ASSERT(desc->claim->module == &bdev_ut_if); 6383 CU_ASSERT(strcmp(desc->claim->name, "claim with options") == 0); 6384 memset(&opts, 0, sizeof(opts)); 6385 CU_ASSERT(strcmp(desc->claim->name, "claim with options") == 0); 6386 CU_ASSERT(TAILQ_FIRST(&bdev->internal.claim.v2.claims) == desc->claim); 6387 UT_ASSERT_CLAIM_V2_COUNT(bdev, 1); 6388 6389 /* The claim blocks new writers. */ 6390 desc2 = NULL; 6391 rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc2); 6392 CU_ASSERT(rc == -EPERM); 6393 CU_ASSERT(desc2 == NULL); 6394 6395 /* New readers are allowed */ 6396 desc2 = NULL; 6397 rc = spdk_bdev_open_ext("bdev0", false, bdev_ut_event_cb, NULL, &desc2); 6398 CU_ASSERT(rc == 0); 6399 CU_ASSERT(desc2 != NULL); 6400 CU_ASSERT(!desc2->write); 6401 6402 /* No new v2 RWO claims are allowed */ 6403 rc = spdk_bdev_module_claim_bdev_desc(desc2, SPDK_BDEV_CLAIM_READ_MANY_WRITE_ONE, NULL, 6404 &bdev_ut_if); 6405 CU_ASSERT(rc == -EPERM); 6406 6407 /* No new v2 ROM claims are allowed */ 6408 CU_ASSERT(!desc2->write); 6409 rc = spdk_bdev_module_claim_bdev_desc(desc2, SPDK_BDEV_CLAIM_READ_MANY_WRITE_NONE, NULL, 6410 &bdev_ut_if); 6411 CU_ASSERT(rc == -EPERM); 6412 CU_ASSERT(!desc2->write); 6413 6414 /* No new v2 RWM claims are allowed */ 6415 spdk_bdev_claim_opts_init(&opts, sizeof(opts)); 6416 opts.shared_claim_key = (uint64_t)&opts; 6417 rc = spdk_bdev_module_claim_bdev_desc(desc2, SPDK_BDEV_CLAIM_READ_MANY_WRITE_SHARED, &opts, 6418 &bdev_ut_if); 6419 CU_ASSERT(rc == -EPERM); 6420 CU_ASSERT(!desc2->write); 6421 6422 /* No new v1 claims are allowed */ 6423 rc = spdk_bdev_module_claim_bdev(bdev, NULL, &bdev_ut_if); 6424 CU_ASSERT(rc == -EPERM); 6425 6426 /* None of the above changed the existing claim */ 6427 CU_ASSERT(TAILQ_FIRST(&bdev->internal.claim.v2.claims) == desc->claim); 6428 UT_ASSERT_CLAIM_V2_COUNT(bdev, 1); 6429 6430 /* Closing the first descriptor now allows a new claim and it is promoted to rw. */ 6431 spdk_bdev_close(desc); 6432 CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_NONE); 6433 UT_ASSERT_CLAIM_V2_COUNT(bdev, 0); 6434 CU_ASSERT(!desc2->write); 6435 rc = spdk_bdev_module_claim_bdev_desc(desc2, SPDK_BDEV_CLAIM_READ_MANY_WRITE_ONE, NULL, 6436 &bdev_ut_if); 6437 CU_ASSERT(rc == 0); 6438 CU_ASSERT(desc2->claim != NULL); 6439 CU_ASSERT(desc2->write); 6440 CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_READ_MANY_WRITE_ONE); 6441 CU_ASSERT(TAILQ_FIRST(&bdev->internal.claim.v2.claims) == desc2->claim); 6442 UT_ASSERT_CLAIM_V2_COUNT(bdev, 1); 6443 spdk_bdev_close(desc2); 6444 CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_NONE); 6445 UT_ASSERT_CLAIM_V2_COUNT(bdev, 0); 6446 6447 /* Cannot claim with a key */ 6448 desc = NULL; 6449 rc = spdk_bdev_open_ext("bdev0", false, bdev_ut_event_cb, NULL, &desc); 6450 CU_ASSERT(rc == 0); 6451 SPDK_CU_ASSERT_FATAL(desc != NULL); 6452 spdk_bdev_claim_opts_init(&opts, sizeof(opts)); 6453 opts.shared_claim_key = (uint64_t)&opts; 6454 rc = spdk_bdev_module_claim_bdev_desc(desc, SPDK_BDEV_CLAIM_READ_MANY_WRITE_ONE, &opts, 6455 &bdev_ut_if); 6456 CU_ASSERT(rc == -EINVAL); 6457 CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_NONE); 6458 UT_ASSERT_CLAIM_V2_COUNT(bdev, 0); 6459 spdk_bdev_close(desc); 6460 6461 /* Clean up */ 6462 free_bdev(bdev); 6463 } 6464 6465 static void 6466 claim_v2_rom(void) 6467 { 6468 struct spdk_bdev *bdev; 6469 struct spdk_bdev_desc *desc; 6470 struct spdk_bdev_desc *desc2; 6471 struct spdk_bdev_claim_opts opts; 6472 int rc; 6473 6474 bdev = allocate_bdev("bdev0"); 6475 6476 /* Claim without options */ 6477 desc = NULL; 6478 rc = spdk_bdev_open_ext("bdev0", false, bdev_ut_event_cb, NULL, &desc); 6479 CU_ASSERT(rc == 0); 6480 SPDK_CU_ASSERT_FATAL(desc != NULL); 6481 rc = spdk_bdev_module_claim_bdev_desc(desc, SPDK_BDEV_CLAIM_READ_MANY_WRITE_NONE, NULL, 6482 &bdev_ut_if); 6483 CU_ASSERT(rc == 0); 6484 CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_READ_MANY_WRITE_NONE); 6485 CU_ASSERT(desc->claim != NULL); 6486 CU_ASSERT(desc->claim->module == &bdev_ut_if); 6487 CU_ASSERT(strcmp(desc->claim->name, "") == 0); 6488 CU_ASSERT(TAILQ_FIRST(&bdev->internal.claim.v2.claims) == desc->claim); 6489 UT_ASSERT_CLAIM_V2_COUNT(bdev, 1); 6490 6491 /* Release the claim by closing the descriptor */ 6492 spdk_bdev_close(desc); 6493 CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_NONE); 6494 CU_ASSERT(TAILQ_EMPTY(&bdev->internal.open_descs)); 6495 CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_NONE); 6496 UT_ASSERT_CLAIM_V2_COUNT(bdev, 0); 6497 6498 /* Claim with options */ 6499 spdk_bdev_claim_opts_init(&opts, sizeof(opts)); 6500 snprintf(opts.name, sizeof(opts.name), "%s", "claim with options"); 6501 desc = NULL; 6502 rc = spdk_bdev_open_ext("bdev0", false, bdev_ut_event_cb, NULL, &desc); 6503 CU_ASSERT(rc == 0); 6504 SPDK_CU_ASSERT_FATAL(desc != NULL); 6505 rc = spdk_bdev_module_claim_bdev_desc(desc, SPDK_BDEV_CLAIM_READ_MANY_WRITE_NONE, &opts, 6506 &bdev_ut_if); 6507 CU_ASSERT(rc == 0); 6508 CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_READ_MANY_WRITE_NONE); 6509 SPDK_CU_ASSERT_FATAL(desc->claim != NULL); 6510 CU_ASSERT(desc->claim->module == &bdev_ut_if); 6511 CU_ASSERT(strcmp(desc->claim->name, "claim with options") == 0); 6512 memset(&opts, 0, sizeof(opts)); 6513 CU_ASSERT(strcmp(desc->claim->name, "claim with options") == 0); 6514 CU_ASSERT(TAILQ_FIRST(&bdev->internal.claim.v2.claims) == desc->claim); 6515 UT_ASSERT_CLAIM_V2_COUNT(bdev, 1); 6516 6517 /* The claim blocks new writers. */ 6518 desc2 = NULL; 6519 rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc2); 6520 CU_ASSERT(rc == -EPERM); 6521 CU_ASSERT(desc2 == NULL); 6522 6523 /* New readers are allowed */ 6524 desc2 = NULL; 6525 rc = spdk_bdev_open_ext("bdev0", false, bdev_ut_event_cb, NULL, &desc2); 6526 CU_ASSERT(rc == 0); 6527 CU_ASSERT(desc2 != NULL); 6528 CU_ASSERT(!desc2->write); 6529 6530 /* No new v2 RWO claims are allowed */ 6531 rc = spdk_bdev_module_claim_bdev_desc(desc2, SPDK_BDEV_CLAIM_READ_MANY_WRITE_ONE, NULL, 6532 &bdev_ut_if); 6533 CU_ASSERT(rc == -EPERM); 6534 6535 /* No new v2 RWM claims are allowed */ 6536 spdk_bdev_claim_opts_init(&opts, sizeof(opts)); 6537 opts.shared_claim_key = (uint64_t)&opts; 6538 rc = spdk_bdev_module_claim_bdev_desc(desc2, SPDK_BDEV_CLAIM_READ_MANY_WRITE_SHARED, &opts, 6539 &bdev_ut_if); 6540 CU_ASSERT(rc == -EPERM); 6541 CU_ASSERT(!desc2->write); 6542 6543 /* No new v1 claims are allowed */ 6544 rc = spdk_bdev_module_claim_bdev(bdev, NULL, &bdev_ut_if); 6545 CU_ASSERT(rc == -EPERM); 6546 6547 /* None of the above messed up the existing claim */ 6548 CU_ASSERT(TAILQ_FIRST(&bdev->internal.claim.v2.claims) == desc->claim); 6549 UT_ASSERT_CLAIM_V2_COUNT(bdev, 1); 6550 6551 /* New v2 ROM claims are allowed and the descriptor stays read-only. */ 6552 CU_ASSERT(!desc2->write); 6553 rc = spdk_bdev_module_claim_bdev_desc(desc2, SPDK_BDEV_CLAIM_READ_MANY_WRITE_NONE, NULL, 6554 &bdev_ut_if); 6555 CU_ASSERT(rc == 0); 6556 CU_ASSERT(!desc2->write); 6557 CU_ASSERT(TAILQ_FIRST(&bdev->internal.claim.v2.claims) == desc->claim); 6558 CU_ASSERT(TAILQ_NEXT(desc->claim, link) == desc2->claim); 6559 UT_ASSERT_CLAIM_V2_COUNT(bdev, 2); 6560 6561 /* Claim remains when closing the first descriptor */ 6562 spdk_bdev_close(desc); 6563 CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_READ_MANY_WRITE_NONE); 6564 CU_ASSERT(!TAILQ_EMPTY(&bdev->internal.open_descs)); 6565 CU_ASSERT(TAILQ_FIRST(&bdev->internal.claim.v2.claims) == desc2->claim); 6566 UT_ASSERT_CLAIM_V2_COUNT(bdev, 1); 6567 6568 /* Claim removed when closing the other descriptor */ 6569 spdk_bdev_close(desc2); 6570 CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_NONE); 6571 UT_ASSERT_CLAIM_V2_COUNT(bdev, 0); 6572 CU_ASSERT(TAILQ_EMPTY(&bdev->internal.open_descs)); 6573 6574 /* Cannot claim with a key */ 6575 desc = NULL; 6576 rc = spdk_bdev_open_ext("bdev0", false, bdev_ut_event_cb, NULL, &desc); 6577 CU_ASSERT(rc == 0); 6578 SPDK_CU_ASSERT_FATAL(desc != NULL); 6579 spdk_bdev_claim_opts_init(&opts, sizeof(opts)); 6580 opts.shared_claim_key = (uint64_t)&opts; 6581 rc = spdk_bdev_module_claim_bdev_desc(desc, SPDK_BDEV_CLAIM_READ_MANY_WRITE_NONE, &opts, 6582 &bdev_ut_if); 6583 CU_ASSERT(rc == -EINVAL); 6584 CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_NONE); 6585 UT_ASSERT_CLAIM_V2_COUNT(bdev, 0); 6586 spdk_bdev_close(desc); 6587 6588 /* Cannot claim with a read-write descriptor */ 6589 desc = NULL; 6590 rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc); 6591 CU_ASSERT(rc == 0); 6592 SPDK_CU_ASSERT_FATAL(desc != NULL); 6593 rc = spdk_bdev_module_claim_bdev_desc(desc, SPDK_BDEV_CLAIM_READ_MANY_WRITE_NONE, NULL, 6594 &bdev_ut_if); 6595 CU_ASSERT(rc == -EINVAL); 6596 CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_NONE); 6597 UT_ASSERT_CLAIM_V2_COUNT(bdev, 0); 6598 spdk_bdev_close(desc); 6599 CU_ASSERT(TAILQ_EMPTY(&bdev->internal.open_descs)); 6600 6601 /* Clean up */ 6602 free_bdev(bdev); 6603 } 6604 6605 static void 6606 claim_v2_rwm(void) 6607 { 6608 struct spdk_bdev *bdev; 6609 struct spdk_bdev_desc *desc; 6610 struct spdk_bdev_desc *desc2; 6611 struct spdk_bdev_claim_opts opts; 6612 char good_key, bad_key; 6613 int rc; 6614 6615 bdev = allocate_bdev("bdev0"); 6616 6617 /* Claim without options should fail */ 6618 desc = NULL; 6619 rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc); 6620 CU_ASSERT(rc == 0); 6621 SPDK_CU_ASSERT_FATAL(desc != NULL); 6622 rc = spdk_bdev_module_claim_bdev_desc(desc, SPDK_BDEV_CLAIM_READ_MANY_WRITE_SHARED, NULL, 6623 &bdev_ut_if); 6624 CU_ASSERT(rc == -EINVAL); 6625 CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_NONE); 6626 UT_ASSERT_CLAIM_V2_COUNT(bdev, 0); 6627 CU_ASSERT(desc->claim == NULL); 6628 6629 /* Claim with options */ 6630 spdk_bdev_claim_opts_init(&opts, sizeof(opts)); 6631 snprintf(opts.name, sizeof(opts.name), "%s", "claim with options"); 6632 opts.shared_claim_key = (uint64_t)&good_key; 6633 rc = spdk_bdev_module_claim_bdev_desc(desc, SPDK_BDEV_CLAIM_READ_MANY_WRITE_SHARED, &opts, 6634 &bdev_ut_if); 6635 CU_ASSERT(rc == 0); 6636 CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_READ_MANY_WRITE_SHARED); 6637 SPDK_CU_ASSERT_FATAL(desc->claim != NULL); 6638 CU_ASSERT(desc->claim->module == &bdev_ut_if); 6639 CU_ASSERT(strcmp(desc->claim->name, "claim with options") == 0); 6640 memset(&opts, 0, sizeof(opts)); 6641 CU_ASSERT(strcmp(desc->claim->name, "claim with options") == 0); 6642 CU_ASSERT(TAILQ_FIRST(&bdev->internal.claim.v2.claims) == desc->claim); 6643 UT_ASSERT_CLAIM_V2_COUNT(bdev, 1); 6644 6645 /* The claim blocks new writers. */ 6646 desc2 = NULL; 6647 rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc2); 6648 CU_ASSERT(rc == -EPERM); 6649 CU_ASSERT(desc2 == NULL); 6650 6651 /* New readers are allowed */ 6652 desc2 = NULL; 6653 rc = spdk_bdev_open_ext("bdev0", false, bdev_ut_event_cb, NULL, &desc2); 6654 CU_ASSERT(rc == 0); 6655 CU_ASSERT(desc2 != NULL); 6656 CU_ASSERT(!desc2->write); 6657 6658 /* No new v2 RWO claims are allowed */ 6659 rc = spdk_bdev_module_claim_bdev_desc(desc2, SPDK_BDEV_CLAIM_READ_MANY_WRITE_ONE, NULL, 6660 &bdev_ut_if); 6661 CU_ASSERT(rc == -EPERM); 6662 6663 /* No new v2 ROM claims are allowed and the descriptor stays read-only. */ 6664 CU_ASSERT(!desc2->write); 6665 rc = spdk_bdev_module_claim_bdev_desc(desc2, SPDK_BDEV_CLAIM_READ_MANY_WRITE_NONE, NULL, 6666 &bdev_ut_if); 6667 CU_ASSERT(rc == -EPERM); 6668 CU_ASSERT(!desc2->write); 6669 6670 /* No new v1 claims are allowed */ 6671 rc = spdk_bdev_module_claim_bdev(bdev, NULL, &bdev_ut_if); 6672 CU_ASSERT(rc == -EPERM); 6673 6674 /* No new v2 RWM claims are allowed if the key does not match */ 6675 spdk_bdev_claim_opts_init(&opts, sizeof(opts)); 6676 opts.shared_claim_key = (uint64_t)&bad_key; 6677 rc = spdk_bdev_module_claim_bdev_desc(desc2, SPDK_BDEV_CLAIM_READ_MANY_WRITE_SHARED, &opts, 6678 &bdev_ut_if); 6679 CU_ASSERT(rc == -EPERM); 6680 CU_ASSERT(!desc2->write); 6681 6682 /* None of the above messed up the existing claim */ 6683 CU_ASSERT(TAILQ_FIRST(&bdev->internal.claim.v2.claims) == desc->claim); 6684 UT_ASSERT_CLAIM_V2_COUNT(bdev, 1); 6685 6686 /* New v2 RWM claims are allowed and the descriptor is promoted if the key matches. */ 6687 spdk_bdev_claim_opts_init(&opts, sizeof(opts)); 6688 opts.shared_claim_key = (uint64_t)&good_key; 6689 CU_ASSERT(!desc2->write); 6690 rc = spdk_bdev_module_claim_bdev_desc(desc2, SPDK_BDEV_CLAIM_READ_MANY_WRITE_SHARED, &opts, 6691 &bdev_ut_if); 6692 CU_ASSERT(rc == 0); 6693 CU_ASSERT(desc2->write); 6694 CU_ASSERT(TAILQ_NEXT(desc->claim, link) == desc2->claim); 6695 UT_ASSERT_CLAIM_V2_COUNT(bdev, 2); 6696 6697 /* Claim remains when closing the first descriptor */ 6698 spdk_bdev_close(desc); 6699 CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_READ_MANY_WRITE_SHARED); 6700 CU_ASSERT(!TAILQ_EMPTY(&bdev->internal.open_descs)); 6701 CU_ASSERT(TAILQ_FIRST(&bdev->internal.claim.v2.claims) == desc2->claim); 6702 UT_ASSERT_CLAIM_V2_COUNT(bdev, 1); 6703 6704 /* Claim removed when closing the other descriptor */ 6705 spdk_bdev_close(desc2); 6706 CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_NONE); 6707 CU_ASSERT(TAILQ_EMPTY(&bdev->internal.open_descs)); 6708 6709 /* Cannot claim without a key */ 6710 desc = NULL; 6711 rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc); 6712 CU_ASSERT(rc == 0); 6713 SPDK_CU_ASSERT_FATAL(desc != NULL); 6714 spdk_bdev_claim_opts_init(&opts, sizeof(opts)); 6715 rc = spdk_bdev_module_claim_bdev_desc(desc, SPDK_BDEV_CLAIM_READ_MANY_WRITE_SHARED, &opts, 6716 &bdev_ut_if); 6717 CU_ASSERT(rc == -EINVAL); 6718 spdk_bdev_close(desc); 6719 CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_NONE); 6720 CU_ASSERT(TAILQ_EMPTY(&bdev->internal.open_descs)); 6721 6722 /* Clean up */ 6723 free_bdev(bdev); 6724 } 6725 6726 static void 6727 claim_v2_existing_writer(void) 6728 { 6729 struct spdk_bdev *bdev; 6730 struct spdk_bdev_desc *desc; 6731 struct spdk_bdev_desc *desc2; 6732 struct spdk_bdev_claim_opts opts; 6733 enum spdk_bdev_claim_type type; 6734 enum spdk_bdev_claim_type types[] = { 6735 SPDK_BDEV_CLAIM_READ_MANY_WRITE_ONE, 6736 SPDK_BDEV_CLAIM_READ_MANY_WRITE_SHARED, 6737 SPDK_BDEV_CLAIM_READ_MANY_WRITE_NONE 6738 }; 6739 size_t i; 6740 int rc; 6741 6742 bdev = allocate_bdev("bdev0"); 6743 6744 desc = NULL; 6745 rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc); 6746 CU_ASSERT(rc == 0); 6747 SPDK_CU_ASSERT_FATAL(desc != NULL); 6748 desc2 = NULL; 6749 rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc2); 6750 CU_ASSERT(rc == 0); 6751 SPDK_CU_ASSERT_FATAL(desc2 != NULL); 6752 6753 for (i = 0; i < SPDK_COUNTOF(types); i++) { 6754 type = types[i]; 6755 spdk_bdev_claim_opts_init(&opts, sizeof(opts)); 6756 if (type == SPDK_BDEV_CLAIM_READ_MANY_WRITE_SHARED) { 6757 opts.shared_claim_key = (uint64_t)&opts; 6758 } 6759 rc = spdk_bdev_module_claim_bdev_desc(desc, type, &opts, &bdev_ut_if); 6760 if (type == SPDK_BDEV_CLAIM_READ_MANY_WRITE_NONE) { 6761 CU_ASSERT(rc == -EINVAL); 6762 } else { 6763 CU_ASSERT(rc == -EPERM); 6764 } 6765 CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_NONE); 6766 rc = spdk_bdev_module_claim_bdev_desc(desc2, type, &opts, &bdev_ut_if); 6767 if (type == SPDK_BDEV_CLAIM_READ_MANY_WRITE_NONE) { 6768 CU_ASSERT(rc == -EINVAL); 6769 } else { 6770 CU_ASSERT(rc == -EPERM); 6771 } 6772 CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_NONE); 6773 } 6774 6775 spdk_bdev_close(desc); 6776 spdk_bdev_close(desc2); 6777 6778 /* Clean up */ 6779 free_bdev(bdev); 6780 } 6781 6782 static void 6783 claim_v2_existing_v1(void) 6784 { 6785 struct spdk_bdev *bdev; 6786 struct spdk_bdev_desc *desc; 6787 struct spdk_bdev_claim_opts opts; 6788 enum spdk_bdev_claim_type type; 6789 enum spdk_bdev_claim_type types[] = { 6790 SPDK_BDEV_CLAIM_READ_MANY_WRITE_ONE, 6791 SPDK_BDEV_CLAIM_READ_MANY_WRITE_SHARED, 6792 SPDK_BDEV_CLAIM_READ_MANY_WRITE_NONE 6793 }; 6794 size_t i; 6795 int rc; 6796 6797 bdev = allocate_bdev("bdev0"); 6798 6799 rc = spdk_bdev_module_claim_bdev(bdev, NULL, &bdev_ut_if); 6800 CU_ASSERT(rc == 0); 6801 CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_EXCL_WRITE); 6802 6803 desc = NULL; 6804 rc = spdk_bdev_open_ext("bdev0", false, bdev_ut_event_cb, NULL, &desc); 6805 CU_ASSERT(rc == 0); 6806 SPDK_CU_ASSERT_FATAL(desc != NULL); 6807 6808 for (i = 0; i < SPDK_COUNTOF(types); i++) { 6809 type = types[i]; 6810 spdk_bdev_claim_opts_init(&opts, sizeof(opts)); 6811 if (type == SPDK_BDEV_CLAIM_READ_MANY_WRITE_SHARED) { 6812 opts.shared_claim_key = (uint64_t)&opts; 6813 } 6814 rc = spdk_bdev_module_claim_bdev_desc(desc, type, &opts, &bdev_ut_if); 6815 CU_ASSERT(rc == -EPERM); 6816 CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_EXCL_WRITE); 6817 } 6818 6819 spdk_bdev_module_release_bdev(bdev); 6820 spdk_bdev_close(desc); 6821 6822 /* Clean up */ 6823 free_bdev(bdev); 6824 } 6825 6826 static void 6827 claim_v1_existing_v2(void) 6828 { 6829 struct spdk_bdev *bdev; 6830 struct spdk_bdev_desc *desc; 6831 struct spdk_bdev_claim_opts opts; 6832 enum spdk_bdev_claim_type type; 6833 enum spdk_bdev_claim_type types[] = { 6834 SPDK_BDEV_CLAIM_READ_MANY_WRITE_ONE, 6835 SPDK_BDEV_CLAIM_READ_MANY_WRITE_SHARED, 6836 SPDK_BDEV_CLAIM_READ_MANY_WRITE_NONE 6837 }; 6838 size_t i; 6839 int rc; 6840 6841 bdev = allocate_bdev("bdev0"); 6842 6843 for (i = 0; i < SPDK_COUNTOF(types); i++) { 6844 type = types[i]; 6845 6846 desc = NULL; 6847 rc = spdk_bdev_open_ext("bdev0", false, bdev_ut_event_cb, NULL, &desc); 6848 CU_ASSERT(rc == 0); 6849 SPDK_CU_ASSERT_FATAL(desc != NULL); 6850 6851 /* Get a v2 claim */ 6852 spdk_bdev_claim_opts_init(&opts, sizeof(opts)); 6853 if (type == SPDK_BDEV_CLAIM_READ_MANY_WRITE_SHARED) { 6854 opts.shared_claim_key = (uint64_t)&opts; 6855 } 6856 rc = spdk_bdev_module_claim_bdev_desc(desc, type, &opts, &bdev_ut_if); 6857 CU_ASSERT(rc == 0); 6858 6859 /* Fail to get a v1 claim */ 6860 rc = spdk_bdev_module_claim_bdev(bdev, NULL, &bdev_ut_if); 6861 CU_ASSERT(rc == -EPERM); 6862 6863 spdk_bdev_close(desc); 6864 6865 /* Now v1 succeeds */ 6866 rc = spdk_bdev_module_claim_bdev(bdev, NULL, &bdev_ut_if); 6867 CU_ASSERT(rc == 0) 6868 spdk_bdev_module_release_bdev(bdev); 6869 } 6870 6871 /* Clean up */ 6872 free_bdev(bdev); 6873 } 6874 6875 static void ut_examine_claimed_config0(struct spdk_bdev *bdev); 6876 static void ut_examine_claimed_disk0(struct spdk_bdev *bdev); 6877 static void ut_examine_claimed_config1(struct spdk_bdev *bdev); 6878 static void ut_examine_claimed_disk1(struct spdk_bdev *bdev); 6879 6880 #define UT_MAX_EXAMINE_MODS 2 6881 struct spdk_bdev_module examine_claimed_mods[UT_MAX_EXAMINE_MODS] = { 6882 { 6883 .name = "vbdev_ut_examine0", 6884 .module_init = vbdev_ut_module_init, 6885 .module_fini = vbdev_ut_module_fini, 6886 .examine_config = ut_examine_claimed_config0, 6887 .examine_disk = ut_examine_claimed_disk0, 6888 }, 6889 { 6890 .name = "vbdev_ut_examine1", 6891 .module_init = vbdev_ut_module_init, 6892 .module_fini = vbdev_ut_module_fini, 6893 .examine_config = ut_examine_claimed_config1, 6894 .examine_disk = ut_examine_claimed_disk1, 6895 } 6896 }; 6897 6898 SPDK_BDEV_MODULE_REGISTER(bdev_ut_claimed0, &examine_claimed_mods[0]) 6899 SPDK_BDEV_MODULE_REGISTER(bdev_ut_claimed1, &examine_claimed_mods[1]) 6900 6901 struct ut_examine_claimed_ctx { 6902 uint32_t examine_config_count; 6903 uint32_t examine_disk_count; 6904 6905 /* Claim type to take, with these options */ 6906 enum spdk_bdev_claim_type claim_type; 6907 struct spdk_bdev_claim_opts claim_opts; 6908 6909 /* Expected return value from spdk_bdev_module_claim_bdev_desc() */ 6910 int expect_claim_err; 6911 6912 /* Descriptor used for a claim */ 6913 struct spdk_bdev_desc *desc; 6914 } examine_claimed_ctx[UT_MAX_EXAMINE_MODS]; 6915 6916 bool ut_testing_examine_claimed; 6917 6918 static void 6919 reset_examine_claimed_ctx(void) 6920 { 6921 struct ut_examine_claimed_ctx *ctx; 6922 uint32_t i; 6923 6924 for (i = 0; i < SPDK_COUNTOF(examine_claimed_ctx); i++) { 6925 ctx = &examine_claimed_ctx[i]; 6926 if (ctx->desc != NULL) { 6927 spdk_bdev_close(ctx->desc); 6928 } 6929 memset(ctx, 0, sizeof(*ctx)); 6930 spdk_bdev_claim_opts_init(&ctx->claim_opts, sizeof(ctx->claim_opts)); 6931 } 6932 } 6933 6934 static void 6935 examine_claimed_config(struct spdk_bdev *bdev, uint32_t modnum) 6936 { 6937 SPDK_CU_ASSERT_FATAL(modnum < UT_MAX_EXAMINE_MODS); 6938 struct spdk_bdev_module *module = &examine_claimed_mods[modnum]; 6939 struct ut_examine_claimed_ctx *ctx = &examine_claimed_ctx[modnum]; 6940 int rc; 6941 6942 if (!ut_testing_examine_claimed) { 6943 spdk_bdev_module_examine_done(module); 6944 return; 6945 } 6946 6947 ctx->examine_config_count++; 6948 6949 if (ctx->claim_type != SPDK_BDEV_CLAIM_NONE) { 6950 rc = spdk_bdev_open_ext(bdev->name, false, bdev_ut_event_cb, &ctx->claim_opts, 6951 &ctx->desc); 6952 CU_ASSERT(rc == 0); 6953 6954 rc = spdk_bdev_module_claim_bdev_desc(ctx->desc, ctx->claim_type, NULL, module); 6955 CU_ASSERT(rc == ctx->expect_claim_err); 6956 } 6957 spdk_bdev_module_examine_done(module); 6958 } 6959 6960 static void 6961 ut_examine_claimed_config0(struct spdk_bdev *bdev) 6962 { 6963 examine_claimed_config(bdev, 0); 6964 } 6965 6966 static void 6967 ut_examine_claimed_config1(struct spdk_bdev *bdev) 6968 { 6969 examine_claimed_config(bdev, 1); 6970 } 6971 6972 static void 6973 examine_claimed_disk(struct spdk_bdev *bdev, uint32_t modnum) 6974 { 6975 SPDK_CU_ASSERT_FATAL(modnum < UT_MAX_EXAMINE_MODS); 6976 struct spdk_bdev_module *module = &examine_claimed_mods[modnum]; 6977 struct ut_examine_claimed_ctx *ctx = &examine_claimed_ctx[modnum]; 6978 6979 if (!ut_testing_examine_claimed) { 6980 spdk_bdev_module_examine_done(module); 6981 return; 6982 } 6983 6984 ctx->examine_disk_count++; 6985 6986 spdk_bdev_module_examine_done(module); 6987 } 6988 6989 static void 6990 ut_examine_claimed_disk0(struct spdk_bdev *bdev) 6991 { 6992 examine_claimed_disk(bdev, 0); 6993 } 6994 6995 static void 6996 ut_examine_claimed_disk1(struct spdk_bdev *bdev) 6997 { 6998 examine_claimed_disk(bdev, 1); 6999 } 7000 7001 static void 7002 examine_claimed(void) 7003 { 7004 struct spdk_bdev *bdev; 7005 struct spdk_bdev_module *mod = examine_claimed_mods; 7006 struct ut_examine_claimed_ctx *ctx = examine_claimed_ctx; 7007 7008 ut_testing_examine_claimed = true; 7009 reset_examine_claimed_ctx(); 7010 7011 /* 7012 * With one module claiming, both modules' examine_config should be called, but only the 7013 * claiming module's examine_disk should be called. 7014 */ 7015 ctx[0].claim_type = SPDK_BDEV_CLAIM_READ_MANY_WRITE_NONE; 7016 bdev = allocate_bdev("bdev0"); 7017 CU_ASSERT(ctx[0].examine_config_count == 1); 7018 CU_ASSERT(ctx[0].examine_disk_count == 1); 7019 SPDK_CU_ASSERT_FATAL(ctx[0].desc != NULL); 7020 CU_ASSERT(ctx[0].desc->claim->module == &mod[0]); 7021 CU_ASSERT(ctx[1].examine_config_count == 1); 7022 CU_ASSERT(ctx[1].examine_disk_count == 0); 7023 CU_ASSERT(ctx[1].desc == NULL); 7024 reset_examine_claimed_ctx(); 7025 free_bdev(bdev); 7026 7027 /* 7028 * With two modules claiming, both modules' examine_config and examine_disk should be 7029 * called. 7030 */ 7031 ctx[0].claim_type = SPDK_BDEV_CLAIM_READ_MANY_WRITE_NONE; 7032 ctx[1].claim_type = SPDK_BDEV_CLAIM_READ_MANY_WRITE_NONE; 7033 bdev = allocate_bdev("bdev0"); 7034 CU_ASSERT(ctx[0].examine_config_count == 1); 7035 CU_ASSERT(ctx[0].examine_disk_count == 1); 7036 SPDK_CU_ASSERT_FATAL(ctx[0].desc != NULL); 7037 CU_ASSERT(ctx[0].desc->claim->module == &mod[0]); 7038 CU_ASSERT(ctx[1].examine_config_count == 1); 7039 CU_ASSERT(ctx[1].examine_disk_count == 1); 7040 SPDK_CU_ASSERT_FATAL(ctx[1].desc != NULL); 7041 CU_ASSERT(ctx[1].desc->claim->module == &mod[1]); 7042 reset_examine_claimed_ctx(); 7043 free_bdev(bdev); 7044 7045 /* 7046 * If two vbdev modules try to claim with conflicting claim types, the module that was added 7047 * last wins. The winner gets the claim and is the only one that has its examine_disk 7048 * callback invoked. 7049 */ 7050 ctx[0].claim_type = SPDK_BDEV_CLAIM_READ_MANY_WRITE_NONE; 7051 ctx[0].expect_claim_err = -EPERM; 7052 ctx[1].claim_type = SPDK_BDEV_CLAIM_READ_MANY_WRITE_ONE; 7053 bdev = allocate_bdev("bdev0"); 7054 CU_ASSERT(ctx[0].examine_config_count == 1); 7055 CU_ASSERT(ctx[0].examine_disk_count == 0); 7056 CU_ASSERT(ctx[1].examine_config_count == 1); 7057 CU_ASSERT(ctx[1].examine_disk_count == 1); 7058 SPDK_CU_ASSERT_FATAL(ctx[1].desc != NULL); 7059 CU_ASSERT(ctx[1].desc->claim->module == &mod[1]); 7060 CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_READ_MANY_WRITE_ONE); 7061 reset_examine_claimed_ctx(); 7062 free_bdev(bdev); 7063 7064 ut_testing_examine_claimed = false; 7065 } 7066 7067 int 7068 main(int argc, char **argv) 7069 { 7070 CU_pSuite suite = NULL; 7071 unsigned int num_failures; 7072 7073 CU_set_error_action(CUEA_ABORT); 7074 CU_initialize_registry(); 7075 7076 suite = CU_add_suite("bdev", ut_bdev_setup, ut_bdev_teardown); 7077 7078 CU_ADD_TEST(suite, bytes_to_blocks_test); 7079 CU_ADD_TEST(suite, num_blocks_test); 7080 CU_ADD_TEST(suite, io_valid_test); 7081 CU_ADD_TEST(suite, open_write_test); 7082 CU_ADD_TEST(suite, claim_test); 7083 CU_ADD_TEST(suite, alias_add_del_test); 7084 CU_ADD_TEST(suite, get_device_stat_test); 7085 CU_ADD_TEST(suite, bdev_io_types_test); 7086 CU_ADD_TEST(suite, bdev_io_wait_test); 7087 CU_ADD_TEST(suite, bdev_io_spans_split_test); 7088 CU_ADD_TEST(suite, bdev_io_boundary_split_test); 7089 CU_ADD_TEST(suite, bdev_io_max_size_and_segment_split_test); 7090 CU_ADD_TEST(suite, bdev_io_mix_split_test); 7091 CU_ADD_TEST(suite, bdev_io_split_with_io_wait); 7092 CU_ADD_TEST(suite, bdev_io_write_unit_split_test); 7093 CU_ADD_TEST(suite, bdev_io_alignment_with_boundary); 7094 CU_ADD_TEST(suite, bdev_io_alignment); 7095 CU_ADD_TEST(suite, bdev_histograms); 7096 CU_ADD_TEST(suite, bdev_write_zeroes); 7097 CU_ADD_TEST(suite, bdev_compare_and_write); 7098 CU_ADD_TEST(suite, bdev_compare); 7099 CU_ADD_TEST(suite, bdev_compare_emulated); 7100 CU_ADD_TEST(suite, bdev_zcopy_write); 7101 CU_ADD_TEST(suite, bdev_zcopy_read); 7102 CU_ADD_TEST(suite, bdev_open_while_hotremove); 7103 CU_ADD_TEST(suite, bdev_close_while_hotremove); 7104 CU_ADD_TEST(suite, bdev_open_ext); 7105 CU_ADD_TEST(suite, bdev_open_ext_unregister); 7106 CU_ADD_TEST(suite, bdev_set_io_timeout); 7107 CU_ADD_TEST(suite, bdev_set_qd_sampling); 7108 CU_ADD_TEST(suite, lba_range_overlap); 7109 CU_ADD_TEST(suite, lock_lba_range_check_ranges); 7110 CU_ADD_TEST(suite, lock_lba_range_with_io_outstanding); 7111 CU_ADD_TEST(suite, lock_lba_range_overlapped); 7112 CU_ADD_TEST(suite, bdev_io_abort); 7113 CU_ADD_TEST(suite, bdev_unmap); 7114 CU_ADD_TEST(suite, bdev_write_zeroes_split_test); 7115 CU_ADD_TEST(suite, bdev_set_options_test); 7116 CU_ADD_TEST(suite, bdev_multi_allocation); 7117 CU_ADD_TEST(suite, bdev_get_memory_domains); 7118 CU_ADD_TEST(suite, bdev_io_ext); 7119 CU_ADD_TEST(suite, bdev_io_ext_no_opts); 7120 CU_ADD_TEST(suite, bdev_io_ext_invalid_opts); 7121 CU_ADD_TEST(suite, bdev_io_ext_split); 7122 CU_ADD_TEST(suite, bdev_io_ext_bounce_buffer); 7123 CU_ADD_TEST(suite, bdev_register_uuid_alias); 7124 CU_ADD_TEST(suite, bdev_unregister_by_name); 7125 CU_ADD_TEST(suite, for_each_bdev_test); 7126 CU_ADD_TEST(suite, bdev_seek_test); 7127 CU_ADD_TEST(suite, bdev_copy); 7128 CU_ADD_TEST(suite, bdev_copy_split_test); 7129 CU_ADD_TEST(suite, examine_locks); 7130 CU_ADD_TEST(suite, claim_v2_rwo); 7131 CU_ADD_TEST(suite, claim_v2_rom); 7132 CU_ADD_TEST(suite, claim_v2_rwm); 7133 CU_ADD_TEST(suite, claim_v2_existing_writer); 7134 CU_ADD_TEST(suite, claim_v2_existing_v1); 7135 CU_ADD_TEST(suite, claim_v1_existing_v2); 7136 CU_ADD_TEST(suite, examine_claimed); 7137 7138 allocate_cores(1); 7139 allocate_threads(1); 7140 set_thread(0); 7141 7142 CU_basic_set_mode(CU_BRM_VERBOSE); 7143 CU_basic_run_tests(); 7144 num_failures = CU_get_number_of_failures(); 7145 CU_cleanup_registry(); 7146 7147 free_threads(); 7148 free_cores(); 7149 7150 return num_failures; 7151 } 7152