/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (C) 2017 Intel Corporation. All rights reserved.
 * Copyright (c) 2019 Mellanox Technologies LTD. All rights reserved.
 * Copyright (c) 2021-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 */

#include "spdk_internal/cunit.h"

#include "common/lib/ut_multithread.c"
#include "unit/lib/json_mock.c"

#include "spdk/config.h"
/* HACK: disable VTune integration so the unit test doesn't need VTune headers and libs to build */
#undef SPDK_CONFIG_VTUNE

#include "bdev/bdev.c"

DEFINE_STUB(spdk_notify_send, uint64_t, (const char *type, const char *ctx), 0);
DEFINE_STUB(spdk_notify_type_register, struct spdk_notify_type *, (const char *type), NULL);
DEFINE_STUB(spdk_memory_domain_get_dma_device_id, const char *, (struct spdk_memory_domain *domain),
	    "test_domain");
DEFINE_STUB(spdk_memory_domain_get_dma_device_type, enum spdk_dma_device_type,
	    (struct spdk_memory_domain *domain), 0);
DEFINE_STUB_V(spdk_accel_sequence_finish,
	      (struct spdk_accel_sequence *seq, spdk_accel_completion_cb cb_fn, void *cb_arg));
DEFINE_STUB_V(spdk_accel_sequence_abort, (struct spdk_accel_sequence *seq));
DEFINE_STUB_V(spdk_accel_sequence_reverse, (struct spdk_accel_sequence *seq));
DEFINE_STUB(spdk_accel_append_copy, int,
	    (struct spdk_accel_sequence **seq, struct spdk_io_channel *ch, struct iovec *dst_iovs,
	     uint32_t dst_iovcnt, struct spdk_memory_domain *dst_domain, void *dst_domain_ctx,
	     struct iovec *src_iovs, uint32_t src_iovcnt, struct spdk_memory_domain *src_domain,
	     void *src_domain_ctx, spdk_accel_step_cb cb_fn, void *cb_arg), 0);
DEFINE_STUB(spdk_accel_get_memory_domain, struct spdk_memory_domain *, (void), NULL);

static bool g_memory_domain_pull_data_called;
static bool g_memory_domain_push_data_called;
static int g_accel_io_device;

DEFINE_RETURN_MOCK(spdk_memory_domain_pull_data, int);
int
spdk_memory_domain_pull_data(struct spdk_memory_domain *src_domain, void *src_domain_ctx,
			     struct iovec *src_iov, uint32_t src_iov_cnt, struct iovec *dst_iov, uint32_t dst_iov_cnt,
			     spdk_memory_domain_data_cpl_cb cpl_cb, void *cpl_cb_arg)
{
	g_memory_domain_pull_data_called = true;
	HANDLE_RETURN_MOCK(spdk_memory_domain_pull_data);
	cpl_cb(cpl_cb_arg, 0);
	return 0;
}

DEFINE_RETURN_MOCK(spdk_memory_domain_push_data, int);
int
spdk_memory_domain_push_data(struct spdk_memory_domain *dst_domain, void *dst_domain_ctx,
			     struct iovec *dst_iov, uint32_t dst_iovcnt, struct iovec *src_iov, uint32_t src_iovcnt,
			     spdk_memory_domain_data_cpl_cb cpl_cb, void *cpl_cb_arg)
{
	g_memory_domain_push_data_called = true;
	HANDLE_RETURN_MOCK(spdk_memory_domain_push_data);
	cpl_cb(cpl_cb_arg, 0);
	return 0;
}

struct spdk_io_channel *
spdk_accel_get_io_channel(void)
{
	return spdk_get_io_channel(&g_accel_io_device);
}

int g_status;
int g_count;
enum spdk_bdev_event_type g_event_type1;
enum spdk_bdev_event_type g_event_type2;
enum spdk_bdev_event_type g_event_type3;
enum spdk_bdev_event_type g_event_type4;
struct spdk_histogram_data *g_histogram;
void *g_unregister_arg;
int g_unregister_rc;

void
spdk_scsi_nvme_translate(const struct spdk_bdev_io *bdev_io,
			 int *sc, int *sk, int *asc, int *ascq)
{
}

static int
ut_accel_ch_create_cb(void *io_device, void *ctx)
{
	return 0;
}

static void
ut_accel_ch_destroy_cb(void *io_device, void *ctx)
{
}

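/*
 * Suite-level fixtures: ut_bdev_setup()/ut_bdev_teardown() register and
 * unregister the fake accel io_device above so that bdev.c's calls to
 * spdk_accel_get_io_channel() resolve to a real (but empty) channel.  The
 * mocked pull/push callbacks complete synchronously unless a test overrides
 * their return value through the DEFINE_RETURN_MOCK machinery.
 */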
static int
ut_bdev_setup(void)
{
	spdk_io_device_register(&g_accel_io_device, ut_accel_ch_create_cb,
				ut_accel_ch_destroy_cb, 0, NULL);
	return 0;
}

static int
ut_bdev_teardown(void)
{
	spdk_io_device_unregister(&g_accel_io_device, NULL);

	return 0;
}

static int
stub_destruct(void *ctx)
{
	return 0;
}

struct ut_expected_io {
	uint8_t type;
	uint64_t offset;
	uint64_t src_offset;
	uint64_t length;
	int iovcnt;
	struct iovec iov[SPDK_BDEV_IO_NUM_CHILD_IOV];
	void *md_buf;
	TAILQ_ENTRY(ut_expected_io) link;
};

struct bdev_ut_io {
	TAILQ_ENTRY(bdev_ut_io) link;
};

struct bdev_ut_channel {
	TAILQ_HEAD(, bdev_ut_io) outstanding_io;
	uint32_t outstanding_io_count;
	TAILQ_HEAD(, ut_expected_io) expected_io;
};

static bool g_io_done;
static struct spdk_bdev_io *g_bdev_io;
static enum spdk_bdev_io_status g_io_status;
static enum spdk_bdev_io_status g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS;
static uint32_t g_bdev_ut_io_device;
static struct bdev_ut_channel *g_bdev_ut_channel;
static void *g_compare_read_buf;
static uint32_t g_compare_read_buf_len;
static void *g_compare_write_buf;
static uint32_t g_compare_write_buf_len;
static void *g_compare_md_buf;
static bool g_abort_done;
static enum spdk_bdev_io_status g_abort_status;
static void *g_zcopy_read_buf;
static uint32_t g_zcopy_read_buf_len;
static void *g_zcopy_write_buf;
static uint32_t g_zcopy_write_buf_len;
static struct spdk_bdev_io *g_zcopy_bdev_io;
static uint64_t g_seek_data_offset;
static uint64_t g_seek_hole_offset;
static uint64_t g_seek_offset;

static struct ut_expected_io *
ut_alloc_expected_io(uint8_t type, uint64_t offset, uint64_t length, int iovcnt)
{
	struct ut_expected_io *expected_io;

	expected_io = calloc(1, sizeof(*expected_io));
	SPDK_CU_ASSERT_FATAL(expected_io != NULL);

	expected_io->type = type;
	expected_io->offset = offset;
	expected_io->length = length;
	expected_io->iovcnt = iovcnt;

	return expected_io;
}

static struct ut_expected_io *
ut_alloc_expected_copy_io(uint8_t type, uint64_t offset, uint64_t src_offset, uint64_t length)
{
	struct ut_expected_io *expected_io;

	expected_io = calloc(1, sizeof(*expected_io));
	SPDK_CU_ASSERT_FATAL(expected_io != NULL);

	expected_io->type = type;
	expected_io->offset = offset;
	expected_io->src_offset = src_offset;
	expected_io->length = length;

	return expected_io;
}

static void
ut_expected_io_set_iov(struct ut_expected_io *expected_io, int pos, void *base, size_t len)
{
	expected_io->iov[pos].iov_base = base;
	expected_io->iov[pos].iov_len = len;
}

static void
stub_submit_request(struct spdk_io_channel *_ch, struct spdk_bdev_io *bdev_io)
{
	struct bdev_ut_channel *ch = spdk_io_channel_get_ctx(_ch);
	struct ut_expected_io *expected_io;
	struct iovec *iov, *expected_iov;
	struct spdk_bdev_io *bio_to_abort;
	struct bdev_ut_io *bio;
	int i;

	g_bdev_io = bdev_io;

	if (g_compare_read_buf && bdev_io->type == SPDK_BDEV_IO_TYPE_READ) {
		uint32_t len = bdev_io->u.bdev.iovs[0].iov_len;

		CU_ASSERT(bdev_io->u.bdev.iovcnt == 1);
		CU_ASSERT(g_compare_read_buf_len == len);
		memcpy(bdev_io->u.bdev.iovs[0].iov_base, g_compare_read_buf, len);
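		/* If the bdev has metadata and the caller supplied an md buffer,
		 * emulate reading the reference metadata as well. */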
		if (bdev_io->bdev->md_len && bdev_io->u.bdev.md_buf && g_compare_md_buf) {
			memcpy(bdev_io->u.bdev.md_buf, g_compare_md_buf,
			       bdev_io->bdev->md_len * bdev_io->u.bdev.num_blocks);
		}
	}

	if (g_compare_write_buf && bdev_io->type == SPDK_BDEV_IO_TYPE_WRITE) {
		uint32_t len = bdev_io->u.bdev.iovs[0].iov_len;

		CU_ASSERT(bdev_io->u.bdev.iovcnt == 1);
		CU_ASSERT(g_compare_write_buf_len == len);
		memcpy(g_compare_write_buf, bdev_io->u.bdev.iovs[0].iov_base, len);
	}

	if (g_compare_read_buf && bdev_io->type == SPDK_BDEV_IO_TYPE_COMPARE) {
		uint32_t len = bdev_io->u.bdev.iovs[0].iov_len;

		CU_ASSERT(bdev_io->u.bdev.iovcnt == 1);
		CU_ASSERT(g_compare_read_buf_len == len);
		if (memcmp(bdev_io->u.bdev.iovs[0].iov_base, g_compare_read_buf, len)) {
			g_io_exp_status = SPDK_BDEV_IO_STATUS_MISCOMPARE;
		}
		if (bdev_io->u.bdev.md_buf &&
		    memcmp(bdev_io->u.bdev.md_buf, g_compare_md_buf,
			   bdev_io->bdev->md_len * bdev_io->u.bdev.num_blocks)) {
			g_io_exp_status = SPDK_BDEV_IO_STATUS_MISCOMPARE;
		}
	}

	if (bdev_io->type == SPDK_BDEV_IO_TYPE_ABORT) {
		if (g_io_exp_status == SPDK_BDEV_IO_STATUS_SUCCESS) {
			TAILQ_FOREACH(bio, &ch->outstanding_io, link) {
				bio_to_abort = spdk_bdev_io_from_ctx(bio);
				if (bio_to_abort == bdev_io->u.abort.bio_to_abort) {
					TAILQ_REMOVE(&ch->outstanding_io, bio, link);
					ch->outstanding_io_count--;
					spdk_bdev_io_complete(bio_to_abort, SPDK_BDEV_IO_STATUS_FAILED);
					break;
				}
			}
		}
	}

	if (bdev_io->type == SPDK_BDEV_IO_TYPE_ZCOPY) {
		if (bdev_io->u.bdev.zcopy.start) {
			g_zcopy_bdev_io = bdev_io;
			if (bdev_io->u.bdev.zcopy.populate) {
				/* Start of a read */
				CU_ASSERT(g_zcopy_read_buf != NULL);
				CU_ASSERT(g_zcopy_read_buf_len > 0);
				bdev_io->u.bdev.iovs[0].iov_base = g_zcopy_read_buf;
				bdev_io->u.bdev.iovs[0].iov_len = g_zcopy_read_buf_len;
				bdev_io->u.bdev.iovcnt = 1;
			} else {
				/* Start of a write */
				CU_ASSERT(g_zcopy_write_buf != NULL);
				CU_ASSERT(g_zcopy_write_buf_len > 0);
				bdev_io->u.bdev.iovs[0].iov_base = g_zcopy_write_buf;
				bdev_io->u.bdev.iovs[0].iov_len = g_zcopy_write_buf_len;
				bdev_io->u.bdev.iovcnt = 1;
			}
		} else {
			if (bdev_io->u.bdev.zcopy.commit) {
				/* End of write */
				CU_ASSERT(bdev_io->u.bdev.iovs[0].iov_base == g_zcopy_write_buf);
				CU_ASSERT(bdev_io->u.bdev.iovs[0].iov_len == g_zcopy_write_buf_len);
				CU_ASSERT(bdev_io->u.bdev.iovcnt == 1);
				g_zcopy_write_buf = NULL;
				g_zcopy_write_buf_len = 0;
			} else {
				/* End of read */
				CU_ASSERT(bdev_io->u.bdev.iovs[0].iov_base == g_zcopy_read_buf);
				CU_ASSERT(bdev_io->u.bdev.iovs[0].iov_len == g_zcopy_read_buf_len);
				CU_ASSERT(bdev_io->u.bdev.iovcnt == 1);
				g_zcopy_read_buf = NULL;
				g_zcopy_read_buf_len = 0;
			}
		}
	}

	if (bdev_io->type == SPDK_BDEV_IO_TYPE_SEEK_DATA) {
		bdev_io->u.bdev.seek.offset = g_seek_data_offset;
	}

	if (bdev_io->type == SPDK_BDEV_IO_TYPE_SEEK_HOLE) {
		bdev_io->u.bdev.seek.offset = g_seek_hole_offset;
	}

	TAILQ_INSERT_TAIL(&ch->outstanding_io, (struct bdev_ut_io *)bdev_io->driver_ctx, link);
	ch->outstanding_io_count++;

	expected_io = TAILQ_FIRST(&ch->expected_io);
	if (expected_io == NULL) {
		return;
	}
	TAILQ_REMOVE(&ch->expected_io, expected_io, link);

	if (expected_io->type != SPDK_BDEV_IO_TYPE_INVALID) {
		CU_ASSERT(bdev_io->type == expected_io->type);
	}

	if (expected_io->md_buf != NULL) {
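		/* Split tests preset expected md_buf to the parent buffer advanced
		 * by the metadata of all preceding blocks; the child I/O must have
		 * received exactly that address. */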
		CU_ASSERT(expected_io->md_buf == bdev_io->u.bdev.md_buf);
	}

	if (expected_io->length == 0) {
		free(expected_io);
		return;
	}

	CU_ASSERT(expected_io->offset == bdev_io->u.bdev.offset_blocks);
	CU_ASSERT(expected_io->length == bdev_io->u.bdev.num_blocks);
	if (expected_io->type == SPDK_BDEV_IO_TYPE_COPY) {
		CU_ASSERT(expected_io->src_offset == bdev_io->u.bdev.copy.src_offset_blocks);
	}

	if (expected_io->iovcnt == 0) {
		free(expected_io);
		/* UNMAP, WRITE_ZEROES, FLUSH and COPY don't have iovs, so we can just return now. */
		return;
	}

	CU_ASSERT(expected_io->iovcnt == bdev_io->u.bdev.iovcnt);
	for (i = 0; i < expected_io->iovcnt; i++) {
		expected_iov = &expected_io->iov[i];
		if (bdev_io->internal.orig_iovcnt == 0) {
			iov = &bdev_io->u.bdev.iovs[i];
		} else {
			iov = bdev_io->internal.orig_iovs;
		}
		CU_ASSERT(iov->iov_len == expected_iov->iov_len);
		CU_ASSERT(iov->iov_base == expected_iov->iov_base);
	}

	free(expected_io);
}

static void
stub_submit_request_get_buf_cb(struct spdk_io_channel *_ch,
			       struct spdk_bdev_io *bdev_io, bool success)
{
	CU_ASSERT(success == true);

	stub_submit_request(_ch, bdev_io);
}

static void
stub_submit_request_get_buf(struct spdk_io_channel *_ch, struct spdk_bdev_io *bdev_io)
{
	spdk_bdev_io_get_buf(bdev_io, stub_submit_request_get_buf_cb,
			     bdev_io->u.bdev.num_blocks * bdev_io->bdev->blocklen);
}

static uint32_t
stub_complete_io(uint32_t num_to_complete)
{
	struct bdev_ut_channel *ch = g_bdev_ut_channel;
	struct bdev_ut_io *bio;
	struct spdk_bdev_io *bdev_io;
	static enum spdk_bdev_io_status io_status;
	uint32_t num_completed = 0;

	while (num_completed < num_to_complete) {
		if (TAILQ_EMPTY(&ch->outstanding_io)) {
			break;
		}
		bio = TAILQ_FIRST(&ch->outstanding_io);
		TAILQ_REMOVE(&ch->outstanding_io, bio, link);
		bdev_io = spdk_bdev_io_from_ctx(bio);
		ch->outstanding_io_count--;
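		/* Complete with the status the test injected via g_io_exp_status
		 * (SPDK_BDEV_IO_STATUS_SUCCESS unless overridden). */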
		io_status = g_io_exp_status == SPDK_BDEV_IO_STATUS_SUCCESS ? SPDK_BDEV_IO_STATUS_SUCCESS :
			    g_io_exp_status;
		spdk_bdev_io_complete(bdev_io, io_status);
		num_completed++;
	}

	return num_completed;
}

static struct spdk_io_channel *
bdev_ut_get_io_channel(void *ctx)
{
	return spdk_get_io_channel(&g_bdev_ut_io_device);
}

static bool g_io_types_supported[SPDK_BDEV_NUM_IO_TYPES] = {
	[SPDK_BDEV_IO_TYPE_READ] = true,
	[SPDK_BDEV_IO_TYPE_WRITE] = true,
	[SPDK_BDEV_IO_TYPE_COMPARE] = true,
	[SPDK_BDEV_IO_TYPE_UNMAP] = true,
	[SPDK_BDEV_IO_TYPE_FLUSH] = true,
	[SPDK_BDEV_IO_TYPE_RESET] = true,
	[SPDK_BDEV_IO_TYPE_NVME_ADMIN] = true,
	[SPDK_BDEV_IO_TYPE_NVME_IO] = true,
	[SPDK_BDEV_IO_TYPE_NVME_IO_MD] = true,
	[SPDK_BDEV_IO_TYPE_WRITE_ZEROES] = true,
	[SPDK_BDEV_IO_TYPE_ZCOPY] = true,
	[SPDK_BDEV_IO_TYPE_ABORT] = true,
	[SPDK_BDEV_IO_TYPE_SEEK_HOLE] = true,
	[SPDK_BDEV_IO_TYPE_SEEK_DATA] = true,
	[SPDK_BDEV_IO_TYPE_COPY] = true,
};

static void
ut_enable_io_type(enum spdk_bdev_io_type io_type, bool enable)
{
	g_io_types_supported[io_type] = enable;
}

static bool
stub_io_type_supported(void *_bdev, enum spdk_bdev_io_type io_type)
{
	return g_io_types_supported[io_type];
}

static struct spdk_bdev_fn_table fn_table = {
	.destruct = stub_destruct,
	.submit_request = stub_submit_request,
	.get_io_channel = bdev_ut_get_io_channel,
	.io_type_supported = stub_io_type_supported,
};

static int
bdev_ut_create_ch(void *io_device, void *ctx_buf)
{
	struct bdev_ut_channel *ch = ctx_buf;

	CU_ASSERT(g_bdev_ut_channel == NULL);
	g_bdev_ut_channel = ch;

	TAILQ_INIT(&ch->outstanding_io);
	ch->outstanding_io_count = 0;
	TAILQ_INIT(&ch->expected_io);
	return 0;
}

static void
bdev_ut_destroy_ch(void *io_device, void *ctx_buf)
{
	CU_ASSERT(g_bdev_ut_channel != NULL);
	g_bdev_ut_channel = NULL;
}

struct spdk_bdev_module bdev_ut_if;

static int
bdev_ut_module_init(void)
{
	spdk_io_device_register(&g_bdev_ut_io_device, bdev_ut_create_ch, bdev_ut_destroy_ch,
				sizeof(struct bdev_ut_channel), NULL);
	spdk_bdev_module_init_done(&bdev_ut_if);
	return 0;
}

static void
bdev_ut_module_fini(void)
{
	spdk_io_device_unregister(&g_bdev_ut_io_device, NULL);
}

struct spdk_bdev_module bdev_ut_if = {
	.name = "bdev_ut",
	.module_init = bdev_ut_module_init,
	.module_fini = bdev_ut_module_fini,
	.async_init = true,
};

static void vbdev_ut_examine_config(struct spdk_bdev *bdev);
static void vbdev_ut_examine_disk(struct spdk_bdev *bdev);

static int
vbdev_ut_module_init(void)
{
	return 0;
}

static void
vbdev_ut_module_fini(void)
{
}

static int
vbdev_ut_get_ctx_size(void)
{
	return sizeof(struct bdev_ut_io);
}

struct spdk_bdev_module vbdev_ut_if = {
	.name = "vbdev_ut",
	.module_init = vbdev_ut_module_init,
	.module_fini = vbdev_ut_module_fini,
	.examine_config = vbdev_ut_examine_config,
	.examine_disk = vbdev_ut_examine_disk,
	.get_ctx_size = vbdev_ut_get_ctx_size,
};

SPDK_BDEV_MODULE_REGISTER(bdev_ut, &bdev_ut_if)
SPDK_BDEV_MODULE_REGISTER(vbdev_ut, &vbdev_ut_if)

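/*
 * Optional per-bdev context: when a test registers a bdev through
 * allocate_bdev_ctx() with a ut_examine_ctx, the vbdev examine callbacks
 * below count their invocations and run the test-provided hooks.
 */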
struct ut_examine_ctx {
	void (*examine_config)(struct spdk_bdev *bdev);
	void (*examine_disk)(struct spdk_bdev *bdev);
	uint32_t examine_config_count;
	uint32_t examine_disk_count;
};

static void
vbdev_ut_examine_config(struct spdk_bdev *bdev)
{
	struct ut_examine_ctx *ctx = bdev->ctxt;

	if (ctx != NULL) {
		ctx->examine_config_count++;
		if (ctx->examine_config != NULL) {
			ctx->examine_config(bdev);
		}
	}

	spdk_bdev_module_examine_done(&vbdev_ut_if);
}

static void
vbdev_ut_examine_disk(struct spdk_bdev *bdev)
{
	struct ut_examine_ctx *ctx = bdev->ctxt;

	if (ctx != NULL) {
		ctx->examine_disk_count++;
		if (ctx->examine_disk != NULL) {
			ctx->examine_disk(bdev);
		}
	}

	spdk_bdev_module_examine_done(&vbdev_ut_if);
}

static void
bdev_init_cb(void *arg, int rc)
{
	CU_ASSERT(rc == 0);
}

static void
bdev_fini_cb(void *arg)
{
}

static void
ut_init_bdev(struct spdk_bdev_opts *opts)
{
	int rc;

	if (opts != NULL) {
		rc = spdk_bdev_set_opts(opts);
		CU_ASSERT(rc == 0);
	}
	rc = spdk_iobuf_initialize();
	CU_ASSERT(rc == 0);
	spdk_bdev_initialize(bdev_init_cb, NULL);
	poll_threads();
}

static void
ut_fini_bdev(void)
{
	spdk_bdev_finish(bdev_fini_cb, NULL);
	spdk_iobuf_finish(bdev_fini_cb, NULL);
	poll_threads();
}

static struct spdk_bdev *
allocate_bdev_ctx(char *name, void *ctx)
{
	struct spdk_bdev *bdev;
	int rc;

	bdev = calloc(1, sizeof(*bdev));
	SPDK_CU_ASSERT_FATAL(bdev != NULL);

	bdev->ctxt = ctx;
	bdev->name = name;
	bdev->fn_table = &fn_table;
	bdev->module = &bdev_ut_if;
	bdev->blockcnt = 1024;
	bdev->blocklen = 512;

	spdk_uuid_generate(&bdev->uuid);

	rc = spdk_bdev_register(bdev);
	poll_threads();
	CU_ASSERT(rc == 0);

	return bdev;
}

static struct spdk_bdev *
allocate_bdev(char *name)
{
	return allocate_bdev_ctx(name, NULL);
}

static struct spdk_bdev *
allocate_vbdev(char *name)
{
	struct spdk_bdev *bdev;
	int rc;

	bdev = calloc(1, sizeof(*bdev));
	SPDK_CU_ASSERT_FATAL(bdev != NULL);

	bdev->name = name;
	bdev->fn_table = &fn_table;
	bdev->module = &vbdev_ut_if;
	bdev->blockcnt = 1024;
	bdev->blocklen = 512;

	rc = spdk_bdev_register(bdev);
	poll_threads();
	CU_ASSERT(rc == 0);

	return bdev;
}

static void
free_bdev(struct spdk_bdev *bdev)
{
	spdk_bdev_unregister(bdev, NULL, NULL);
	poll_threads();
	memset(bdev, 0xFF, sizeof(*bdev));
	free(bdev);
}

static void
free_vbdev(struct spdk_bdev *bdev)
{
	spdk_bdev_unregister(bdev, NULL, NULL);
	poll_threads();
	memset(bdev, 0xFF, sizeof(*bdev));
	free(bdev);
}

static void
get_device_stat_cb(struct spdk_bdev *bdev, struct spdk_bdev_io_stat *stat, void *cb_arg, int rc)
{
	const char *bdev_name;

	CU_ASSERT(bdev != NULL);
	CU_ASSERT(rc == 0);
	bdev_name = spdk_bdev_get_name(bdev);
	CU_ASSERT_STRING_EQUAL(bdev_name, "bdev0");

	free(stat);

	*(bool *)cb_arg = true;
}

static void
bdev_unregister_cb(void *cb_arg, int rc)
{
	g_unregister_arg = cb_arg;
	g_unregister_rc = rc;
}

static void
bdev_ut_event_cb(enum spdk_bdev_event_type type, struct spdk_bdev *bdev, void *event_ctx)
{
}

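/*
 * Open-event callbacks for spdk_bdev_open_ext(): cb1 and cb2 record the event
 * type and close their descriptor on SPDK_BDEV_EVENT_REMOVE; cb3 and cb4 only
 * record the event type.
 */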
static void
bdev_open_cb1(enum spdk_bdev_event_type type, struct spdk_bdev *bdev, void *event_ctx)
{
	struct spdk_bdev_desc *desc = *(struct spdk_bdev_desc **)event_ctx;

	g_event_type1 = type;
	if (SPDK_BDEV_EVENT_REMOVE == type) {
		spdk_bdev_close(desc);
	}
}

static void
bdev_open_cb2(enum spdk_bdev_event_type type, struct spdk_bdev *bdev, void *event_ctx)
{
	struct spdk_bdev_desc *desc = *(struct spdk_bdev_desc **)event_ctx;

	g_event_type2 = type;
	if (SPDK_BDEV_EVENT_REMOVE == type) {
		spdk_bdev_close(desc);
	}
}

static void
bdev_open_cb3(enum spdk_bdev_event_type type, struct spdk_bdev *bdev, void *event_ctx)
{
	g_event_type3 = type;
}

static void
bdev_open_cb4(enum spdk_bdev_event_type type, struct spdk_bdev *bdev, void *event_ctx)
{
	g_event_type4 = type;
}

static void
bdev_seek_cb(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
{
	g_seek_offset = spdk_bdev_io_get_seek_offset(bdev_io);
	spdk_bdev_free_io(bdev_io);
}

static void
get_device_stat_test(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_io_stat *stat;
	bool done;

	bdev = allocate_bdev("bdev0");
	stat = calloc(1, sizeof(struct spdk_bdev_io_stat));
	if (stat == NULL) {
		free_bdev(bdev);
		return;
	}

	done = false;
	spdk_bdev_get_device_stat(bdev, stat, get_device_stat_cb, &done);
	while (!done) {
		poll_threads();
	}

	free_bdev(bdev);
}

static void
open_write_test(void)
{
	struct spdk_bdev *bdev[9];
	struct spdk_bdev_desc *desc[9] = {};
	int rc;

	ut_init_bdev(NULL);

	/*
	 * Create a tree of bdevs to test various open w/ write cases.
	 *
	 * bdev0 through bdev3 are physical block devices, such as NVMe
	 * namespaces or Ceph block devices.
	 *
	 * bdev4 is a virtual bdev with multiple base bdevs.  This models
	 * caching or RAID use cases.
	 *
	 * bdev5 through bdev7 are all virtual bdevs with the same base
	 * bdev (except bdev7).  This models partitioning or logical volume
	 * use cases.
	 *
	 * bdev7 is a virtual bdev with multiple base bdevs.  One of base bdevs
	 * (bdev2) is shared with other virtual bdevs: bdev5 and bdev6.  This
	 * models caching, RAID, partitioning or logical volumes use cases.
	 *
	 * bdev8 is a virtual bdev with multiple base bdevs, but these
	 * base bdevs are themselves virtual bdevs.
	 *
	 *                bdev8
	 *                  |
	 *       +----------+
	 *       |          |
	 *     bdev4      bdev5   bdev6   bdev7
	 *       |          |       |       |
	 *   +---+---+  +---+   +   +---+---+
	 *   |       |   \  |  /         \
	 * bdev0   bdev1  bdev2        bdev3
	 */

	bdev[0] = allocate_bdev("bdev0");
	rc = spdk_bdev_module_claim_bdev(bdev[0], NULL, &bdev_ut_if);
	CU_ASSERT(rc == 0);

	bdev[1] = allocate_bdev("bdev1");
	rc = spdk_bdev_module_claim_bdev(bdev[1], NULL, &bdev_ut_if);
	CU_ASSERT(rc == 0);

	bdev[2] = allocate_bdev("bdev2");
	rc = spdk_bdev_module_claim_bdev(bdev[2], NULL, &bdev_ut_if);
	CU_ASSERT(rc == 0);

	bdev[3] = allocate_bdev("bdev3");
	rc = spdk_bdev_module_claim_bdev(bdev[3], NULL, &bdev_ut_if);
	CU_ASSERT(rc == 0);

	bdev[4] = allocate_vbdev("bdev4");
	rc = spdk_bdev_module_claim_bdev(bdev[4], NULL, &bdev_ut_if);
	CU_ASSERT(rc == 0);

	bdev[5] = allocate_vbdev("bdev5");
	rc = spdk_bdev_module_claim_bdev(bdev[5], NULL, &bdev_ut_if);
	CU_ASSERT(rc == 0);

	bdev[6] = allocate_vbdev("bdev6");

	bdev[7] = allocate_vbdev("bdev7");

	bdev[8] = allocate_vbdev("bdev8");

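	/*
	 * With the claims above in place, only the unclaimed bdevs (bdev6,
	 * bdev7 and bdev8) may still be opened read/write; the claimed ones
	 * can only be opened read-only.
	 */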
	/* Open bdev0 read-only.  This should succeed. */
	rc = spdk_bdev_open_ext("bdev0", false, bdev_ut_event_cb, NULL, &desc[0]);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(desc[0] != NULL);
	CU_ASSERT(bdev[0] == spdk_bdev_desc_get_bdev(desc[0]));
	spdk_bdev_close(desc[0]);

	/*
	 * Open bdev1 read/write.  This should fail since bdev1 has been claimed
	 * by a vbdev module.
	 */
	rc = spdk_bdev_open_ext("bdev1", true, bdev_ut_event_cb, NULL, &desc[1]);
	CU_ASSERT(rc == -EPERM);

	/*
	 * Open bdev4 read/write.  This should fail since bdev4 has been claimed
	 * by a vbdev module.
	 */
	rc = spdk_bdev_open_ext("bdev4", true, bdev_ut_event_cb, NULL, &desc[4]);
	CU_ASSERT(rc == -EPERM);

	/* Open bdev4 read-only.  This should succeed. */
	rc = spdk_bdev_open_ext("bdev4", false, bdev_ut_event_cb, NULL, &desc[4]);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(desc[4] != NULL);
	CU_ASSERT(bdev[4] == spdk_bdev_desc_get_bdev(desc[4]));
	spdk_bdev_close(desc[4]);

	/*
	 * Open bdev8 read/write.  This should succeed since it is a leaf
	 * bdev.
	 */
	rc = spdk_bdev_open_ext("bdev8", true, bdev_ut_event_cb, NULL, &desc[8]);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(desc[8] != NULL);
	CU_ASSERT(bdev[8] == spdk_bdev_desc_get_bdev(desc[8]));
	spdk_bdev_close(desc[8]);

	/*
	 * Open bdev5 read/write.  This should fail since bdev5 has been claimed
	 * by a vbdev module.
	 */
	rc = spdk_bdev_open_ext("bdev5", true, bdev_ut_event_cb, NULL, &desc[5]);
	CU_ASSERT(rc == -EPERM);

	/* Open bdev5 read-only.  This should succeed. */
	rc = spdk_bdev_open_ext("bdev5", false, bdev_ut_event_cb, NULL, &desc[5]);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(desc[5] != NULL);
	CU_ASSERT(bdev[5] == spdk_bdev_desc_get_bdev(desc[5]));
	spdk_bdev_close(desc[5]);

	free_vbdev(bdev[8]);

	free_vbdev(bdev[5]);
	free_vbdev(bdev[6]);
	free_vbdev(bdev[7]);

	free_vbdev(bdev[4]);

	free_bdev(bdev[0]);
	free_bdev(bdev[1]);
	free_bdev(bdev[2]);
	free_bdev(bdev[3]);

	ut_fini_bdev();
}

static void
claim_test(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc, *open_desc;
	int rc;
	uint32_t count;

	ut_init_bdev(NULL);

	/*
	 * A vbdev that uses a read-only bdev may need it to remain read-only.
	 * To do so, it opens the bdev read-only, then claims it without
	 * passing a spdk_bdev_desc.
	 */
	bdev = allocate_bdev("bdev0");
	rc = spdk_bdev_open_ext("bdev0", false, bdev_ut_event_cb, NULL, &desc);
	CU_ASSERT(rc == 0);
	CU_ASSERT(desc->write == false);

	rc = spdk_bdev_module_claim_bdev(bdev, NULL, &bdev_ut_if);
	CU_ASSERT(rc == 0);
	CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_EXCL_WRITE);
	CU_ASSERT(bdev->internal.claim.v1.module == &bdev_ut_if);

	/* There should be only one open descriptor and it should still be ro */
	count = 0;
	TAILQ_FOREACH(open_desc, &bdev->internal.open_descs, link) {
		CU_ASSERT(open_desc == desc);
		CU_ASSERT(!open_desc->write);
		count++;
	}
	CU_ASSERT(count == 1);

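	/*
	 * Claiming with a descriptor upgrades that descriptor in place: the
	 * asserts below show the same open descriptor remains (count stays 1)
	 * with its write flag now set, rather than the bdev being reopened.
	 */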
	/* A read-only bdev is upgraded to read-write if desc is passed. */
	spdk_bdev_module_release_bdev(bdev);
	rc = spdk_bdev_module_claim_bdev(bdev, desc, &bdev_ut_if);
	CU_ASSERT(rc == 0);
	CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_EXCL_WRITE);
	CU_ASSERT(bdev->internal.claim.v1.module == &bdev_ut_if);

	/* There should be only one open descriptor and it should be rw */
	count = 0;
	TAILQ_FOREACH(open_desc, &bdev->internal.open_descs, link) {
		CU_ASSERT(open_desc == desc);
		CU_ASSERT(open_desc->write);
		count++;
	}
	CU_ASSERT(count == 1);

	spdk_bdev_close(desc);
	free_bdev(bdev);
	ut_fini_bdev();
}

static void
bytes_to_blocks_test(void)
{
	struct spdk_bdev bdev;
	uint64_t offset_blocks, num_blocks;

	memset(&bdev, 0, sizeof(bdev));

	bdev.blocklen = 512;

	/* All parameters valid */
	offset_blocks = 0;
	num_blocks = 0;
	CU_ASSERT(bdev_bytes_to_blocks(&bdev, 512, &offset_blocks, 1024, &num_blocks) == 0);
	CU_ASSERT(offset_blocks == 1);
	CU_ASSERT(num_blocks == 2);

	/* Offset not a block multiple */
	CU_ASSERT(bdev_bytes_to_blocks(&bdev, 3, &offset_blocks, 512, &num_blocks) != 0);

	/* Length not a block multiple */
	CU_ASSERT(bdev_bytes_to_blocks(&bdev, 512, &offset_blocks, 3, &num_blocks) != 0);

	/* Repeat the checks with a blocklen that is not a power of two */
	bdev.blocklen = 100;
	CU_ASSERT(bdev_bytes_to_blocks(&bdev, 100, &offset_blocks, 200, &num_blocks) == 0);
	CU_ASSERT(offset_blocks == 1);
	CU_ASSERT(num_blocks == 2);

	/* Offset not a block multiple */
	CU_ASSERT(bdev_bytes_to_blocks(&bdev, 3, &offset_blocks, 100, &num_blocks) != 0);

	/* Length not a block multiple */
	CU_ASSERT(bdev_bytes_to_blocks(&bdev, 100, &offset_blocks, 3, &num_blocks) != 0);
}

static void
num_blocks_test(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc = NULL;
	int rc;

	ut_init_bdev(NULL);
	bdev = allocate_bdev("num_blocks");

	spdk_bdev_notify_blockcnt_change(bdev, 50);

	/* Growing block number */
	CU_ASSERT(spdk_bdev_notify_blockcnt_change(bdev, 70) == 0);
	/* Shrinking block number */
	CU_ASSERT(spdk_bdev_notify_blockcnt_change(bdev, 30) == 0);

	rc = spdk_bdev_open_ext("num_blocks", false, bdev_open_cb1, &desc, &desc);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(desc != NULL);
	CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc));

	/* Growing block number */
	CU_ASSERT(spdk_bdev_notify_blockcnt_change(bdev, 80) == 0);
	/* Shrinking block number is not allowed while the bdev is open */
	CU_ASSERT(spdk_bdev_notify_blockcnt_change(bdev, 20) != 0);

	g_event_type1 = 0xFF;
	/* Growing block number */
	CU_ASSERT(spdk_bdev_notify_blockcnt_change(bdev, 90) == 0);

	poll_threads();
	CU_ASSERT_EQUAL(g_event_type1, SPDK_BDEV_EVENT_RESIZE);

	g_event_type1 = 0xFF;
	/* Growing block number and closing */
	CU_ASSERT(spdk_bdev_notify_blockcnt_change(bdev, 100) == 0);

	spdk_bdev_close(desc);
	free_bdev(bdev);
	ut_fini_bdev();

	poll_threads();

	/* Callback is not called for closed device */
	CU_ASSERT_EQUAL(g_event_type1, 0xFF);
}

static void
io_valid_test(void)
{
	struct spdk_bdev bdev;

	memset(&bdev, 0, sizeof(bdev));

	bdev.blocklen = 512;
	spdk_spin_init(&bdev.internal.spinlock);

	spdk_bdev_notify_blockcnt_change(&bdev, 100);

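	/* bdev_io_valid_blocks() checks the offset/length pair against the
	 * blockcnt (100 here), including uint64_t overflow of offset + length. */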
	/* All parameters valid */
	CU_ASSERT(bdev_io_valid_blocks(&bdev, 1, 2) == true);

	/* Last valid block */
	CU_ASSERT(bdev_io_valid_blocks(&bdev, 99, 1) == true);

	/* Offset past end of bdev */
	CU_ASSERT(bdev_io_valid_blocks(&bdev, 100, 1) == false);

	/* Offset + length past end of bdev */
	CU_ASSERT(bdev_io_valid_blocks(&bdev, 99, 2) == false);

	/* Offset near end of uint64_t range (2^64 - 1) */
	CU_ASSERT(bdev_io_valid_blocks(&bdev, 18446744073709551615ULL, 1) == false);

	spdk_spin_destroy(&bdev.internal.spinlock);
}

static void
alias_add_del_test(void)
{
	struct spdk_bdev *bdev[3];
	int rc;

	ut_init_bdev(NULL);

	/* Creating and registering bdevs */
	bdev[0] = allocate_bdev("bdev0");
	SPDK_CU_ASSERT_FATAL(bdev[0] != 0);

	bdev[1] = allocate_bdev("bdev1");
	SPDK_CU_ASSERT_FATAL(bdev[1] != 0);

	bdev[2] = allocate_bdev("bdev2");
	SPDK_CU_ASSERT_FATAL(bdev[2] != 0);

	poll_threads();

	/*
	 * Try adding an alias identical to the bdev's name.  The alias
	 * collides with the name, so it cannot be added to the aliases list.
	 */
	rc = spdk_bdev_alias_add(bdev[0], bdev[0]->name);
	CU_ASSERT(rc == -EEXIST);

	/* Trying to add an empty (NULL) alias should fail */
	rc = spdk_bdev_alias_add(bdev[0], NULL);
	CU_ASSERT(rc == -EINVAL);

	/* Try adding the same alias to two different registered bdevs */

	/* Alias is used for the first time, so this one should pass */
	rc = spdk_bdev_alias_add(bdev[0], "proper alias 0");
	CU_ASSERT(rc == 0);

	/* Alias was already added to another bdev, so this one should fail */
	rc = spdk_bdev_alias_add(bdev[1], "proper alias 0");
	CU_ASSERT(rc == -EEXIST);

	/* Alias is used for the first time, so this one should pass */
	rc = spdk_bdev_alias_add(bdev[1], "proper alias 1");
	CU_ASSERT(rc == 0);

	/* Try removing aliases from registered bdevs */

	/* Alias is not on the bdev's aliases list, so this one should fail */
	rc = spdk_bdev_alias_del(bdev[0], "not existing");
	CU_ASSERT(rc == -ENOENT);

	/* Alias is present on the bdev's aliases list, so this one should pass */
	rc = spdk_bdev_alias_del(bdev[0], "proper alias 0");
	CU_ASSERT(rc == 0);

	/* Alias is present on the bdev's aliases list, so this one should pass */
	rc = spdk_bdev_alias_del(bdev[1], "proper alias 1");
	CU_ASSERT(rc == 0);

	/* Trying to remove the name instead of an alias should fail; the name
	 * cannot be changed or removed */
	rc = spdk_bdev_alias_del(bdev[0], bdev[0]->name);
	CU_ASSERT(rc != 0);

	/* Trying to delete all aliases from an empty alias list */
	spdk_bdev_alias_del_all(bdev[2]);
	SPDK_CU_ASSERT_FATAL(TAILQ_EMPTY(&bdev[2]->aliases));

	/* Trying to delete all aliases from a non-empty alias list */
	rc = spdk_bdev_alias_add(bdev[2], "alias0");
	CU_ASSERT(rc == 0);
	rc = spdk_bdev_alias_add(bdev[2], "alias1");
	CU_ASSERT(rc == 0);
	spdk_bdev_alias_del_all(bdev[2]);
	CU_ASSERT(TAILQ_EMPTY(&bdev[2]->aliases));

	/* Unregister and free bdevs */
	spdk_bdev_unregister(bdev[0], NULL, NULL);
	spdk_bdev_unregister(bdev[1], NULL, NULL);
	spdk_bdev_unregister(bdev[2], NULL, NULL);

	poll_threads();

	free(bdev[0]);
	free(bdev[1]);
	free(bdev[2]);

	ut_fini_bdev();
}

static void
io_done(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
{
	g_io_done = true;
	g_io_status = bdev_io->internal.status;
	if ((bdev_io->type == SPDK_BDEV_IO_TYPE_ZCOPY) &&
	    (bdev_io->u.bdev.zcopy.start)) {
		g_zcopy_bdev_io = bdev_io;
	} else {
		spdk_bdev_free_io(bdev_io);
		g_zcopy_bdev_io = NULL;
	}
}

struct bdev_ut_io_wait_entry {
	struct spdk_bdev_io_wait_entry entry;
	struct spdk_io_channel *io_ch;
	struct spdk_bdev_desc *desc;
	bool submitted;
};

static void
io_wait_cb(void *arg)
{
	struct bdev_ut_io_wait_entry *entry = arg;
	int rc;

	rc = spdk_bdev_read_blocks(entry->desc, entry->io_ch, NULL, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	entry->submitted = true;
}

static void
bdev_io_types_test(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc = NULL;
	struct spdk_io_channel *io_ch;
	struct spdk_bdev_opts bdev_opts = {};
	int rc;

	spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts));
	bdev_opts.bdev_io_pool_size = 4;
	bdev_opts.bdev_io_cache_size = 2;
	ut_init_bdev(&bdev_opts);

	bdev = allocate_bdev("bdev0");

	rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc);
	CU_ASSERT(rc == 0);
	poll_threads();
	SPDK_CU_ASSERT_FATAL(desc != NULL);
	CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc));
	io_ch = spdk_bdev_get_io_channel(desc);
	CU_ASSERT(io_ch != NULL);

	/* WRITE and WRITE ZEROES are not supported */
	ut_enable_io_type(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, false);
	ut_enable_io_type(SPDK_BDEV_IO_TYPE_WRITE, false);
	rc = spdk_bdev_write_zeroes_blocks(desc, io_ch, 0, 128, io_done, NULL);
	CU_ASSERT(rc == -ENOTSUP);
	ut_enable_io_type(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, true);
	ut_enable_io_type(SPDK_BDEV_IO_TYPE_WRITE, true);

	/* NVME_IO, NVME_IO_MD and NVME_ADMIN are not supported */
	ut_enable_io_type(SPDK_BDEV_IO_TYPE_NVME_IO, false);
	ut_enable_io_type(SPDK_BDEV_IO_TYPE_NVME_IO_MD, false);
	ut_enable_io_type(SPDK_BDEV_IO_TYPE_NVME_ADMIN, false);
	rc = spdk_bdev_nvme_io_passthru(desc, io_ch, NULL, NULL, 0, NULL, NULL);
	CU_ASSERT(rc == -ENOTSUP);
	rc = spdk_bdev_nvme_io_passthru_md(desc, io_ch, NULL, NULL, 0, NULL, 0, NULL, NULL);
	CU_ASSERT(rc == -ENOTSUP);
	rc = spdk_bdev_nvme_admin_passthru(desc, io_ch, NULL, NULL, 0, NULL, NULL);
	CU_ASSERT(rc == -ENOTSUP);
	ut_enable_io_type(SPDK_BDEV_IO_TYPE_NVME_IO, true);
	ut_enable_io_type(SPDK_BDEV_IO_TYPE_NVME_IO_MD, true);
	ut_enable_io_type(SPDK_BDEV_IO_TYPE_NVME_ADMIN, true);

	spdk_put_io_channel(io_ch);
	spdk_bdev_close(desc);
	free_bdev(bdev);
	ut_fini_bdev();
}

static void
bdev_io_wait_test(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc = NULL;
	struct spdk_io_channel *io_ch;
	struct spdk_bdev_opts bdev_opts = {};
	struct bdev_ut_io_wait_entry io_wait_entry;
	struct bdev_ut_io_wait_entry io_wait_entry2;
	int rc;

	spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts));
	bdev_opts.bdev_io_pool_size = 4;
	bdev_opts.bdev_io_cache_size = 2;
	ut_init_bdev(&bdev_opts);

	bdev = allocate_bdev("bdev0");

	rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc);
	CU_ASSERT(rc == 0);
	poll_threads();
	SPDK_CU_ASSERT_FATAL(desc != NULL);
	CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc));
	io_ch = spdk_bdev_get_io_channel(desc);
	CU_ASSERT(io_ch != NULL);

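	/* bdev_io_pool_size is 4, so four reads exhaust the spdk_bdev_io pool;
	 * the fifth submission must fail with -ENOMEM and is then retried via
	 * spdk_bdev_queue_io_wait() as outstanding I/O completes. */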
	rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 4);

	rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, NULL);
	CU_ASSERT(rc == -ENOMEM);

	io_wait_entry.entry.bdev = bdev;
	io_wait_entry.entry.cb_fn = io_wait_cb;
	io_wait_entry.entry.cb_arg = &io_wait_entry;
	io_wait_entry.io_ch = io_ch;
	io_wait_entry.desc = desc;
	io_wait_entry.submitted = false;
	/* Cannot use the same io_wait_entry for two different calls. */
	memcpy(&io_wait_entry2, &io_wait_entry, sizeof(io_wait_entry));
	io_wait_entry2.entry.cb_arg = &io_wait_entry2;

	/* Queue two I/O waits. */
	rc = spdk_bdev_queue_io_wait(bdev, io_ch, &io_wait_entry.entry);
	CU_ASSERT(rc == 0);
	CU_ASSERT(io_wait_entry.submitted == false);
	rc = spdk_bdev_queue_io_wait(bdev, io_ch, &io_wait_entry2.entry);
	CU_ASSERT(rc == 0);
	CU_ASSERT(io_wait_entry2.submitted == false);

	stub_complete_io(1);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 4);
	CU_ASSERT(io_wait_entry.submitted == true);
	CU_ASSERT(io_wait_entry2.submitted == false);

	stub_complete_io(1);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 4);
	CU_ASSERT(io_wait_entry2.submitted == true);

	stub_complete_io(4);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);

	spdk_put_io_channel(io_ch);
	spdk_bdev_close(desc);
	free_bdev(bdev);
	ut_fini_bdev();
}

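/*
 * bdev_io_should_split() is the predicate behind the I/O splitting logic;
 * this test drives it directly with stack-allocated bdev and bdev_io structs,
 * with no registration or channels required.
 */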
static void
bdev_io_spans_split_test(void)
{
	struct spdk_bdev bdev;
	struct spdk_bdev_io bdev_io;
	struct iovec iov[SPDK_BDEV_IO_NUM_CHILD_IOV];

	memset(&bdev, 0, sizeof(bdev));
	bdev_io.u.bdev.iovs = iov;

	bdev_io.type = SPDK_BDEV_IO_TYPE_READ;
	bdev.optimal_io_boundary = 0;
	bdev.max_segment_size = 0;
	bdev.max_num_segments = 0;
	bdev_io.bdev = &bdev;

	/* bdev has no optimal_io_boundary and no segment limits set - so this should return false. */
	CU_ASSERT(bdev_io_should_split(&bdev_io) == false);

	bdev.split_on_optimal_io_boundary = true;
	bdev.optimal_io_boundary = 32;
	bdev_io.type = SPDK_BDEV_IO_TYPE_RESET;

	/* RESETs are not based on LBAs - so this should return false. */
	CU_ASSERT(bdev_io_should_split(&bdev_io) == false);

	bdev_io.type = SPDK_BDEV_IO_TYPE_READ;
	bdev_io.u.bdev.offset_blocks = 0;
	bdev_io.u.bdev.num_blocks = 32;

	/* This I/O runs right up to, but does not cross, the boundary - so this should return false. */
	CU_ASSERT(bdev_io_should_split(&bdev_io) == false);

	bdev_io.u.bdev.num_blocks = 33;

	/* This I/O spans a boundary. */
	CU_ASSERT(bdev_io_should_split(&bdev_io) == true);

	bdev_io.u.bdev.num_blocks = 32;
	bdev.max_segment_size = 512 * 32;
	bdev.max_num_segments = 1;
	bdev_io.u.bdev.iovcnt = 1;
	iov[0].iov_len = 512;

	/* Does not cross a boundary and does not exceed max_segment_size or max_num_segments */
	CU_ASSERT(bdev_io_should_split(&bdev_io) == false);

	bdev.split_on_optimal_io_boundary = false;
	bdev.max_segment_size = 512;
	bdev.max_num_segments = 1;
	bdev_io.u.bdev.iovcnt = 2;

	/* Exceeds max_num_segments */
	CU_ASSERT(bdev_io_should_split(&bdev_io) == true);

	bdev.max_num_segments = 2;
	iov[0].iov_len = 513;
	iov[1].iov_len = 512;

	/* Exceeds max_segment_size */
	CU_ASSERT(bdev_io_should_split(&bdev_io) == true);

	bdev.max_segment_size = 0;
	bdev.write_unit_size = 32;
	bdev.split_on_write_unit = true;
	bdev_io.type = SPDK_BDEV_IO_TYPE_WRITE;

	/* This I/O is one write unit */
	CU_ASSERT(bdev_io_should_split(&bdev_io) == false);

	bdev_io.u.bdev.num_blocks = 32 * 2;

	/* This I/O is more than one write unit */
	CU_ASSERT(bdev_io_should_split(&bdev_io) == true);

	bdev_io.u.bdev.offset_blocks = 1;
	bdev_io.u.bdev.num_blocks = 32;

	/* This I/O is not aligned to the write unit size */
	CU_ASSERT(bdev_io_should_split(&bdev_io) == true);
}

static void
bdev_io_boundary_split_test(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc = NULL;
	struct spdk_io_channel *io_ch;
	struct spdk_bdev_opts bdev_opts = {};
	struct iovec iov[SPDK_BDEV_IO_NUM_CHILD_IOV * 2];
	struct ut_expected_io *expected_io;
	void *md_buf = (void *)0xFF000000;
	uint64_t i;
	int rc;

	spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts));
	bdev_opts.bdev_io_pool_size = 512;
	bdev_opts.bdev_io_cache_size = 64;
	ut_init_bdev(&bdev_opts);

	bdev = allocate_bdev("bdev0");

	rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(desc != NULL);
	CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc));
	io_ch = spdk_bdev_get_io_channel(desc);
	CU_ASSERT(io_ch != NULL);

	bdev->optimal_io_boundary = 16;
	bdev->split_on_optimal_io_boundary = false;

	g_io_done = false;

	/* First test that the I/O does not get split if split_on_optimal_io_boundary == false. */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 14, 8, 1);
	ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 8 * 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 8, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);

	bdev->split_on_optimal_io_boundary = true;
	bdev->md_interleave = false;
	bdev->md_len = 8;

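	/* With separate (non-interleaved) metadata and md_len = 8, each child's
	 * md_buf must advance by 8 bytes per data block; the expected md_buf
	 * offsets below encode exactly that. */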
	/* Now test that a single-vector command is split correctly.
	 * Offset 14, length 8, payload 0xF000
	 *  Child - Offset 14, length 2, payload 0xF000
	 *  Child - Offset 16, length 6, payload 0xF000 + 2 * 512
	 *
	 * Set up the expected values before calling spdk_bdev_read_blocks
	 */
	g_io_done = false;
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 14, 2, 1);
	expected_io->md_buf = md_buf;
	ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 2 * 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 16, 6, 1);
	expected_io->md_buf = md_buf + 2 * 8;
	ut_expected_io_set_iov(expected_io, 0, (void *)(0xF000 + 2 * 512), 6 * 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	/* spdk_bdev_read_blocks will submit the first child immediately. */
	rc = spdk_bdev_read_blocks_with_md(desc, io_ch, (void *)0xF000, md_buf,
					   14, 8, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2);
	stub_complete_io(2);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);

	/* Now set up a more complex, multi-vector command that needs to be split,
	 * including splitting iovecs.
	 */
	iov[0].iov_base = (void *)0x10000;
	iov[0].iov_len = 512;
	iov[1].iov_base = (void *)0x20000;
	iov[1].iov_len = 20 * 512;
	iov[2].iov_base = (void *)0x30000;
	iov[2].iov_len = 11 * 512;

	g_io_done = false;
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 14, 2, 2);
	expected_io->md_buf = md_buf;
	ut_expected_io_set_iov(expected_io, 0, (void *)0x10000, 512);
	ut_expected_io_set_iov(expected_io, 1, (void *)0x20000, 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 16, 16, 1);
	expected_io->md_buf = md_buf + 2 * 8;
	ut_expected_io_set_iov(expected_io, 0, (void *)(0x20000 + 512), 16 * 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 32, 14, 2);
	expected_io->md_buf = md_buf + 18 * 8;
	ut_expected_io_set_iov(expected_io, 0, (void *)(0x20000 + 17 * 512), 3 * 512);
	ut_expected_io_set_iov(expected_io, 1, (void *)0x30000, 11 * 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	rc = spdk_bdev_writev_blocks_with_md(desc, io_ch, iov, 3, md_buf,
					     14, 32, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 3);
	stub_complete_io(3);
	CU_ASSERT(g_io_done == true);

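	/* SPDK_BDEV_IO_NUM_CHILD_IOV is the iovec capacity of a single child
	 * I/O; parents carrying more iovecs than that must be split again even
	 * where the optimal I/O boundary alone would allow a single child. */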
	/* Test a multi-vector command that needs to be split by the I/O boundary
	 * and then split further due to the capacity of child iovs.
	 */
	for (i = 0; i < SPDK_BDEV_IO_NUM_CHILD_IOV * 2; i++) {
		iov[i].iov_base = (void *)((i + 1) * 0x10000);
		iov[i].iov_len = 512;
	}

	bdev->optimal_io_boundary = SPDK_BDEV_IO_NUM_CHILD_IOV;
	g_io_done = false;
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0, SPDK_BDEV_IO_NUM_CHILD_IOV,
					   SPDK_BDEV_IO_NUM_CHILD_IOV);
	expected_io->md_buf = md_buf;
	for (i = 0; i < SPDK_BDEV_IO_NUM_CHILD_IOV; i++) {
		ut_expected_io_set_iov(expected_io, i, (void *)((i + 1) * 0x10000), 512);
	}
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, SPDK_BDEV_IO_NUM_CHILD_IOV,
					   SPDK_BDEV_IO_NUM_CHILD_IOV, SPDK_BDEV_IO_NUM_CHILD_IOV);
	expected_io->md_buf = md_buf + SPDK_BDEV_IO_NUM_CHILD_IOV * 8;
	for (i = 0; i < SPDK_BDEV_IO_NUM_CHILD_IOV; i++) {
		ut_expected_io_set_iov(expected_io, i,
				       (void *)((i + 1 + SPDK_BDEV_IO_NUM_CHILD_IOV) * 0x10000), 512);
	}
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	rc = spdk_bdev_readv_blocks_with_md(desc, io_ch, iov, SPDK_BDEV_IO_NUM_CHILD_IOV * 2, md_buf,
					    0, SPDK_BDEV_IO_NUM_CHILD_IOV * 2, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);

	/* Test a multi-vector command that needs to be split by the I/O boundary
	 * and then split further due to the capacity of child iovs.  In this case,
	 * the length of the rest of the iovec array within an I/O boundary is a
	 * multiple of the block size.
	 */

	/* Fill the iovec array for exactly one boundary.  The iovec count for
	 * this boundary is SPDK_BDEV_IO_NUM_CHILD_IOV + 1, which exceeds the
	 * capacity of child iovs.
	 */
	for (i = 0; i < SPDK_BDEV_IO_NUM_CHILD_IOV - 2; i++) {
		iov[i].iov_base = (void *)((i + 1) * 0x10000);
		iov[i].iov_len = 512;
	}
	for (i = SPDK_BDEV_IO_NUM_CHILD_IOV - 2; i < SPDK_BDEV_IO_NUM_CHILD_IOV; i++) {
		iov[i].iov_base = (void *)((i + 1) * 0x10000);
		iov[i].iov_len = 256;
	}
	iov[SPDK_BDEV_IO_NUM_CHILD_IOV].iov_base = (void *)((SPDK_BDEV_IO_NUM_CHILD_IOV + 1) * 0x10000);
	iov[SPDK_BDEV_IO_NUM_CHILD_IOV].iov_len = 512;

	/* Add an extra iovec to trigger split */
	iov[SPDK_BDEV_IO_NUM_CHILD_IOV + 1].iov_base = (void *)((SPDK_BDEV_IO_NUM_CHILD_IOV + 2) * 0x10000);
	iov[SPDK_BDEV_IO_NUM_CHILD_IOV + 1].iov_len = 512;

	bdev->optimal_io_boundary = SPDK_BDEV_IO_NUM_CHILD_IOV;
	g_io_done = false;
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0,
					   SPDK_BDEV_IO_NUM_CHILD_IOV - 1, SPDK_BDEV_IO_NUM_CHILD_IOV);
	expected_io->md_buf = md_buf;
	for (i = 0; i < SPDK_BDEV_IO_NUM_CHILD_IOV - 2; i++) {
		ut_expected_io_set_iov(expected_io, i,
				       (void *)((i + 1) * 0x10000), 512);
	}
	for (i = SPDK_BDEV_IO_NUM_CHILD_IOV - 2; i < SPDK_BDEV_IO_NUM_CHILD_IOV; i++) {
		ut_expected_io_set_iov(expected_io, i,
				       (void *)((i + 1) * 0x10000), 256);
	}
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, SPDK_BDEV_IO_NUM_CHILD_IOV - 1,
					   1, 1);
	expected_io->md_buf = md_buf + (SPDK_BDEV_IO_NUM_CHILD_IOV - 1) * 8;
	ut_expected_io_set_iov(expected_io, 0,
			       (void *)((SPDK_BDEV_IO_NUM_CHILD_IOV + 1) * 0x10000), 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, SPDK_BDEV_IO_NUM_CHILD_IOV,
					   1, 1);
	expected_io->md_buf = md_buf + SPDK_BDEV_IO_NUM_CHILD_IOV * 8;
	ut_expected_io_set_iov(expected_io, 0,
			       (void *)((SPDK_BDEV_IO_NUM_CHILD_IOV + 2) * 0x10000), 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	rc = spdk_bdev_readv_blocks_with_md(desc, io_ch, iov, SPDK_BDEV_IO_NUM_CHILD_IOV + 2, md_buf,
					    0, SPDK_BDEV_IO_NUM_CHILD_IOV + 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2);
	stub_complete_io(2);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);

	/* Test a multi-vector command that needs to be split by the I/O boundary
	 * and then split further due to the capacity of child iovs.  The child
	 * request offset should be rewound to the last aligned offset so that the
	 * request succeeds without error.
	 */
	for (i = 0; i < SPDK_BDEV_IO_NUM_CHILD_IOV - 1; i++) {
		iov[i].iov_base = (void *)((i + 1) * 0x10000);
		iov[i].iov_len = 512;
	}
	iov[SPDK_BDEV_IO_NUM_CHILD_IOV - 1].iov_base = (void *)(SPDK_BDEV_IO_NUM_CHILD_IOV * 0x10000);
	iov[SPDK_BDEV_IO_NUM_CHILD_IOV - 1].iov_len = 256;

	iov[SPDK_BDEV_IO_NUM_CHILD_IOV].iov_base = (void *)((SPDK_BDEV_IO_NUM_CHILD_IOV + 1) * 0x10000);
	iov[SPDK_BDEV_IO_NUM_CHILD_IOV].iov_len = 256;

	iov[SPDK_BDEV_IO_NUM_CHILD_IOV + 1].iov_base = (void *)((SPDK_BDEV_IO_NUM_CHILD_IOV + 2) * 0x10000);
	iov[SPDK_BDEV_IO_NUM_CHILD_IOV + 1].iov_len = 512;

	bdev->optimal_io_boundary = SPDK_BDEV_IO_NUM_CHILD_IOV;
	g_io_done = false;
	g_io_status = 0;
	/* The first expected io should run from offset 0 to SPDK_BDEV_IO_NUM_CHILD_IOV - 1 */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0,
					   SPDK_BDEV_IO_NUM_CHILD_IOV - 1, SPDK_BDEV_IO_NUM_CHILD_IOV - 1);
	expected_io->md_buf = md_buf;
	for (i = 0; i < SPDK_BDEV_IO_NUM_CHILD_IOV - 1; i++) {
		ut_expected_io_set_iov(expected_io, i,
				       (void *)((i + 1) * 0x10000), 512);
	}
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
	/* The second expected io should run from offset SPDK_BDEV_IO_NUM_CHILD_IOV - 1 to SPDK_BDEV_IO_NUM_CHILD_IOV */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, SPDK_BDEV_IO_NUM_CHILD_IOV - 1,
					   1, 2);
	expected_io->md_buf = md_buf + (SPDK_BDEV_IO_NUM_CHILD_IOV - 1) * 8;
	ut_expected_io_set_iov(expected_io, 0,
			       (void *)(SPDK_BDEV_IO_NUM_CHILD_IOV * 0x10000), 256);
	ut_expected_io_set_iov(expected_io, 1,
			       (void *)((SPDK_BDEV_IO_NUM_CHILD_IOV + 1) * 0x10000), 256);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
	/* The third expected io should run from offset SPDK_BDEV_IO_NUM_CHILD_IOV to SPDK_BDEV_IO_NUM_CHILD_IOV + 1 */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, SPDK_BDEV_IO_NUM_CHILD_IOV,
					   1, 1);
	expected_io->md_buf = md_buf + SPDK_BDEV_IO_NUM_CHILD_IOV * 8;
	ut_expected_io_set_iov(expected_io, 0,
			       (void *)((SPDK_BDEV_IO_NUM_CHILD_IOV + 2) * 0x10000), 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	rc = spdk_bdev_readv_blocks_with_md(desc, io_ch, iov, SPDK_BDEV_IO_NUM_CHILD_IOV * 2, md_buf,
					    0, SPDK_BDEV_IO_NUM_CHILD_IOV + 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2);
	stub_complete_io(2);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);

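	/* The bdev block size is 512, so with optimal_io_boundary = 128 each
	 * child I/O below covers at most 128 * 512 = 64 KiB of payload. */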
	/* Test a multi-vector command that needs to be split due to the I/O
	 * boundary and the capacity of child iovs.  In particular, test the case
	 * where the command is split due to the capacity of child iovs and the
	 * tail address is not aligned with the block size, so it is rewound to
	 * the aligned address.
	 *
	 * The iovecs used in the read request are complex but are based on the
	 * data collected from a real issue.  We change the base addresses but
	 * keep the lengths so as not to lose the credibility of the test.
	 */
	bdev->optimal_io_boundary = 128;
	g_io_done = false;
	g_io_status = 0;

	for (i = 0; i < 31; i++) {
		iov[i].iov_base = (void *)(0xFEED0000000 + (i << 20));
		iov[i].iov_len = 1024;
	}
	iov[31].iov_base = (void *)0xFEED1F00000;
	iov[31].iov_len = 32768;
	iov[32].iov_base = (void *)0xFEED2000000;
	iov[32].iov_len = 160;
	iov[33].iov_base = (void *)0xFEED2100000;
	iov[33].iov_len = 4096;
	iov[34].iov_base = (void *)0xFEED2200000;
	iov[34].iov_len = 4096;
	iov[35].iov_base = (void *)0xFEED2300000;
	iov[35].iov_len = 4096;
	iov[36].iov_base = (void *)0xFEED2400000;
	iov[36].iov_len = 4096;
	iov[37].iov_base = (void *)0xFEED2500000;
	iov[37].iov_len = 4096;
	iov[38].iov_base = (void *)0xFEED2600000;
	iov[38].iov_len = 4096;
	iov[39].iov_base = (void *)0xFEED2700000;
	iov[39].iov_len = 4096;
	iov[40].iov_base = (void *)0xFEED2800000;
	iov[40].iov_len = 4096;
	iov[41].iov_base = (void *)0xFEED2900000;
	iov[41].iov_len = 4096;
	iov[42].iov_base = (void *)0xFEED2A00000;
	iov[42].iov_len = 4096;
	iov[43].iov_base = (void *)0xFEED2B00000;
	iov[43].iov_len = 12288;
	iov[44].iov_base = (void *)0xFEED2C00000;
	iov[44].iov_len = 8192;
	iov[45].iov_base = (void *)0xFEED2F00000;
	iov[45].iov_len = 4096;
	iov[46].iov_base = (void *)0xFEED3000000;
	iov[46].iov_len = 4096;
	iov[47].iov_base = (void *)0xFEED3100000;
	iov[47].iov_len = 4096;
	iov[48].iov_base = (void *)0xFEED3200000;
	iov[48].iov_len = 24576;
	iov[49].iov_base = (void *)0xFEED3300000;
	iov[49].iov_len = 16384;
	iov[50].iov_base = (void *)0xFEED3400000;
	iov[50].iov_len = 12288;
	iov[51].iov_base = (void *)0xFEED3500000;
	iov[51].iov_len = 4096;
	iov[52].iov_base = (void *)0xFEED3600000;
	iov[52].iov_len = 4096;
	iov[53].iov_base = (void *)0xFEED3700000;
	iov[53].iov_len = 4096;
	iov[54].iov_base = (void *)0xFEED3800000;
	iov[54].iov_len = 28672;
	iov[55].iov_base = (void *)0xFEED3900000;
	iov[55].iov_len = 20480;
	iov[56].iov_base = (void *)0xFEED3A00000;
	iov[56].iov_len = 4096;
	iov[57].iov_base = (void *)0xFEED3B00000;
	iov[57].iov_len = 12288;
	iov[58].iov_base = (void *)0xFEED3C00000;
	iov[58].iov_len = 4096;
	iov[59].iov_base = (void *)0xFEED3D00000;
	iov[59].iov_len = 4096;
	iov[60].iov_base = (void *)0xFEED3E00000;
	iov[60].iov_len = 352;

	/* The 1st child IO must be from iov[0] to iov[31], split by the capacity
	 * of child iovs.
	 */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0, 126, 32);
	expected_io->md_buf = md_buf;
	for (i = 0; i < 32; i++) {
		ut_expected_io_set_iov(expected_io, i, iov[i].iov_base, iov[i].iov_len);
	}
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

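	/* 31 * 1024 + 32768 = 64512 bytes = 126 blocks, so the first child
	 * stops two blocks short of the 128-block boundary. */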

	/* The 2nd child IO must be from iov[32] to the first 864 bytes of iov[33],
	 * split by the IO boundary requirement.
	 */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 126, 2, 2);
	expected_io->md_buf = md_buf + 126 * 8;
	ut_expected_io_set_iov(expected_io, 0, iov[32].iov_base, iov[32].iov_len);
	ut_expected_io_set_iov(expected_io, 1, iov[33].iov_base, 864);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	/* The 3rd child IO must be from the remaining 3232 bytes of iov[33] to
	 * the first 864 bytes of iov[46], split by the IO boundary requirement.
	 */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 128, 128, 14);
	expected_io->md_buf = md_buf + 128 * 8;
	ut_expected_io_set_iov(expected_io, 0, (void *)((uintptr_t)iov[33].iov_base + 864),
			       iov[33].iov_len - 864);
	ut_expected_io_set_iov(expected_io, 1, iov[34].iov_base, iov[34].iov_len);
	ut_expected_io_set_iov(expected_io, 2, iov[35].iov_base, iov[35].iov_len);
	ut_expected_io_set_iov(expected_io, 3, iov[36].iov_base, iov[36].iov_len);
	ut_expected_io_set_iov(expected_io, 4, iov[37].iov_base, iov[37].iov_len);
	ut_expected_io_set_iov(expected_io, 5, iov[38].iov_base, iov[38].iov_len);
	ut_expected_io_set_iov(expected_io, 6, iov[39].iov_base, iov[39].iov_len);
	ut_expected_io_set_iov(expected_io, 7, iov[40].iov_base, iov[40].iov_len);
	ut_expected_io_set_iov(expected_io, 8, iov[41].iov_base, iov[41].iov_len);
	ut_expected_io_set_iov(expected_io, 9, iov[42].iov_base, iov[42].iov_len);
	ut_expected_io_set_iov(expected_io, 10, iov[43].iov_base, iov[43].iov_len);
	ut_expected_io_set_iov(expected_io, 11, iov[44].iov_base, iov[44].iov_len);
	ut_expected_io_set_iov(expected_io, 12, iov[45].iov_base, iov[45].iov_len);
	ut_expected_io_set_iov(expected_io, 13, iov[46].iov_base, 864);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	/* The 4th child IO must be from the remaining 3232 bytes of iov[46] to the
	 * first 864 bytes of iov[52], split by the IO boundary requirement.
	 */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 256, 128, 7);
	expected_io->md_buf = md_buf + 256 * 8;
	ut_expected_io_set_iov(expected_io, 0, (void *)((uintptr_t)iov[46].iov_base + 864),
			       iov[46].iov_len - 864);
	ut_expected_io_set_iov(expected_io, 1, iov[47].iov_base, iov[47].iov_len);
	ut_expected_io_set_iov(expected_io, 2, iov[48].iov_base, iov[48].iov_len);
	ut_expected_io_set_iov(expected_io, 3, iov[49].iov_base, iov[49].iov_len);
	ut_expected_io_set_iov(expected_io, 4, iov[50].iov_base, iov[50].iov_len);
	ut_expected_io_set_iov(expected_io, 5, iov[51].iov_base, iov[51].iov_len);
	ut_expected_io_set_iov(expected_io, 6, iov[52].iov_base, 864);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
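
	/* Each boundary-split child above carries exactly 128 blocks. For example,
	 * the 4th child is 3232 + 4096 + 24576 + 16384 + 12288 + 4096 + 864 =
	 * 65536 bytes = 128 * 512; the 864/3232 byte splits keep every child a
	 * whole number of blocks.
	 */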

	/* The 5th child IO must be from the remaining 3232 bytes of iov[52] to
	 * the first 4960 bytes of iov[57], split by the IO boundary requirement.
	 */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 384, 128, 6);
	expected_io->md_buf = md_buf + 384 * 8;
	ut_expected_io_set_iov(expected_io, 0, (void *)((uintptr_t)iov[52].iov_base + 864),
			       iov[52].iov_len - 864);
	ut_expected_io_set_iov(expected_io, 1, iov[53].iov_base, iov[53].iov_len);
	ut_expected_io_set_iov(expected_io, 2, iov[54].iov_base, iov[54].iov_len);
	ut_expected_io_set_iov(expected_io, 3, iov[55].iov_base, iov[55].iov_len);
	ut_expected_io_set_iov(expected_io, 4, iov[56].iov_base, iov[56].iov_len);
	ut_expected_io_set_iov(expected_io, 5, iov[57].iov_base, 4960);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	/* The 6th child IO must be from the remaining 7328 bytes of iov[57]
	 * to the first 3936 bytes of iov[59], split by the capacity of child iovs.
	 */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 512, 30, 3);
	expected_io->md_buf = md_buf + 512 * 8;
	ut_expected_io_set_iov(expected_io, 0, (void *)((uintptr_t)iov[57].iov_base + 4960),
			       iov[57].iov_len - 4960);
	ut_expected_io_set_iov(expected_io, 1, iov[58].iov_base, iov[58].iov_len);
	ut_expected_io_set_iov(expected_io, 2, iov[59].iov_base, 3936);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	/* The 7th child IO is from the remaining 160 bytes of iov[59] and iov[60]. */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 542, 1, 2);
	expected_io->md_buf = md_buf + 542 * 8;
	ut_expected_io_set_iov(expected_io, 0, (void *)((uintptr_t)iov[59].iov_base + 3936),
			       iov[59].iov_len - 3936);
	ut_expected_io_set_iov(expected_io, 1, iov[60].iov_base, iov[60].iov_len);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	rc = spdk_bdev_readv_blocks_with_md(desc, io_ch, iov, 61, md_buf,
					    0, 543, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 5);
	stub_complete_io(5);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
	CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS);
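
	/* The batch sizes asserted above follow from the 32-entry child iov budget
	 * per split round: round 1 is child 1 alone (32 entries), round 2 is
	 * children 2-6 (2 + 14 + 7 + 6 + 3 = 32 entries), and round 3 is child 7
	 * (2 entries). With only 3 entries left in round 2, the 6th child is
	 * trimmed to 3936 bytes of iov[59] so that it stays a whole number of
	 * blocks (7328 + 4096 + 3936 = 30 * 512).
	 */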

	/* Test a WRITE_ZEROES that would span an I/O boundary. WRITE_ZEROES should not be
	 * split, so test that.
	 */
	bdev->optimal_io_boundary = 15;
	g_io_done = false;
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, 9, 36, 0);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	rc = spdk_bdev_write_zeroes_blocks(desc, io_ch, 9, 36, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == true);

	/* Test an UNMAP. This should also not be split. */
	bdev->optimal_io_boundary = 16;
	g_io_done = false;
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_UNMAP, 15, 2, 0);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	rc = spdk_bdev_unmap_blocks(desc, io_ch, 15, 2, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == true);

	/* Test a FLUSH. This should also not be split. */
	bdev->optimal_io_boundary = 16;
	g_io_done = false;
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_FLUSH, 15, 2, 0);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	rc = spdk_bdev_flush_blocks(desc, io_ch, 15, 2, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == true);

	/* Test a COPY. This should also not be split. */
	bdev->optimal_io_boundary = 15;
	g_io_done = false;
	expected_io = ut_alloc_expected_copy_io(SPDK_BDEV_IO_TYPE_COPY, 9, 45, 36);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	rc = spdk_bdev_copy_blocks(desc, io_ch, 9, 45, 36, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == true);

	CU_ASSERT(TAILQ_EMPTY(&g_bdev_ut_channel->expected_io));

	/* Child requests return an error status */
	bdev->optimal_io_boundary = 16;
	iov[0].iov_base = (void *)0x10000;
	iov[0].iov_len = 512 * 64;
	g_io_exp_status = SPDK_BDEV_IO_STATUS_FAILED;
	g_io_done = false;
	g_io_status = SPDK_BDEV_IO_STATUS_SUCCESS;

	rc = spdk_bdev_readv_blocks(desc, io_ch, iov, 1, 1, 64, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 5);
	stub_complete_io(4);
	CU_ASSERT(g_io_done == false);
	CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED);
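
	/* Five children, not four: the read starts at block 1, so the split is
	 * 15 + 16 + 16 + 16 + 1 = 64 blocks. Completing four failed children does
	 * not complete the parent early; the failure is reported only once the
	 * last child finishes.
	 */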

	/* Test that a multi-vector command is terminated with failure, without
	 * continuing the splitting process, when one of the child IOs fails.
	 * The multi-vector command is the same as above: it needs to be split on
	 * the IO boundary and then split further due to the capacity of child iovs.
	 */
	for (i = 0; i < SPDK_BDEV_IO_NUM_CHILD_IOV - 1; i++) {
		iov[i].iov_base = (void *)((i + 1) * 0x10000);
		iov[i].iov_len = 512;
	}
	iov[SPDK_BDEV_IO_NUM_CHILD_IOV - 1].iov_base = (void *)(SPDK_BDEV_IO_NUM_CHILD_IOV * 0x10000);
	iov[SPDK_BDEV_IO_NUM_CHILD_IOV - 1].iov_len = 256;

	iov[SPDK_BDEV_IO_NUM_CHILD_IOV].iov_base = (void *)((SPDK_BDEV_IO_NUM_CHILD_IOV + 1) * 0x10000);
	iov[SPDK_BDEV_IO_NUM_CHILD_IOV].iov_len = 256;

	iov[SPDK_BDEV_IO_NUM_CHILD_IOV + 1].iov_base = (void *)((SPDK_BDEV_IO_NUM_CHILD_IOV + 2) * 0x10000);
	iov[SPDK_BDEV_IO_NUM_CHILD_IOV + 1].iov_len = 512;

	bdev->optimal_io_boundary = SPDK_BDEV_IO_NUM_CHILD_IOV;

	g_io_exp_status = SPDK_BDEV_IO_STATUS_FAILED;
	g_io_done = false;
	g_io_status = SPDK_BDEV_IO_STATUS_SUCCESS;

	rc = spdk_bdev_readv_blocks(desc, io_ch, iov, SPDK_BDEV_IO_NUM_CHILD_IOV * 2, 0,
				    SPDK_BDEV_IO_NUM_CHILD_IOV + 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED);

	g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS;

	/* For this test we will create the following conditions to hit the code path where
	 * we are trying to send an IO following a split that has no iovs because we had to
	 * trim them for alignment reasons.
	 *
	 * - 16K boundary, our IO will start at offset 0 with a length of 0x4200
	 * - Our IOVs are 0x212 in size so that we run into the 16K boundary at child IOV
	 *   position 30 and overshoot by 0x2e.
	 * - That means we'll send the IO and loop back to pick up the remaining bytes at
	 *   child IOV index 31. When we do, we find that we have to shorten index 31 by 0x2e
	 *   which eliminates that vector, so we just send the first split IO with 31 vectors
	 *   (the last one shortened) and let the completion pick up the last 2 vectors.
	 */
	bdev->optimal_io_boundary = 32;
	bdev->split_on_optimal_io_boundary = true;
	g_io_done = false;

	/* Init all parent IOVs to 0x212 */
	for (i = 0; i < SPDK_BDEV_IO_NUM_CHILD_IOV + 2; i++) {
		iov[i].iov_base = (void *)((i + 1) * 0x10000);
		iov[i].iov_len = 0x212;
	}

	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0, SPDK_BDEV_IO_NUM_CHILD_IOV,
					   SPDK_BDEV_IO_NUM_CHILD_IOV - 1);
	/* expect 0-29 to be 1:1 with the parent iov */
	for (i = 0; i < SPDK_BDEV_IO_NUM_CHILD_IOV - 2; i++) {
		ut_expected_io_set_iov(expected_io, i, iov[i].iov_base, iov[i].iov_len);
	}

	/* expect index 30 to be shortened to 0x1e4 (0x212 - 0x2e) because of the alignment,
	 * where 0x2e is the amount we overshot the 16K boundary
	 */
	ut_expected_io_set_iov(expected_io, SPDK_BDEV_IO_NUM_CHILD_IOV - 2,
			       (void *)(iov[SPDK_BDEV_IO_NUM_CHILD_IOV - 2].iov_base), 0x1e4);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
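
	/* Checking the arithmetic: 31 iovs of 0x212 bytes reach 0x402e,
	 * overshooting the 16K boundary by 0x2e, so entry 30 is trimmed to
	 * 0x212 - 0x2e = 0x1e4 and the 1st child ends exactly at 0x4000
	 * (32 blocks). The leftover 0x2e bytes of iov[30] plus 0x1d2 bytes of
	 * iov[31] form the final 512-byte block (0x2e + 0x1d2 = 0x200), handled
	 * by the 2nd child below.
	 */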

	/* 2nd child IO will have 2 remaining vectors, one to pick up from the one that was
	 * shortened that takes it to the next boundary and then a final one to get us to
	 * 0x4200 bytes for the IO.
	 */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, SPDK_BDEV_IO_NUM_CHILD_IOV,
					   1, 2);
	/* position 30 picked up the remaining bytes to the next boundary */
	ut_expected_io_set_iov(expected_io, 0,
			       (void *)(iov[SPDK_BDEV_IO_NUM_CHILD_IOV - 2].iov_base + 0x1e4), 0x2e);

	/* position 31 picked up the rest of the transfer to get us to 0x4200 */
	ut_expected_io_set_iov(expected_io, 1,
			       (void *)(iov[SPDK_BDEV_IO_NUM_CHILD_IOV - 1].iov_base), 0x1d2);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	rc = spdk_bdev_readv_blocks(desc, io_ch, iov, SPDK_BDEV_IO_NUM_CHILD_IOV + 1, 0,
				    SPDK_BDEV_IO_NUM_CHILD_IOV + 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);

	spdk_put_io_channel(io_ch);
	spdk_bdev_close(desc);
	free_bdev(bdev);
	ut_fini_bdev();
}

static void
bdev_io_max_size_and_segment_split_test(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc = NULL;
	struct spdk_io_channel *io_ch;
	struct spdk_bdev_opts bdev_opts = {};
	struct iovec iov[SPDK_BDEV_IO_NUM_CHILD_IOV * 2];
	struct ut_expected_io *expected_io;
	uint64_t i;
	int rc;

	spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts));
	bdev_opts.bdev_io_pool_size = 512;
	bdev_opts.bdev_io_cache_size = 64;
	bdev_opts.opts_size = sizeof(bdev_opts);
	ut_init_bdev(&bdev_opts);

	bdev = allocate_bdev("bdev0");

	rc = spdk_bdev_open_ext(bdev->name, true, bdev_ut_event_cb, NULL, &desc);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(desc != NULL);
	io_ch = spdk_bdev_get_io_channel(desc);
	CU_ASSERT(io_ch != NULL);

	bdev->split_on_optimal_io_boundary = false;
	bdev->optimal_io_boundary = 0;

	/* Case 0: max_num_segments == 0,
	 * but the segment size 2 * 512 exceeds max_segment_size (512).
	 */
	bdev->max_segment_size = 512;
	bdev->max_num_segments = 0;
	g_io_done = false;

	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 14, 2, 2);
	ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 512);
	ut_expected_io_set_iov(expected_io, 1, (void *)(0xF000 + 512), 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 2, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);

	/* Case 1: max_segment_size == 0,
	 * but the iov count (2) exceeds max_num_segments (1).
	 */
	bdev->max_segment_size = 0;
	bdev->max_num_segments = 1;
	g_io_done = false;

	iov[0].iov_base = (void *)0x10000;
	iov[0].iov_len = 512;
	iov[1].iov_base = (void *)0x20000;
	iov[1].iov_len = 8 * 512;

	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 14, 1, 1);
	ut_expected_io_set_iov(expected_io, 0, iov[0].iov_base, iov[0].iov_len);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 15, 8, 1);
	ut_expected_io_set_iov(expected_io, 0, iov[1].iov_base, iov[1].iov_len);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	rc = spdk_bdev_readv_blocks(desc, io_ch, iov, 2, 14, 9, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2);
	stub_complete_io(2);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);

	/* Test that a non-vector command is split correctly.
	 * Set up the expected values before calling spdk_bdev_read_blocks
	 */
	bdev->max_segment_size = 512;
	bdev->max_num_segments = 1;
	g_io_done = false;

	/* Child IO 0 */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 14, 1, 1);
	ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	/* Child IO 1 */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 15, 1, 1);
	ut_expected_io_set_iov(expected_io, 0, (void *)(0xF000 + 1 * 512), 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	/* spdk_bdev_read_blocks will submit the first child immediately. */
	rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 2, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2);
	stub_complete_io(2);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
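
	/* With max_num_segments == 1 every child carries exactly one iov: in
	 * Case 1 the split follows the input iov boundaries (1 + 8 blocks),
	 * while here max_segment_size == 512 chops the contiguous 2-block
	 * buffer into two 1-block children.
	 */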

	/* Now set up a more complex, multi-vector command that needs to be split,
	 * including splitting iovecs.
	 */
	bdev->max_segment_size = 2 * 512;
	bdev->max_num_segments = 1;
	g_io_done = false;

	iov[0].iov_base = (void *)0x10000;
	iov[0].iov_len = 2 * 512;
	iov[1].iov_base = (void *)0x20000;
	iov[1].iov_len = 4 * 512;
	iov[2].iov_base = (void *)0x30000;
	iov[2].iov_len = 6 * 512;

	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 14, 2, 1);
	ut_expected_io_set_iov(expected_io, 0, iov[0].iov_base, 512 * 2);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	/* Split iov[1] into 2 iov entries of max_segment_size; with
	 * max_num_segments == 1 each entry becomes its own child IO.
	 */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 16, 2, 1);
	ut_expected_io_set_iov(expected_io, 0, iov[1].iov_base, 512 * 2);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 18, 2, 1);
	ut_expected_io_set_iov(expected_io, 0, iov[1].iov_base + 512 * 2, 512 * 2);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	/* Split iov[2] into 3 iov entries the same way */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 20, 2, 1);
	ut_expected_io_set_iov(expected_io, 0, iov[2].iov_base, 512 * 2);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 22, 2, 1);
	ut_expected_io_set_iov(expected_io, 0, iov[2].iov_base + 512 * 2, 512 * 2);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 24, 2, 1);
	ut_expected_io_set_iov(expected_io, 0, iov[2].iov_base + 512 * 4, 512 * 2);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	rc = spdk_bdev_writev_blocks(desc, io_ch, iov, 3, 14, 12, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 6);
	stub_complete_io(6);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);

	/* Test multi vector command that needs to be split by max_segment_size and
	 * then needs to be split further due to the capacity of the parent IO's
	 * child iovs.
	 */
	bdev->max_segment_size = 512;
	bdev->max_num_segments = 1;
	g_io_done = false;

	for (i = 0; i < SPDK_BDEV_IO_NUM_CHILD_IOV; i++) {
		iov[i].iov_base = (void *)((i + 1) * 0x10000);
		iov[i].iov_len = 512 * 2;
	}

	/* Each input iov is split into 2 iovs; half of the input iovs fill all
	 * child iov entries of a single split round.
	 */
	for (i = 0; i < SPDK_BDEV_IO_NUM_CHILD_IOV / 2; i++) {
		expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 2 * i, 1, 1);
		ut_expected_io_set_iov(expected_io, 0, iov[i].iov_base, 512);
		TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

		expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 2 * i + 1, 1, 1);
		ut_expected_io_set_iov(expected_io, 0, iov[i].iov_base + 512, 512);
		TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
	}

	/* The remaining iovs are split in the second round */
	for (i = SPDK_BDEV_IO_NUM_CHILD_IOV / 2; i < SPDK_BDEV_IO_NUM_CHILD_IOV; i++) {
		expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, i * 2, 1, 1);
		ut_expected_io_set_iov(expected_io, 0, iov[i].iov_base, 512);
		TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

		expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, i * 2 + 1, 1, 1);
		ut_expected_io_set_iov(expected_io, 0, iov[i].iov_base + 512, 512);
		TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
	}

	rc = spdk_bdev_readv_blocks(desc, io_ch, iov, SPDK_BDEV_IO_NUM_CHILD_IOV, 0,
				    SPDK_BDEV_IO_NUM_CHILD_IOV * 2, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == SPDK_BDEV_IO_NUM_CHILD_IOV);
	stub_complete_io(SPDK_BDEV_IO_NUM_CHILD_IOV);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == SPDK_BDEV_IO_NUM_CHILD_IOV);
	stub_complete_io(SPDK_BDEV_IO_NUM_CHILD_IOV);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);

	/* An error case: a child IO produced by the split is not a multiple of the
	 * block size, so the request exits with an error.
	 */
	bdev->max_segment_size = 512;
	bdev->max_num_segments = 1;
	g_io_done = false;

	iov[0].iov_base = (void *)0x10000;
	iov[0].iov_len = 512 + 256;
	iov[1].iov_base = (void *)0x20000;
	iov[1].iov_len = 256;

	/* iov[0] is split into 512 and 256 bytes. In the next split round the
	 * 256-byte fragment is the first iov of a child IO smaller than the
	 * block size, so the split fails.
	 */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0, 1, 1);
	ut_expected_io_set_iov(expected_io, 0, iov[0].iov_base, 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	rc = spdk_bdev_readv_blocks(desc, io_ch, iov, 2, 0, 2, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	/* First child IO is OK */
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);

	/* error exit */
	stub_complete_io(1);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
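
	/* In the failing case above, the 256-byte remainder of iov[0] plus the
	 * 256 bytes of iov[1] would together form the 2nd 512-byte child, but
	 * max_num_segments == 1 allows only the 256-byte fragment, which is
	 * smaller than a block, so the split aborts and the parent completes
	 * with SPDK_BDEV_IO_STATUS_FAILED.
	 */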

	/* Test multi vector command that needs to be split by max_segment_size and
	 * then needs to be split further due to the capacity of child iovs.
	 *
	 * In this case, the last two iovs need to be split, but that would exceed
	 * the capacity of child iovs, so they need to wait until the first batch
	 * completes.
	 */
	bdev->max_segment_size = 512;
	bdev->max_num_segments = SPDK_BDEV_IO_NUM_CHILD_IOV;
	g_io_done = false;

	for (i = 0; i < SPDK_BDEV_IO_NUM_CHILD_IOV - 2; i++) {
		iov[i].iov_base = (void *)((i + 1) * 0x10000);
		iov[i].iov_len = 512;
	}
	for (i = SPDK_BDEV_IO_NUM_CHILD_IOV - 2; i < SPDK_BDEV_IO_NUM_CHILD_IOV; i++) {
		iov[i].iov_base = (void *)((i + 1) * 0x10000);
		iov[i].iov_len = 512 * 2;
	}

	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0,
					   SPDK_BDEV_IO_NUM_CHILD_IOV, SPDK_BDEV_IO_NUM_CHILD_IOV);
	/* 0 ~ (SPDK_BDEV_IO_NUM_CHILD_IOV - 2) will not be split */
	for (i = 0; i < SPDK_BDEV_IO_NUM_CHILD_IOV - 2; i++) {
		ut_expected_io_set_iov(expected_io, i, iov[i].iov_base, iov[i].iov_len);
	}
	/* (SPDK_BDEV_IO_NUM_CHILD_IOV - 2) is split */
	ut_expected_io_set_iov(expected_io, i, iov[i].iov_base, 512);
	ut_expected_io_set_iov(expected_io, i + 1, iov[i].iov_base + 512, 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	/* The child iov entries exceed the parent IO's capacity, so the rest is
	 * split in the next round.
	 */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, SPDK_BDEV_IO_NUM_CHILD_IOV, 2, 2);
	ut_expected_io_set_iov(expected_io, 0, iov[i + 1].iov_base, 512);
	ut_expected_io_set_iov(expected_io, 1, iov[i + 1].iov_base + 512, 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	rc = spdk_bdev_readv_blocks(desc, io_ch, iov, SPDK_BDEV_IO_NUM_CHILD_IOV, 0,
				    SPDK_BDEV_IO_NUM_CHILD_IOV + 2, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == false);

	/* Next round */
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);

	/* This case is similar to the previous one, but the last few child iov
	 * entries do not add up to a full block, so they cannot be put into this
	 * child IO and must wait for the next one.
	 */
	bdev->max_segment_size = 512;
	bdev->max_num_segments = SPDK_BDEV_IO_NUM_CHILD_IOV;
	g_io_done = false;

	for (i = 0; i < SPDK_BDEV_IO_NUM_CHILD_IOV - 2; i++) {
		iov[i].iov_base = (void *)((i + 1) * 0x10000);
		iov[i].iov_len = 512;
	}

	for (i = SPDK_BDEV_IO_NUM_CHILD_IOV - 2; i < SPDK_BDEV_IO_NUM_CHILD_IOV + 2; i++) {
		iov[i].iov_base = (void *)((i + 1) * 0x10000);
		iov[i].iov_len = 128;
	}

	/* The first child iovcnt isn't SPDK_BDEV_IO_NUM_CHILD_IOV but
	 * SPDK_BDEV_IO_NUM_CHILD_IOV - 2, because the remaining 2 iovs are not
	 * enough for a full block.
	 */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0,
					   SPDK_BDEV_IO_NUM_CHILD_IOV - 2, SPDK_BDEV_IO_NUM_CHILD_IOV - 2);
	for (i = 0; i < SPDK_BDEV_IO_NUM_CHILD_IOV - 2; i++) {
		ut_expected_io_set_iov(expected_io, i, iov[i].iov_base, iov[i].iov_len);
	}
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	/* The second child IO waits until the first child IO completes, because
	 * together their iovcnt would exceed the parent IO's child iov capacity.
	 * It covers parent iovs [SPDK_BDEV_IO_NUM_CHILD_IOV - 2, SPDK_BDEV_IO_NUM_CHILD_IOV + 2).
	 */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, SPDK_BDEV_IO_NUM_CHILD_IOV - 2,
					   1, 4);
	ut_expected_io_set_iov(expected_io, 0, iov[i].iov_base, iov[i].iov_len);
	ut_expected_io_set_iov(expected_io, 1, iov[i + 1].iov_base, iov[i + 1].iov_len);
	ut_expected_io_set_iov(expected_io, 2, iov[i + 2].iov_base, iov[i + 2].iov_len);
	ut_expected_io_set_iov(expected_io, 3, iov[i + 3].iov_base, iov[i + 3].iov_len);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	rc = spdk_bdev_readv_blocks(desc, io_ch, iov, SPDK_BDEV_IO_NUM_CHILD_IOV + 2, 0,
				    SPDK_BDEV_IO_NUM_CHILD_IOV - 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);

	/* A very complicated case. Each sg entry exceeds max_segment_size and
	 * needs to be split, each child IO must be a multiple of the block length,
	 * and the total child iovcnt exceeds the parent's child iov capacity.
	 */
	bdev->max_segment_size = 512 + 128;
	bdev->max_num_segments = 3;
	g_io_done = false;

	for (i = 0; i < SPDK_BDEV_IO_NUM_CHILD_IOV - 2; i++) {
		iov[i].iov_base = (void *)((i + 1) * 0x10000);
		iov[i].iov_len = 512 + 256;
	}

	for (i = SPDK_BDEV_IO_NUM_CHILD_IOV - 2; i < SPDK_BDEV_IO_NUM_CHILD_IOV + 2; i++) {
		iov[i].iov_base = (void *)((i + 1) * 0x10000);
		iov[i].iov_len = 512 + 128;
	}

	/* Each for() round generates 3 child IOs that occupy 9 child iov entries,
	 * consuming 4 parent iov entries and 6 blocks; 3 rounds generate 9 child
	 * IOs occupying 27 child iov entries.
	 */
	for (i = 0; i < 3; i++) {
		uint32_t j = i * 4;
		expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, i * 6, 2, 3);
		ut_expected_io_set_iov(expected_io, 0, iov[j].iov_base, 640);
		ut_expected_io_set_iov(expected_io, 1, iov[j].iov_base + 640, 128);
		ut_expected_io_set_iov(expected_io, 2, iov[j + 1].iov_base, 256);
		TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

		/* A child IO must be a multiple of the block length, so iov[j + 2] must be
		 * split. If its third fragment were also added, the multiple-of-blocklen
		 * requirement could not be guaranteed; the dropped fragment still occupies
		 * one of the parent's child iov entries.
		 */
		expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, i * 6 + 2, 2, 2);
		ut_expected_io_set_iov(expected_io, 0, iov[j + 1].iov_base + 256, 512);
		ut_expected_io_set_iov(expected_io, 1, iov[j + 2].iov_base, 512);
		TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

		expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, i * 6 + 4, 2, 3);
		ut_expected_io_set_iov(expected_io, 0, iov[j + 2].iov_base + 512, 256);
		ut_expected_io_set_iov(expected_io, 1, iov[j + 3].iov_base, 640);
		ut_expected_io_set_iov(expected_io, 2, iov[j + 3].iov_base + 640, 128);
		TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
	}

	/* The child iov position is at 27 for the 10th child IO; its parent iov
	 * entry index is 3 * 4 = 12 and its block offset is 3 * 6 = 18.
	 */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 18, 2, 3);
	ut_expected_io_set_iov(expected_io, 0, iov[12].iov_base, 640);
	ut_expected_io_set_iov(expected_io, 1, iov[12].iov_base + 640, 128);
	ut_expected_io_set_iov(expected_io, 2, iov[13].iov_base, 256);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	/* The child iov position is at 30 for the 11th child IO */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 20, 2, 2);
	ut_expected_io_set_iov(expected_io, 0, iov[13].iov_base + 256, 512);
	ut_expected_io_set_iov(expected_io, 1, iov[14].iov_base, 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	/* The 2nd split round starts with child iov position 0; this is the 12th child IO */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 22, 2, 3);
	ut_expected_io_set_iov(expected_io, 0, iov[14].iov_base + 512, 256);
	ut_expected_io_set_iov(expected_io, 1, iov[15].iov_base, 640);
	ut_expected_io_set_iov(expected_io, 2, iov[15].iov_base + 640, 128);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	/* Generate 9 more child IOs occupying 27 child iov entries; each for()
	 * round again consumes 4 parent iov entries and 6 blocks. The parent iov
	 * index starts from 16 and the block offset starts from 24.
	 */
	for (i = 0; i < 3; i++) {
		uint32_t j = i * 4 + 16;
		uint32_t offset = i * 6 + 24;
		expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, offset, 2, 3);
		ut_expected_io_set_iov(expected_io, 0, iov[j].iov_base, 640);
		ut_expected_io_set_iov(expected_io, 1, iov[j].iov_base + 640, 128);
		ut_expected_io_set_iov(expected_io, 2, iov[j + 1].iov_base, 256);
		TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

		/* A child IO must be a multiple of the block length, so iov[j + 2] must be
		 * split. If its third fragment were also added, the multiple-of-blocklen
		 * requirement could not be guaranteed; the dropped fragment still occupies
		 * one of the parent's child iov entries.
		 */
		expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, offset + 2, 2, 2);
		ut_expected_io_set_iov(expected_io, 0, iov[j + 1].iov_base + 256, 512);
		ut_expected_io_set_iov(expected_io, 1, iov[j + 2].iov_base, 512);
		TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

		expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, offset + 4, 2, 3);
		ut_expected_io_set_iov(expected_io, 0, iov[j + 2].iov_base + 512, 256);
		ut_expected_io_set_iov(expected_io, 1, iov[j + 3].iov_base, 640);
		ut_expected_io_set_iov(expected_io, 2, iov[j + 3].iov_base + 640, 128);
		TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
	}

	/* The 22nd child IO, child iov position at 30 */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 42, 1, 1);
	ut_expected_io_set_iov(expected_io, 0, iov[28].iov_base, 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	/* The third round: the 23rd child IO, with child iov position 0 */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 43, 2, 3);
	ut_expected_io_set_iov(expected_io, 0, iov[28].iov_base + 512, 256);
	ut_expected_io_set_iov(expected_io, 1, iov[29].iov_base, 640);
	ut_expected_io_set_iov(expected_io, 2, iov[29].iov_base + 640, 128);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	/* The 24th child IO */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 45, 3, 3);
	ut_expected_io_set_iov(expected_io, 0, iov[30].iov_base, 640);
	ut_expected_io_set_iov(expected_io, 1, iov[31].iov_base, 640);
	ut_expected_io_set_iov(expected_io, 2, iov[32].iov_base, 256);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	/* The 25th child IO */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 48, 2, 2);
	ut_expected_io_set_iov(expected_io, 0, iov[32].iov_base + 256, 384);
	ut_expected_io_set_iov(expected_io, 1, iov[33].iov_base, 640);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	rc = spdk_bdev_readv_blocks(desc, io_ch, iov, SPDK_BDEV_IO_NUM_CHILD_IOV + 2, 0,
				    50, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	/* The parent IO supports up to 32 child iovs, so at most 11 child IOs can
	 * be split off at a time; splitting continues after the first batch
	 * completes.
	 */
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 11);
	stub_complete_io(11);
	CU_ASSERT(g_io_done == false);

	/* The 2nd round */
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 11);
	stub_complete_io(11);
	CU_ASSERT(g_io_done == false);

	/* The last round */
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 3);
	stub_complete_io(3);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
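
	/* Why 11 IOs per batch: each 3-IO pattern occupies 9 of the 32 child iov
	 * entries (3 + 2 + 3 used, plus 1 burned by the block-alignment trim), so
	 * three patterns take 27 entries and the 10th and 11th child IOs take
	 * 3 + 2 more, exhausting the 32-entry budget after 22 blocks.
	 */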

	/* Test a WRITE_ZEROES. This should also not be split. */
	bdev->max_segment_size = 512;
	bdev->max_num_segments = 1;
	g_io_done = false;

	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, 9, 36, 0);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	rc = spdk_bdev_write_zeroes_blocks(desc, io_ch, 9, 36, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == true);

	/* Test an UNMAP. This should also not be split. */
	g_io_done = false;

	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_UNMAP, 15, 4, 0);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	rc = spdk_bdev_unmap_blocks(desc, io_ch, 15, 4, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == true);

	/* Test a FLUSH. This should also not be split. */
	g_io_done = false;

	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_FLUSH, 15, 2, 0);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	rc = spdk_bdev_flush_blocks(desc, io_ch, 15, 2, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == true);

	/* Test a COPY. This should also not be split. */
	g_io_done = false;

	expected_io = ut_alloc_expected_copy_io(SPDK_BDEV_IO_TYPE_COPY, 9, 45, 36);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	rc = spdk_bdev_copy_blocks(desc, io_ch, 9, 45, 36, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == true);

	/* Test that IOs are split on max_rw_size */
	bdev->max_rw_size = 2;
	bdev->max_segment_size = 0;
	bdev->max_num_segments = 0;
	g_io_done = false;

	/* 5 blocks in a contiguous buffer */
	iov[0].iov_base = (void *)0x10000;
	iov[0].iov_len = 5 * 512;

	/* First: offset=0, num_blocks=2 */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0, 2, 1);
	ut_expected_io_set_iov(expected_io, 0, (void *)0x10000, 2 * 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
	/* Second: offset=2, num_blocks=2 */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 2, 2, 1);
	ut_expected_io_set_iov(expected_io, 0, (void *)0x10000 + 2 * 512, 2 * 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
	/* Third: offset=4, num_blocks=1 */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 4, 1, 1);
	ut_expected_io_set_iov(expected_io, 0, (void *)0x10000 + 4 * 512, 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	rc = spdk_bdev_readv_blocks(desc, io_ch, iov, 1, 0, 5, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 3);
	stub_complete_io(3);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
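
	/* max_rw_size acts as a plain length cap here: with no segment limits,
	 * the 5-block read becomes ceil(5 / 2) = 3 children of 2 + 2 + 1 blocks.
	 */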

	/* Check splitting on both max_rw_size + max_num_segments */
	bdev->max_rw_size = 2;
	bdev->max_num_segments = 2;
	bdev->max_segment_size = 0;
	g_io_done = false;

	/* 5 blocks split across 4 iovs */
	iov[0].iov_base = (void *)0x10000;
	iov[0].iov_len = 3 * 512;
	iov[1].iov_base = (void *)0x20000;
	iov[1].iov_len = 256;
	iov[2].iov_base = (void *)0x30000;
	iov[2].iov_len = 256;
	iov[3].iov_base = (void *)0x40000;
	iov[3].iov_len = 512;

	/* First: offset=0, num_blocks=2, iovcnt=1 */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0, 2, 1);
	ut_expected_io_set_iov(expected_io, 0, (void *)0x10000, 2 * 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
	/* Second: offset=2, num_blocks=1, iovcnt=1 (max_num_segments prevents
	 * submitting the rest of iov[0] together with iov[1]+iov[2])
	 */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 2, 1, 1);
	ut_expected_io_set_iov(expected_io, 0, (void *)0x10000 + 2 * 512, 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
	/* Third: offset=3, num_blocks=1, iovcnt=2 (iov[1]+iov[2]) */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 3, 1, 2);
	ut_expected_io_set_iov(expected_io, 0, (void *)0x20000, 256);
	ut_expected_io_set_iov(expected_io, 1, (void *)0x30000, 256);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
	/* Fourth: offset=4, num_blocks=1, iovcnt=1 (iov[3]) */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 4, 1, 1);
	ut_expected_io_set_iov(expected_io, 0, (void *)0x40000, 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	rc = spdk_bdev_readv_blocks(desc, io_ch, iov, 4, 0, 5, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 4);
	stub_complete_io(4);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);

	/* Check splitting on both max_rw_size + max_segment_size */
	bdev->max_rw_size = 2;
	bdev->max_segment_size = 512;
	bdev->max_num_segments = 0;
	g_io_done = false;

	/* 6 blocks in a contiguous buffer */
	iov[0].iov_base = (void *)0x10000;
	iov[0].iov_len = 6 * 512;

	/* We expect 3 IOs each with 2 blocks and 2 iovs */
	for (i = 0; i < 3; ++i) {
		expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, i * 2, 2, 2);
		ut_expected_io_set_iov(expected_io, 0, (void *)0x10000 + i * 2 * 512, 512);
		ut_expected_io_set_iov(expected_io, 1, (void *)0x10000 + i * 2 * 512 + 512, 512);
		TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
	}

	rc = spdk_bdev_readv_blocks(desc, io_ch, iov, 1, 0, 6, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 3);
	stub_complete_io(3);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);

	/* Check splitting on max_rw_size limited by SPDK_BDEV_IO_NUM_CHILD_IOV */
	bdev->max_rw_size = 1;
	bdev->max_segment_size = 0;
	bdev->max_num_segments = 0;
	g_io_done = false;

	/* SPDK_BDEV_IO_NUM_CHILD_IOV + 1 blocks */
	iov[0].iov_base = (void *)0x10000;
	iov[0].iov_len = (SPDK_BDEV_IO_NUM_CHILD_IOV + 1) * 512;

	/* We expect SPDK_BDEV_IO_NUM_CHILD_IOV + 1 IOs each with a single iov */
	for (i = 0; i < SPDK_BDEV_IO_NUM_CHILD_IOV + 1; ++i) {
		expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, i, 1, 1);
		ut_expected_io_set_iov(expected_io, 0, (void *)0x10000 + i * 512, 512);
		TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
	}

	rc = spdk_bdev_readv_blocks(desc, io_ch, iov, 1, 0, SPDK_BDEV_IO_NUM_CHILD_IOV + 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == SPDK_BDEV_IO_NUM_CHILD_IOV);
	stub_complete_io(SPDK_BDEV_IO_NUM_CHILD_IOV);
	CU_ASSERT(g_io_done == false);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);

	spdk_put_io_channel(io_ch);
	spdk_bdev_close(desc);
	free_bdev(bdev);
	ut_fini_bdev();
}

static void
bdev_io_mix_split_test(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc = NULL;
	struct spdk_io_channel *io_ch;
	struct spdk_bdev_opts bdev_opts = {};
	struct iovec iov[SPDK_BDEV_IO_NUM_CHILD_IOV * 2];
	struct ut_expected_io *expected_io;
	uint64_t i;
	int rc;

	spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts));
	bdev_opts.bdev_io_pool_size = 512;
	bdev_opts.bdev_io_cache_size = 64;
	ut_init_bdev(&bdev_opts);

	bdev = allocate_bdev("bdev0");

	rc = spdk_bdev_open_ext(bdev->name, true, bdev_ut_event_cb, NULL, &desc);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(desc != NULL);
	io_ch = spdk_bdev_get_io_channel(desc);
	CU_ASSERT(io_ch != NULL);

	/* First case: optimal_io_boundary == max_segment_size * max_num_segments */
	bdev->split_on_optimal_io_boundary = true;
	bdev->optimal_io_boundary = 16;

	bdev->max_segment_size = 512;
	bdev->max_num_segments = 16;
	g_io_done = false;

	/* An IO crossing the IO boundary requires a split.
	 * Total: 2 child IOs.
	 */

	/* The 1st child IO is split by max_segment_size into multiple segment entries */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 14, 2, 2);
	ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 512);
	ut_expected_io_set_iov(expected_io, 1, (void *)(0xF000 + 512), 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	/* The 2nd child IO is split by max_segment_size the same way */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 16, 2, 2);
	ut_expected_io_set_iov(expected_io, 0, (void *)(0xF000 + 2 * 512), 512);
	ut_expected_io_set_iov(expected_io, 1, (void *)(0xF000 + 3 * 512), 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 4, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2);
	stub_complete_io(2);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
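
	/* In this first case the two constraints coincide: each 16-block boundary
	 * range holds exactly max_num_segments (16) segments of max_segment_size
	 * (512) bytes, so a 4-block read crossing the boundary at offset 14
	 * yields two 2-block children, each with two 512-byte iovs.
	 */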

	/* Second case: optimal_io_boundary > max_segment_size * max_num_segments */
	bdev->max_segment_size = 15 * 512;
	bdev->max_num_segments = 1;
	g_io_done = false;

	/* An IO crossing the IO boundary requires a split.
	 * The 1st child IO's segment size exceeds max_segment_size, so it is split
	 * into multiple segment entries; max_num_segments then turns those into
	 * 2 child IOs.
	 * Total: 3 child IOs.
	 */

	/* The first 2 IOs fall within one IO boundary; because
	 * optimal_io_boundary > max_segment_size * max_num_segments,
	 * that range splits into these first 2 IOs.
	 */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0, 15, 1);
	ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 512 * 15);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 15, 1, 1);
	ut_expected_io_set_iov(expected_io, 0, (void *)(0xF000 + 512 * 15), 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	/* The 3rd child IO exists because of the IO boundary */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 16, 2, 1);
	ut_expected_io_set_iov(expected_io, 0, (void *)(0xF000 + 512 * 16), 512 * 2);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 0, 18, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 3);
	stub_complete_io(3);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);

	/* Third case: optimal_io_boundary < max_segment_size * max_num_segments */
	bdev->max_segment_size = 17 * 512;
	bdev->max_num_segments = 1;
	g_io_done = false;

	/* An IO crossing the IO boundary requires a split;
	 * the child IOs are not split further.
	 * Total: 2 child IOs.
	 */

	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0, 16, 1);
	ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 512 * 16);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 16, 2, 1);
	ut_expected_io_set_iov(expected_io, 0, (void *)(0xF000 + 512 * 16), 512 * 2);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 0, 18, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2);
	stub_complete_io(2);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);

	/* Now set up a more complex, multi-vector command that needs to be split,
	 * including splitting iovecs.
	 * optimal_io_boundary < max_segment_size * max_num_segments
	 */
	bdev->max_segment_size = 3 * 512;
	bdev->max_num_segments = 6;
	g_io_done = false;

	iov[0].iov_base = (void *)0x10000;
	iov[0].iov_len = 4 * 512;
	iov[1].iov_base = (void *)0x20000;
	iov[1].iov_len = 4 * 512;
	iov[2].iov_base = (void *)0x30000;
	iov[2].iov_len = 10 * 512;

	/* An IO crossing the IO boundary requires a split.
	 * The 1st child IO's segment size exceeds max_segment_size and, after
	 * splitting on segment size, its segment count exceeds max_num_segments,
	 * so the 1st child IO is split into 2 child IOs.
	 * Total: 3 child IOs.
	 */

	/* The first 2 IOs fall within one IO boundary; after splitting on segment
	 * size the segment count exceeds max_num_segments, so the range splits
	 * into 2 child IOs.
	 */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 0, 14, 6);
	ut_expected_io_set_iov(expected_io, 0, iov[0].iov_base, 512 * 3);
	ut_expected_io_set_iov(expected_io, 1, iov[0].iov_base + 512 * 3, 512);
	ut_expected_io_set_iov(expected_io, 2, iov[1].iov_base, 512 * 3);
	ut_expected_io_set_iov(expected_io, 3, iov[1].iov_base + 512 * 3, 512);
	ut_expected_io_set_iov(expected_io, 4, iov[2].iov_base, 512 * 3);
	ut_expected_io_set_iov(expected_io, 5, iov[2].iov_base + 512 * 3, 512 * 3);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	/* The 2nd child IO picks up the leftover segment entries */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 14, 2, 1);
	ut_expected_io_set_iov(expected_io, 0, iov[2].iov_base + 512 * 6, 512 * 2);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 16, 2, 1);
	ut_expected_io_set_iov(expected_io, 0, iov[2].iov_base + 512 * 8, 512 * 2);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	rc = spdk_bdev_writev_blocks(desc, io_ch, iov, 3, 0, 18, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 3);
	stub_complete_io(3);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);

	/* A very complicated case: each sg entry exceeds max_segment_size and the
	 * IO splits on the IO boundary.
	 * optimal_io_boundary < max_segment_size * max_num_segments
	 */
	bdev->max_segment_size = 3 * 512;
	bdev->max_num_segments = SPDK_BDEV_IO_NUM_CHILD_IOV;
	g_io_done = false;

	for (i = 0; i < 20; i++) {
		iov[i].iov_base = (void *)((i + 1) * 0x10000);
		iov[i].iov_len = 512 * 4;
	}

	/* An IO crossing the IO boundary requires a split.
	 * The 80-block length splits into 5 child IOs based on the offset and IO
	 * boundary, and each iov entry splits into 2 entries because of
	 * max_segment_size.
	 * Total: 5 child IOs.
	 */

	/* 4 iov entries fall within one IO boundary and each splits into 2,
	 * so each child IO occupies 8 child iov entries.
	 */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 0, 16, 8);
	for (i = 0; i < 4; i++) {
		int iovcnt = i * 2;
		ut_expected_io_set_iov(expected_io, iovcnt, iov[i].iov_base, 512 * 3);
		ut_expected_io_set_iov(expected_io, iovcnt + 1, iov[i].iov_base + 512 * 3, 512);
	}
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	/* The 2nd child IO, using 16 child iov entries of the parent IO in total */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 16, 16, 8);
	for (i = 4; i < 8; i++) {
		int iovcnt = (i - 4) * 2;
		ut_expected_io_set_iov(expected_io, iovcnt, iov[i].iov_base, 512 * 3);
		ut_expected_io_set_iov(expected_io, iovcnt + 1, iov[i].iov_base + 512 * 3, 512);
	}
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	/* The 3rd child IO, using 24 child iov entries of the parent IO in total */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 32, 16, 8);
	for (i = 8; i < 12; i++) {
		int iovcnt = (i - 8) * 2;
		ut_expected_io_set_iov(expected_io, iovcnt, iov[i].iov_base, 512 * 3);
		ut_expected_io_set_iov(expected_io, iovcnt + 1, iov[i].iov_base + 512 * 3, 512);
	}
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	/* The 4th child IO, using all 32 child iov entries of the parent IO */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 48, 16, 8);
	for (i = 12; i < 16; i++) {
		int iovcnt = (i - 12) * 2;
		ut_expected_io_set_iov(expected_io, iovcnt, iov[i].iov_base, 512 * 3);
		ut_expected_io_set_iov(expected_io, iovcnt + 1, iov[i].iov_base + 512 * 3, 512);
	}
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
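
	/* Each child above packs 4 parent iovs, split 3 + 1 blocks apiece, into
	 * 8 child iov entries per 16-block boundary. Five such children would
	 * need 40 entries, exceeding the 32-entry budget, so only 4 go out in
	 * the first round (as asserted below).
	 */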

	/* The 5th child IO would exceed the remaining child iov entries, so it is
	 * split off into the next round.
	 */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 64, 16, 8);
	for (i = 16; i < 20; i++) {
		int iovcnt = (i - 16) * 2;
		ut_expected_io_set_iov(expected_io, iovcnt, iov[i].iov_base, 512 * 3);
		ut_expected_io_set_iov(expected_io, iovcnt + 1, iov[i].iov_base + 512 * 3, 512);
	}
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	rc = spdk_bdev_writev_blocks(desc, io_ch, iov, 20, 0, 80, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	/* First split round */
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 4);
	stub_complete_io(4);
	CU_ASSERT(g_io_done == false);

	/* Second split round */
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);

	spdk_put_io_channel(io_ch);
	spdk_bdev_close(desc);
	free_bdev(bdev);
	ut_fini_bdev();
}

static void
bdev_io_split_with_io_wait(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc = NULL;
	struct spdk_io_channel *io_ch;
	struct spdk_bdev_channel *channel;
	struct spdk_bdev_mgmt_channel *mgmt_ch;
	struct spdk_bdev_opts bdev_opts = {};
	struct iovec iov[3];
	struct ut_expected_io *expected_io;
	int rc;

	spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts));
	bdev_opts.bdev_io_pool_size = 2;
	bdev_opts.bdev_io_cache_size = 1;
	ut_init_bdev(&bdev_opts);

	bdev = allocate_bdev("bdev0");

	rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc);
	CU_ASSERT(rc == 0);
	CU_ASSERT(desc != NULL);
	CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc));
	io_ch = spdk_bdev_get_io_channel(desc);
	CU_ASSERT(io_ch != NULL);
	channel = spdk_io_channel_get_ctx(io_ch);
	mgmt_ch = channel->shared_resource->mgmt_ch;

	bdev->optimal_io_boundary = 16;
	bdev->split_on_optimal_io_boundary = true;

	rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);

	/* Now test that a single-vector command is split correctly.
	 * Offset 14, length 8, payload 0xF000
	 *  Child - Offset 14, length 2, payload 0xF000
	 *  Child - Offset 16, length 6, payload 0xF000 + 2 * 512
	 *
	 * Set up the expected values before calling spdk_bdev_read_blocks
	 */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 14, 2, 1);
	ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 2 * 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 16, 6, 1);
	ut_expected_io_set_iov(expected_io, 0, (void *)(0xF000 + 2 * 512), 6 * 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
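
	/* bdev_io_pool_size is 2 and one spdk_bdev_io is still held by the
	 * outstanding read submitted above; with the parent holding the second,
	 * each child below must wait for an spdk_bdev_io to be returned to the
	 * pool before it can be submitted.
	 */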
3045 */ 3046 3047 /* The first child I/O will be queued to wait until an spdk_bdev_io becomes available */ 3048 rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 8, io_done, NULL); 3049 CU_ASSERT(rc == 0); 3050 CU_ASSERT(!TAILQ_EMPTY(&mgmt_ch->io_wait_queue)); 3051 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 3052 3053 /* Completing the first read I/O will submit the first child */ 3054 stub_complete_io(1); 3055 CU_ASSERT(TAILQ_EMPTY(&mgmt_ch->io_wait_queue)); 3056 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 3057 3058 /* Completing the first child will submit the second child */ 3059 stub_complete_io(1); 3060 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 3061 3062 /* Complete the second child I/O. This should result in our callback getting 3063 * invoked since the parent I/O is now complete. 3064 */ 3065 stub_complete_io(1); 3066 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 3067 3068 /* Now set up a more complex, multi-vector command that needs to be split, 3069 * including splitting iovecs. 3070 */ 3071 iov[0].iov_base = (void *)0x10000; 3072 iov[0].iov_len = 512; 3073 iov[1].iov_base = (void *)0x20000; 3074 iov[1].iov_len = 20 * 512; 3075 iov[2].iov_base = (void *)0x30000; 3076 iov[2].iov_len = 11 * 512; 3077 3078 g_io_done = false; 3079 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 14, 2, 2); 3080 ut_expected_io_set_iov(expected_io, 0, (void *)0x10000, 512); 3081 ut_expected_io_set_iov(expected_io, 1, (void *)0x20000, 512); 3082 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 3083 3084 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 16, 16, 1); 3085 ut_expected_io_set_iov(expected_io, 0, (void *)(0x20000 + 512), 16 * 512); 3086 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 3087 3088 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 32, 14, 2); 3089 ut_expected_io_set_iov(expected_io, 0, (void *)(0x20000 + 17 * 512), 3 * 512); 3090 ut_expected_io_set_iov(expected_io, 1, (void *)0x30000, 11 * 512); 3091 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 3092 3093 rc = spdk_bdev_writev_blocks(desc, io_ch, iov, 3, 14, 32, io_done, NULL); 3094 CU_ASSERT(rc == 0); 3095 CU_ASSERT(g_io_done == false); 3096 3097 /* The following children will be submitted sequentially due to the capacity of 3098 * spdk_bdev_io. 3099 */ 3100 3101 /* Completing the first child will submit the second child */ 3102 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 3103 stub_complete_io(1); 3104 CU_ASSERT(g_io_done == false); 3105 3106 /* Completing the second child will submit the third child */ 3107 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 3108 stub_complete_io(1); 3109 CU_ASSERT(g_io_done == false); 3110 3111 /* Completing the third child will result in our callback getting invoked 3112 * since the parent I/O is now complete. 
3113 */ 3114 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 3115 stub_complete_io(1); 3116 CU_ASSERT(g_io_done == true); 3117 3118 CU_ASSERT(TAILQ_EMPTY(&g_bdev_ut_channel->expected_io)); 3119 3120 spdk_put_io_channel(io_ch); 3121 spdk_bdev_close(desc); 3122 free_bdev(bdev); 3123 ut_fini_bdev(); 3124 } 3125 3126 static void 3127 bdev_io_write_unit_split_test(void) 3128 { 3129 struct spdk_bdev *bdev; 3130 struct spdk_bdev_desc *desc = NULL; 3131 struct spdk_io_channel *io_ch; 3132 struct spdk_bdev_opts bdev_opts = {}; 3133 struct iovec iov[SPDK_BDEV_IO_NUM_CHILD_IOV * 4]; 3134 struct ut_expected_io *expected_io; 3135 uint64_t i; 3136 int rc; 3137 3138 spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts)); 3139 bdev_opts.bdev_io_pool_size = 512; 3140 bdev_opts.bdev_io_cache_size = 64; 3141 ut_init_bdev(&bdev_opts); 3142 3143 bdev = allocate_bdev("bdev0"); 3144 3145 rc = spdk_bdev_open_ext(bdev->name, true, bdev_ut_event_cb, NULL, &desc); 3146 CU_ASSERT(rc == 0); 3147 SPDK_CU_ASSERT_FATAL(desc != NULL); 3148 io_ch = spdk_bdev_get_io_channel(desc); 3149 CU_ASSERT(io_ch != NULL); 3150 3151 /* Write I/O 2x larger than write_unit_size should get split into 2 I/Os */ 3152 bdev->write_unit_size = 32; 3153 bdev->split_on_write_unit = true; 3154 g_io_done = false; 3155 3156 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 0, 32, 1); 3157 ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 32 * 512); 3158 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 3159 3160 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 32, 32, 1); 3161 ut_expected_io_set_iov(expected_io, 0, (void *)(0xF000 + 32 * 512), 32 * 512); 3162 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 3163 3164 rc = spdk_bdev_write_blocks(desc, io_ch, (void *)0xF000, 0, 64, io_done, NULL); 3165 CU_ASSERT(rc == 0); 3166 CU_ASSERT(g_io_done == false); 3167 3168 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 3169 stub_complete_io(2); 3170 CU_ASSERT(g_io_done == true); 3171 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 3172 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS); 3173 3174 /* Same as above but with optimal_io_boundary < write_unit_size - the I/O should be split 3175 * based on write_unit_size, not optimal_io_boundary */ 3176 bdev->split_on_optimal_io_boundary = true; 3177 bdev->optimal_io_boundary = 16; 3178 g_io_done = false; 3179 3180 rc = spdk_bdev_write_blocks(desc, io_ch, (void *)0xF000, 0, 64, io_done, NULL); 3181 CU_ASSERT(rc == 0); 3182 CU_ASSERT(g_io_done == false); 3183 3184 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 3185 stub_complete_io(2); 3186 CU_ASSERT(g_io_done == true); 3187 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 3188 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS); 3189 3190 /* Write I/O should fail if it is smaller than write_unit_size */ 3191 g_io_done = false; 3192 3193 rc = spdk_bdev_write_blocks(desc, io_ch, (void *)0xF000, 0, 31, io_done, NULL); 3194 CU_ASSERT(rc == 0); 3195 CU_ASSERT(g_io_done == false); 3196 3197 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 3198 poll_threads(); 3199 CU_ASSERT(g_io_done == true); 3200 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 3201 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED); 3202 3203 /* Same for I/O not aligned to write_unit_size */ 3204 g_io_done = false; 3205 3206 rc = spdk_bdev_write_blocks(desc, io_ch, (void *)0xF000, 1, 32, io_done, NULL); 3207 CU_ASSERT(rc == 0); 3208 CU_ASSERT(g_io_done == false); 
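/* Offset 1 is not aligned to write_unit_size (32), so the bdev layer fails the request without submitting anything to the backend; the failed completion is delivered asynchronously, hence the poll_threads() below. */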
3209 3210 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 3211 poll_threads(); 3212 CU_ASSERT(g_io_done == true); 3213 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 3214 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED); 3215 3216 /* Write should fail if it needs to be split but there are not enough iovs to submit 3217 * an entire write unit */ 3218 bdev->write_unit_size = SPDK_COUNTOF(iov) / 2; 3219 g_io_done = false; 3220 3221 for (i = 0; i < SPDK_COUNTOF(iov); i++) { 3222 iov[i].iov_base = (void *)(0x1000 + 512 * i); 3223 iov[i].iov_len = 512; 3224 } 3225 3226 rc = spdk_bdev_writev_blocks(desc, io_ch, iov, SPDK_COUNTOF(iov), 0, SPDK_COUNTOF(iov), 3227 io_done, NULL); 3228 CU_ASSERT(rc == 0); 3229 CU_ASSERT(g_io_done == false); 3230 3231 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 3232 poll_threads(); 3233 CU_ASSERT(g_io_done == true); 3234 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 3235 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED); 3236 3237 spdk_put_io_channel(io_ch); 3238 spdk_bdev_close(desc); 3239 free_bdev(bdev); 3240 ut_fini_bdev(); 3241 } 3242 3243 static void 3244 bdev_io_alignment(void) 3245 { 3246 struct spdk_bdev *bdev; 3247 struct spdk_bdev_desc *desc = NULL; 3248 struct spdk_io_channel *io_ch; 3249 struct spdk_bdev_opts bdev_opts = {}; 3250 int rc; 3251 void *buf = NULL; 3252 struct iovec iovs[2]; 3253 int iovcnt; 3254 uint64_t alignment; 3255 3256 spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts)); 3257 bdev_opts.bdev_io_pool_size = 20; 3258 bdev_opts.bdev_io_cache_size = 2; 3259 ut_init_bdev(&bdev_opts); 3260 3261 fn_table.submit_request = stub_submit_request_get_buf; 3262 bdev = allocate_bdev("bdev0"); 3263 3264 rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc); 3265 CU_ASSERT(rc == 0); 3266 CU_ASSERT(desc != NULL); 3267 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 3268 io_ch = spdk_bdev_get_io_channel(desc); 3269 CU_ASSERT(io_ch != NULL); 3270 3271 /* Create aligned buffer */ 3272 rc = posix_memalign(&buf, 4096, 8192); 3273 SPDK_CU_ASSERT_FATAL(rc == 0); 3274 3275 /* Pass aligned single buffer with no alignment required */ 3276 alignment = 1; 3277 bdev->required_alignment = spdk_u32log2(alignment); 3278 3279 rc = spdk_bdev_write_blocks(desc, io_ch, buf, 0, 1, io_done, NULL); 3280 CU_ASSERT(rc == 0); 3281 stub_complete_io(1); 3282 CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt, 3283 alignment)); 3284 3285 rc = spdk_bdev_read_blocks(desc, io_ch, buf, 0, 1, io_done, NULL); 3286 CU_ASSERT(rc == 0); 3287 stub_complete_io(1); 3288 CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt, 3289 alignment)); 3290 3291 /* Pass unaligned single buffer with no alignment required */ 3292 alignment = 1; 3293 bdev->required_alignment = spdk_u32log2(alignment); 3294 3295 rc = spdk_bdev_write_blocks(desc, io_ch, buf + 4, 0, 1, io_done, NULL); 3296 CU_ASSERT(rc == 0); 3297 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0); 3298 CU_ASSERT(g_bdev_io->u.bdev.iovs[0].iov_base == buf + 4); 3299 stub_complete_io(1); 3300 3301 rc = spdk_bdev_read_blocks(desc, io_ch, buf + 4, 0, 1, io_done, NULL); 3302 CU_ASSERT(rc == 0); 3303 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0); 3304 CU_ASSERT(g_bdev_io->u.bdev.iovs[0].iov_base == buf + 4); 3305 stub_complete_io(1); 3306 3307 /* Pass unaligned single buffer with 512 alignment required */ 3308 alignment = 512; 3309 bdev->required_alignment = spdk_u32log2(alignment); 3310 3311 rc = spdk_bdev_write_blocks(desc, io_ch, buf + 
4, 0, 1, io_done, NULL); 3312 CU_ASSERT(rc == 0); 3313 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 1); 3314 CU_ASSERT(g_bdev_io->u.bdev.iovs == &g_bdev_io->internal.bounce_iov); 3315 CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt, 3316 alignment)); 3317 stub_complete_io(1); 3318 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0); 3319 3320 rc = spdk_bdev_read_blocks(desc, io_ch, buf + 4, 0, 1, io_done, NULL); 3321 CU_ASSERT(rc == 0); 3322 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 1); 3323 CU_ASSERT(g_bdev_io->u.bdev.iovs == &g_bdev_io->internal.bounce_iov); 3324 CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt, 3325 alignment)); 3326 stub_complete_io(1); 3327 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0); 3328 3329 /* Pass unaligned single buffer with 4096 alignment required */ 3330 alignment = 4096; 3331 bdev->required_alignment = spdk_u32log2(alignment); 3332 3333 rc = spdk_bdev_write_blocks(desc, io_ch, buf + 8, 0, 1, io_done, NULL); 3334 CU_ASSERT(rc == 0); 3335 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 1); 3336 CU_ASSERT(g_bdev_io->u.bdev.iovs == &g_bdev_io->internal.bounce_iov); 3337 CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt, 3338 alignment)); 3339 stub_complete_io(1); 3340 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0); 3341 3342 rc = spdk_bdev_read_blocks(desc, io_ch, buf + 8, 0, 1, io_done, NULL); 3343 CU_ASSERT(rc == 0); 3344 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 1); 3345 CU_ASSERT(g_bdev_io->u.bdev.iovs == &g_bdev_io->internal.bounce_iov); 3346 CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt, 3347 alignment)); 3348 stub_complete_io(1); 3349 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0); 3350 3351 /* Pass aligned iovs with no alignment required */ 3352 alignment = 1; 3353 bdev->required_alignment = spdk_u32log2(alignment); 3354 3355 iovcnt = 1; 3356 iovs[0].iov_base = buf; 3357 iovs[0].iov_len = 512; 3358 3359 rc = spdk_bdev_writev(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL); 3360 CU_ASSERT(rc == 0); 3361 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0); 3362 stub_complete_io(1); 3363 CU_ASSERT(g_bdev_io->u.bdev.iovs[0].iov_base == iovs[0].iov_base); 3364 3365 rc = spdk_bdev_readv(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL); 3366 CU_ASSERT(rc == 0); 3367 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0); 3368 stub_complete_io(1); 3369 CU_ASSERT(g_bdev_io->u.bdev.iovs[0].iov_base == iovs[0].iov_base); 3370 3371 /* Pass unaligned iovs with no alignment required */ 3372 alignment = 1; 3373 bdev->required_alignment = spdk_u32log2(alignment); 3374 3375 iovcnt = 2; 3376 iovs[0].iov_base = buf + 16; 3377 iovs[0].iov_len = 256; 3378 iovs[1].iov_base = buf + 16 + 256 + 32; 3379 iovs[1].iov_len = 256; 3380 3381 rc = spdk_bdev_writev(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL); 3382 CU_ASSERT(rc == 0); 3383 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0); 3384 stub_complete_io(1); 3385 CU_ASSERT(g_bdev_io->u.bdev.iovs[0].iov_base == iovs[0].iov_base); 3386 3387 rc = spdk_bdev_readv(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL); 3388 CU_ASSERT(rc == 0); 3389 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0); 3390 stub_complete_io(1); 3391 CU_ASSERT(g_bdev_io->u.bdev.iovs[0].iov_base == iovs[0].iov_base); 3392 3393 /* Pass unaligned iov with 2048 alignment required */ 3394 alignment = 2048; 3395 bdev->required_alignment = spdk_u32log2(alignment); 3396 3397 iovcnt = 2; 3398 iovs[0].iov_base = buf + 16; 3399 iovs[0].iov_len = 256; 3400 
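/* buf is 4096-aligned, so buf + 16 and buf + 304 (set below) both violate the required 2048-byte alignment; the asserts that follow expect the bounce buffer path. */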
iovs[1].iov_base = buf + 16 + 256 + 32; 3401 iovs[1].iov_len = 256; 3402 3403 rc = spdk_bdev_writev(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL); 3404 CU_ASSERT(rc == 0); 3405 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == iovcnt); 3406 CU_ASSERT(g_bdev_io->u.bdev.iovs == &g_bdev_io->internal.bounce_iov); 3407 CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt, 3408 alignment)); 3409 stub_complete_io(1); 3410 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0); 3411 3412 rc = spdk_bdev_readv(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL); 3413 CU_ASSERT(rc == 0); 3414 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == iovcnt); 3415 CU_ASSERT(g_bdev_io->u.bdev.iovs == &g_bdev_io->internal.bounce_iov); 3416 CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt, 3417 alignment)); 3418 stub_complete_io(1); 3419 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0); 3420 3421 /* Pass iov without allocated buffer without alignment required */ 3422 alignment = 1; 3423 bdev->required_alignment = spdk_u32log2(alignment); 3424 3425 iovcnt = 1; 3426 iovs[0].iov_base = NULL; 3427 iovs[0].iov_len = 0; 3428 3429 rc = spdk_bdev_readv(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL); 3430 CU_ASSERT(rc == 0); 3431 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0); 3432 CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt, 3433 alignment)); 3434 stub_complete_io(1); 3435 3436 /* Pass iov without allocated buffer with 1024 alignment required */ 3437 alignment = 1024; 3438 bdev->required_alignment = spdk_u32log2(alignment); 3439 3440 iovcnt = 1; 3441 iovs[0].iov_base = NULL; 3442 iovs[0].iov_len = 0; 3443 3444 rc = spdk_bdev_readv(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL); 3445 CU_ASSERT(rc == 0); 3446 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0); 3447 CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt, 3448 alignment)); 3449 stub_complete_io(1); 3450 3451 spdk_put_io_channel(io_ch); 3452 spdk_bdev_close(desc); 3453 free_bdev(bdev); 3454 fn_table.submit_request = stub_submit_request; 3455 ut_fini_bdev(); 3456 3457 free(buf); 3458 } 3459 3460 static void 3461 bdev_io_alignment_with_boundary(void) 3462 { 3463 struct spdk_bdev *bdev; 3464 struct spdk_bdev_desc *desc = NULL; 3465 struct spdk_io_channel *io_ch; 3466 struct spdk_bdev_opts bdev_opts = {}; 3467 int rc; 3468 void *buf = NULL; 3469 struct iovec iovs[2]; 3470 int iovcnt; 3471 uint64_t alignment; 3472 3473 spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts)); 3474 bdev_opts.bdev_io_pool_size = 20; 3475 bdev_opts.bdev_io_cache_size = 2; 3476 bdev_opts.opts_size = sizeof(bdev_opts); 3477 ut_init_bdev(&bdev_opts); 3478 3479 fn_table.submit_request = stub_submit_request_get_buf; 3480 bdev = allocate_bdev("bdev0"); 3481 3482 rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc); 3483 CU_ASSERT(rc == 0); 3484 CU_ASSERT(desc != NULL); 3485 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 3486 io_ch = spdk_bdev_get_io_channel(desc); 3487 CU_ASSERT(io_ch != NULL); 3488 3489 /* Create aligned buffer */ 3490 rc = posix_memalign(&buf, 4096, 131072); 3491 SPDK_CU_ASSERT_FATAL(rc == 0); 3492 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 3493 3494 /* 512 * 3 with 2 IO boundary, allocate small data buffer from bdev layer */ 3495 alignment = 512; 3496 bdev->required_alignment = spdk_u32log2(alignment); 3497 bdev->optimal_io_boundary = 2; 3498 bdev->split_on_optimal_io_boundary = true; 3499 3500 iovcnt = 1; 3501 iovs[0].iov_base = NULL; 3502 iovs[0].iov_len = 
512 * 3; 3503 3504 rc = spdk_bdev_readv_blocks(desc, io_ch, iovs, iovcnt, 1, 3, io_done, NULL); 3505 CU_ASSERT(rc == 0); 3506 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 3507 stub_complete_io(2); 3508 3509 /* 8KiB with 16 IO boundary, allocate large data buffer from bdev layer */ 3510 alignment = 512; 3511 bdev->required_alignment = spdk_u32log2(alignment); 3512 bdev->optimal_io_boundary = 16; 3513 bdev->split_on_optimal_io_boundary = true; 3514 3515 iovcnt = 1; 3516 iovs[0].iov_base = NULL; 3517 iovs[0].iov_len = 512 * 16; 3518 3519 rc = spdk_bdev_readv_blocks(desc, io_ch, iovs, iovcnt, 1, 16, io_done, NULL); 3520 CU_ASSERT(rc == 0); 3521 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 3522 stub_complete_io(2); 3523 3524 /* 512 * 160 with 128 IO boundary, 63.5KiB + 16.5KiB for the two children requests */ 3525 alignment = 512; 3526 bdev->required_alignment = spdk_u32log2(alignment); 3527 bdev->optimal_io_boundary = 128; 3528 bdev->split_on_optimal_io_boundary = true; 3529 3530 iovcnt = 1; 3531 iovs[0].iov_base = buf + 16; 3532 iovs[0].iov_len = 512 * 160; 3533 rc = spdk_bdev_readv_blocks(desc, io_ch, iovs, iovcnt, 1, 160, io_done, NULL); 3534 CU_ASSERT(rc == 0); 3535 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 3536 stub_complete_io(2); 3537 3538 /* 512 * 3 with 2 IO boundary */ 3539 alignment = 512; 3540 bdev->required_alignment = spdk_u32log2(alignment); 3541 bdev->optimal_io_boundary = 2; 3542 bdev->split_on_optimal_io_boundary = true; 3543 3544 iovcnt = 2; 3545 iovs[0].iov_base = buf + 16; 3546 iovs[0].iov_len = 512; 3547 iovs[1].iov_base = buf + 16 + 512 + 32; 3548 iovs[1].iov_len = 1024; 3549 3550 rc = spdk_bdev_writev_blocks(desc, io_ch, iovs, iovcnt, 1, 3, io_done, NULL); 3551 CU_ASSERT(rc == 0); 3552 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 3553 stub_complete_io(2); 3554 3555 rc = spdk_bdev_readv_blocks(desc, io_ch, iovs, iovcnt, 1, 3, io_done, NULL); 3556 CU_ASSERT(rc == 0); 3557 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 3558 stub_complete_io(2); 3559 3560 /* 512 * 64 with 32 IO boundary */ 3561 bdev->optimal_io_boundary = 32; 3562 iovcnt = 2; 3563 iovs[0].iov_base = buf + 16; 3564 iovs[0].iov_len = 16384; 3565 iovs[1].iov_base = buf + 16 + 16384 + 32; 3566 iovs[1].iov_len = 16384; 3567 3568 rc = spdk_bdev_writev_blocks(desc, io_ch, iovs, iovcnt, 1, 64, io_done, NULL); 3569 CU_ASSERT(rc == 0); 3570 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 3); 3571 stub_complete_io(3); 3572 3573 rc = spdk_bdev_readv_blocks(desc, io_ch, iovs, iovcnt, 1, 64, io_done, NULL); 3574 CU_ASSERT(rc == 0); 3575 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 3); 3576 stub_complete_io(3); 3577 3578 /* 512 * 160 with 32 IO boundary */ 3579 iovcnt = 1; 3580 iovs[0].iov_base = buf + 16; 3581 iovs[0].iov_len = 16384 + 65536; 3582 3583 rc = spdk_bdev_writev_blocks(desc, io_ch, iovs, iovcnt, 1, 160, io_done, NULL); 3584 CU_ASSERT(rc == 0); 3585 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 6); 3586 stub_complete_io(6); 3587 3588 spdk_put_io_channel(io_ch); 3589 spdk_bdev_close(desc); 3590 free_bdev(bdev); 3591 fn_table.submit_request = stub_submit_request; 3592 ut_fini_bdev(); 3593 3594 free(buf); 3595 } 3596 3597 static void 3598 histogram_status_cb(void *cb_arg, int status) 3599 { 3600 g_status = status; 3601 } 3602 3603 static void 3604 histogram_data_cb(void *cb_arg, int status, struct spdk_histogram_data *histogram) 3605 { 3606 g_status = status; 3607 g_histogram = histogram; 3608 } 3609 3610 static void 3611 
histogram_io_count(void *ctx, uint64_t start, uint64_t end, uint64_t count, 3612 uint64_t total, uint64_t so_far) 3613 { 3614 g_count += count; 3615 } 3616 3617 static void 3618 histogram_channel_data_cb(void *cb_arg, int status, struct spdk_histogram_data *histogram) 3619 { 3620 spdk_histogram_data_fn cb_fn = cb_arg; 3621 3622 g_status = status; 3623 3624 if (status == 0) { 3625 spdk_histogram_data_iterate(histogram, cb_fn, NULL); 3626 } 3627 } 3628 3629 static void 3630 bdev_histograms(void) 3631 { 3632 struct spdk_bdev *bdev; 3633 struct spdk_bdev_desc *desc = NULL; 3634 struct spdk_io_channel *ch; 3635 struct spdk_histogram_data *histogram; 3636 uint8_t buf[4096]; 3637 int rc; 3638 3639 ut_init_bdev(NULL); 3640 3641 bdev = allocate_bdev("bdev"); 3642 3643 rc = spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc); 3644 CU_ASSERT(rc == 0); 3645 CU_ASSERT(desc != NULL); 3646 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 3647 3648 ch = spdk_bdev_get_io_channel(desc); 3649 CU_ASSERT(ch != NULL); 3650 3651 /* Enable histogram */ 3652 g_status = -1; 3653 spdk_bdev_histogram_enable(bdev, histogram_status_cb, NULL, true); 3654 poll_threads(); 3655 CU_ASSERT(g_status == 0); 3656 CU_ASSERT(bdev->internal.histogram_enabled == true); 3657 3658 /* Allocate histogram */ 3659 histogram = spdk_histogram_data_alloc(); 3660 SPDK_CU_ASSERT_FATAL(histogram != NULL); 3661 3662 /* Check if histogram is zeroed */ 3663 spdk_bdev_histogram_get(bdev, histogram, histogram_data_cb, NULL); 3664 poll_threads(); 3665 CU_ASSERT(g_status == 0); 3666 SPDK_CU_ASSERT_FATAL(g_histogram != NULL); 3667 3668 g_count = 0; 3669 spdk_histogram_data_iterate(g_histogram, histogram_io_count, NULL); 3670 3671 CU_ASSERT(g_count == 0); 3672 3673 rc = spdk_bdev_write_blocks(desc, ch, buf, 0, 1, io_done, NULL); 3674 CU_ASSERT(rc == 0); 3675 3676 spdk_delay_us(10); 3677 stub_complete_io(1); 3678 poll_threads(); 3679 3680 rc = spdk_bdev_read_blocks(desc, ch, buf, 0, 1, io_done, NULL); 3681 CU_ASSERT(rc == 0); 3682 3683 spdk_delay_us(10); 3684 stub_complete_io(1); 3685 poll_threads(); 3686 3687 /* Check if histogram gathered data from all I/O channels */ 3688 g_histogram = NULL; 3689 spdk_bdev_histogram_get(bdev, histogram, histogram_data_cb, NULL); 3690 poll_threads(); 3691 CU_ASSERT(g_status == 0); 3692 CU_ASSERT(bdev->internal.histogram_enabled == true); 3693 SPDK_CU_ASSERT_FATAL(g_histogram != NULL); 3694 3695 g_count = 0; 3696 spdk_histogram_data_iterate(g_histogram, histogram_io_count, NULL); 3697 CU_ASSERT(g_count == 2); 3698 3699 g_count = 0; 3700 spdk_bdev_channel_get_histogram(ch, histogram_channel_data_cb, histogram_io_count); 3701 CU_ASSERT(g_status == 0); 3702 CU_ASSERT(g_count == 2); 3703 3704 /* Disable histogram */ 3705 spdk_bdev_histogram_enable(bdev, histogram_status_cb, NULL, false); 3706 poll_threads(); 3707 CU_ASSERT(g_status == 0); 3708 CU_ASSERT(bdev->internal.histogram_enabled == false); 3709 3710 /* Try to run histogram commands on disabled bdev */ 3711 spdk_bdev_histogram_get(bdev, histogram, histogram_data_cb, NULL); 3712 poll_threads(); 3713 CU_ASSERT(g_status == -EFAULT); 3714 3715 spdk_bdev_channel_get_histogram(ch, histogram_channel_data_cb, NULL); 3716 CU_ASSERT(g_status == -EFAULT); 3717 3718 spdk_histogram_data_free(histogram); 3719 spdk_put_io_channel(ch); 3720 spdk_bdev_close(desc); 3721 free_bdev(bdev); 3722 ut_fini_bdev(); 3723 } 3724 3725 static void 3726 _bdev_compare(bool emulated) 3727 { 3728 struct spdk_bdev *bdev; 3729 struct spdk_bdev_desc *desc = NULL; 3730 struct spdk_io_channel 
*ioch; 3731 struct ut_expected_io *expected_io; 3732 uint64_t offset, num_blocks; 3733 uint32_t num_completed; 3734 char aa_buf[512]; 3735 char bb_buf[512]; 3736 struct iovec compare_iov; 3737 uint8_t expected_io_type; 3738 int rc; 3739 3740 if (emulated) { 3741 expected_io_type = SPDK_BDEV_IO_TYPE_READ; 3742 } else { 3743 expected_io_type = SPDK_BDEV_IO_TYPE_COMPARE; 3744 } 3745 3746 memset(aa_buf, 0xaa, sizeof(aa_buf)); 3747 memset(bb_buf, 0xbb, sizeof(bb_buf)); 3748 3749 g_io_types_supported[SPDK_BDEV_IO_TYPE_COMPARE] = !emulated; 3750 3751 ut_init_bdev(NULL); 3752 fn_table.submit_request = stub_submit_request_get_buf; 3753 bdev = allocate_bdev("bdev"); 3754 3755 rc = spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc); 3756 CU_ASSERT_EQUAL(rc, 0); 3757 SPDK_CU_ASSERT_FATAL(desc != NULL); 3758 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 3759 ioch = spdk_bdev_get_io_channel(desc); 3760 SPDK_CU_ASSERT_FATAL(ioch != NULL); 3761 3762 fn_table.submit_request = stub_submit_request_get_buf; 3763 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 3764 3765 offset = 50; 3766 num_blocks = 1; 3767 compare_iov.iov_base = aa_buf; 3768 compare_iov.iov_len = sizeof(aa_buf); 3769 3770 /* 1. successful comparev */ 3771 expected_io = ut_alloc_expected_io(expected_io_type, offset, num_blocks, 0); 3772 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 3773 3774 g_io_done = false; 3775 g_compare_read_buf = aa_buf; 3776 g_compare_read_buf_len = sizeof(aa_buf); 3777 rc = spdk_bdev_comparev_blocks(desc, ioch, &compare_iov, 1, offset, num_blocks, io_done, NULL); 3778 CU_ASSERT_EQUAL(rc, 0); 3779 num_completed = stub_complete_io(1); 3780 CU_ASSERT_EQUAL(num_completed, 1); 3781 CU_ASSERT(g_io_done == true); 3782 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS); 3783 3784 /* 2. miscompare comparev */ 3785 expected_io = ut_alloc_expected_io(expected_io_type, offset, num_blocks, 0); 3786 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 3787 3788 g_io_done = false; 3789 g_compare_read_buf = bb_buf; 3790 g_compare_read_buf_len = sizeof(bb_buf); 3791 rc = spdk_bdev_comparev_blocks(desc, ioch, &compare_iov, 1, offset, num_blocks, io_done, NULL); 3792 CU_ASSERT_EQUAL(rc, 0); 3793 num_completed = stub_complete_io(1); 3794 CU_ASSERT_EQUAL(num_completed, 1); 3795 CU_ASSERT(g_io_done == true); 3796 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_MISCOMPARE); 3797 3798 /* 3. successful compare */ 3799 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 3800 expected_io = ut_alloc_expected_io(expected_io_type, offset, num_blocks, 0); 3801 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 3802 3803 g_io_done = false; 3804 g_compare_read_buf = aa_buf; 3805 g_compare_read_buf_len = sizeof(aa_buf); 3806 rc = spdk_bdev_compare_blocks(desc, ioch, aa_buf, offset, num_blocks, io_done, NULL); 3807 CU_ASSERT_EQUAL(rc, 0); 3808 num_completed = stub_complete_io(1); 3809 CU_ASSERT_EQUAL(num_completed, 1); 3810 CU_ASSERT(g_io_done == true); 3811 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS); 3812 3813 /* 4. 
miscompare compare */ 3814 expected_io = ut_alloc_expected_io(expected_io_type, offset, num_blocks, 0); 3815 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 3816 3817 g_io_done = false; 3818 g_compare_read_buf = bb_buf; 3819 g_compare_read_buf_len = sizeof(bb_buf); 3820 rc = spdk_bdev_compare_blocks(desc, ioch, aa_buf, offset, num_blocks, io_done, NULL); 3821 CU_ASSERT_EQUAL(rc, 0); 3822 num_completed = stub_complete_io(1); 3823 CU_ASSERT_EQUAL(num_completed, 1); 3824 CU_ASSERT(g_io_done == true); 3825 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_MISCOMPARE); 3826 3827 spdk_put_io_channel(ioch); 3828 spdk_bdev_close(desc); 3829 free_bdev(bdev); 3830 fn_table.submit_request = stub_submit_request; 3831 ut_fini_bdev(); 3832 3833 g_io_types_supported[SPDK_BDEV_IO_TYPE_COMPARE] = true; 3834 3835 g_compare_read_buf = NULL; 3836 } 3837 3838 static void 3839 _bdev_compare_with_md(bool emulated) 3840 { 3841 struct spdk_bdev *bdev; 3842 struct spdk_bdev_desc *desc = NULL; 3843 struct spdk_io_channel *ioch; 3844 struct ut_expected_io *expected_io; 3845 uint64_t offset, num_blocks; 3846 uint32_t num_completed; 3847 char buf[1024 + 16 /* 2 * blocklen + 2 * mdlen */]; 3848 char buf_interleaved_miscompare[1024 + 16 /* 2 * blocklen + 2 * mdlen */]; 3849 char buf_miscompare[1024 /* 2 * blocklen */]; 3850 char md_buf[16]; 3851 char md_buf_miscompare[16]; 3852 struct iovec compare_iov; 3853 uint8_t expected_io_type; 3854 int rc; 3855 3856 if (emulated) { 3857 expected_io_type = SPDK_BDEV_IO_TYPE_READ; 3858 } else { 3859 expected_io_type = SPDK_BDEV_IO_TYPE_COMPARE; 3860 } 3861 3862 memset(buf, 0xaa, sizeof(buf)); 3863 memset(buf_interleaved_miscompare, 0xaa, sizeof(buf_interleaved_miscompare)); 3864 /* make last md different */ 3865 memset(buf_interleaved_miscompare + 1024 + 8, 0xbb, 8); 3866 memset(buf_miscompare, 0xbb, sizeof(buf_miscompare)); 3867 memset(md_buf, 0xaa, 16); 3868 memset(md_buf_miscompare, 0xbb, 16); 3869 3870 g_io_types_supported[SPDK_BDEV_IO_TYPE_COMPARE] = !emulated; 3871 3872 ut_init_bdev(NULL); 3873 fn_table.submit_request = stub_submit_request_get_buf; 3874 bdev = allocate_bdev("bdev"); 3875 3876 rc = spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc); 3877 CU_ASSERT_EQUAL(rc, 0); 3878 SPDK_CU_ASSERT_FATAL(desc != NULL); 3879 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 3880 ioch = spdk_bdev_get_io_channel(desc); 3881 SPDK_CU_ASSERT_FATAL(ioch != NULL); 3882 3883 fn_table.submit_request = stub_submit_request_get_buf; 3884 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 3885 3886 offset = 50; 3887 num_blocks = 2; 3888 3889 /* interleaved md & data */ 3890 bdev->md_interleave = true; 3891 bdev->md_len = 8; 3892 bdev->blocklen = 512 + 8; 3893 compare_iov.iov_base = buf; 3894 compare_iov.iov_len = sizeof(buf); 3895 3896 /* 1. successful compare with md interleaved */ 3897 expected_io = ut_alloc_expected_io(expected_io_type, offset, num_blocks, 0); 3898 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 3899 3900 g_io_done = false; 3901 g_compare_read_buf = buf; 3902 g_compare_read_buf_len = sizeof(buf); 3903 rc = spdk_bdev_comparev_blocks(desc, ioch, &compare_iov, 1, offset, num_blocks, io_done, NULL); 3904 CU_ASSERT_EQUAL(rc, 0); 3905 num_completed = stub_complete_io(1); 3906 CU_ASSERT_EQUAL(num_completed, 1); 3907 CU_ASSERT(g_io_done == true); 3908 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS); 3909 3910 /* 2. 
miscompare with md interleaved */ 3911 expected_io = ut_alloc_expected_io(expected_io_type, offset, num_blocks, 0); 3912 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 3913 3914 g_io_done = false; 3915 g_compare_read_buf = buf_interleaved_miscompare; 3916 g_compare_read_buf_len = sizeof(buf_interleaved_miscompare); 3917 rc = spdk_bdev_comparev_blocks(desc, ioch, &compare_iov, 1, offset, num_blocks, io_done, NULL); 3918 CU_ASSERT_EQUAL(rc, 0); 3919 num_completed = stub_complete_io(1); 3920 CU_ASSERT_EQUAL(num_completed, 1); 3921 CU_ASSERT(g_io_done == true); 3922 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_MISCOMPARE); 3923 3924 /* Separate data & md buffers */ 3925 bdev->md_interleave = false; 3926 bdev->blocklen = 512; 3927 compare_iov.iov_base = buf; 3928 compare_iov.iov_len = 1024; 3929 3930 /* 3. successful compare with md separated */ 3931 expected_io = ut_alloc_expected_io(expected_io_type, offset, num_blocks, 0); 3932 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 3933 3934 g_io_done = false; 3935 g_compare_read_buf = buf; 3936 g_compare_read_buf_len = 1024; 3937 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 3938 g_compare_md_buf = md_buf; 3939 rc = spdk_bdev_comparev_blocks_with_md(desc, ioch, &compare_iov, 1, md_buf, 3940 offset, num_blocks, io_done, NULL); 3941 CU_ASSERT_EQUAL(rc, 0); 3942 num_completed = stub_complete_io(1); 3943 CU_ASSERT_EQUAL(num_completed, 1); 3944 CU_ASSERT(g_io_done == true); 3945 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS); 3946 3947 /* 4. miscompare with md separated where md buf is different */ 3948 expected_io = ut_alloc_expected_io(expected_io_type, offset, num_blocks, 0); 3949 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 3950 3951 g_io_done = false; 3952 g_compare_read_buf = buf; 3953 g_compare_read_buf_len = 1024; 3954 g_compare_md_buf = md_buf_miscompare; 3955 rc = spdk_bdev_comparev_blocks_with_md(desc, ioch, &compare_iov, 1, md_buf, 3956 offset, num_blocks, io_done, NULL); 3957 CU_ASSERT_EQUAL(rc, 0); 3958 num_completed = stub_complete_io(1); 3959 CU_ASSERT_EQUAL(num_completed, 1); 3960 CU_ASSERT(g_io_done == true); 3961 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_MISCOMPARE); 3962 3963 /* 5. 
miscompare with md separated where buf is different */ 3964 expected_io = ut_alloc_expected_io(expected_io_type, offset, num_blocks, 0); 3965 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 3966 3967 g_io_done = false; 3968 g_compare_read_buf = buf_miscompare; 3969 g_compare_read_buf_len = sizeof(buf_miscompare); 3970 g_compare_md_buf = md_buf; 3971 rc = spdk_bdev_comparev_blocks_with_md(desc, ioch, &compare_iov, 1, md_buf, 3972 offset, num_blocks, io_done, NULL); 3973 CU_ASSERT_EQUAL(rc, 0); 3974 num_completed = stub_complete_io(1); 3975 CU_ASSERT_EQUAL(num_completed, 1); 3976 CU_ASSERT(g_io_done == true); 3977 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_MISCOMPARE); 3978 3979 bdev->md_len = 0; 3980 g_compare_md_buf = NULL; 3981 3982 spdk_put_io_channel(ioch); 3983 spdk_bdev_close(desc); 3984 free_bdev(bdev); 3985 fn_table.submit_request = stub_submit_request; 3986 ut_fini_bdev(); 3987 3988 g_io_types_supported[SPDK_BDEV_IO_TYPE_COMPARE] = true; 3989 3990 g_compare_read_buf = NULL; 3991 } 3992 3993 static void 3994 bdev_compare(void) 3995 { 3996 _bdev_compare(false); 3997 _bdev_compare_with_md(false); 3998 } 3999 4000 static void 4001 bdev_compare_emulated(void) 4002 { 4003 _bdev_compare(true); 4004 _bdev_compare_with_md(true); 4005 } 4006 4007 static void 4008 bdev_compare_and_write(void) 4009 { 4010 struct spdk_bdev *bdev; 4011 struct spdk_bdev_desc *desc = NULL; 4012 struct spdk_io_channel *ioch; 4013 struct ut_expected_io *expected_io; 4014 uint64_t offset, num_blocks; 4015 uint32_t num_completed; 4016 char aa_buf[512]; 4017 char bb_buf[512]; 4018 char cc_buf[512]; 4019 char write_buf[512]; 4020 struct iovec compare_iov; 4021 struct iovec write_iov; 4022 int rc; 4023 4024 memset(aa_buf, 0xaa, sizeof(aa_buf)); 4025 memset(bb_buf, 0xbb, sizeof(bb_buf)); 4026 memset(cc_buf, 0xcc, sizeof(cc_buf)); 4027 4028 g_io_types_supported[SPDK_BDEV_IO_TYPE_COMPARE] = false; 4029 4030 ut_init_bdev(NULL); 4031 fn_table.submit_request = stub_submit_request_get_buf; 4032 bdev = allocate_bdev("bdev"); 4033 4034 rc = spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc); 4035 CU_ASSERT_EQUAL(rc, 0); 4036 SPDK_CU_ASSERT_FATAL(desc != NULL); 4037 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 4038 ioch = spdk_bdev_get_io_channel(desc); 4039 SPDK_CU_ASSERT_FATAL(ioch != NULL); 4040 4041 fn_table.submit_request = stub_submit_request_get_buf; 4042 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 4043 4044 offset = 50; 4045 num_blocks = 1; 4046 compare_iov.iov_base = aa_buf; 4047 compare_iov.iov_len = sizeof(aa_buf); 4048 write_iov.iov_base = bb_buf; 4049 write_iov.iov_len = sizeof(bb_buf); 4050 4051 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, offset, num_blocks, 0); 4052 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 4053 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, offset, num_blocks, 0); 4054 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 4055 4056 g_io_done = false; 4057 g_compare_read_buf = aa_buf; 4058 g_compare_read_buf_len = sizeof(aa_buf); 4059 memset(write_buf, 0, sizeof(write_buf)); 4060 g_compare_write_buf = write_buf; 4061 g_compare_write_buf_len = sizeof(write_buf); 4062 rc = spdk_bdev_comparev_and_writev_blocks(desc, ioch, &compare_iov, 1, &write_iov, 1, 4063 offset, num_blocks, io_done, NULL); 4064 /* Trigger range locking */ 4065 poll_threads(); 4066 CU_ASSERT_EQUAL(rc, 0); 4067 num_completed = stub_complete_io(1); 4068 CU_ASSERT_EQUAL(num_completed, 1); 4069 CU_ASSERT(g_io_done == 
false); 4070 num_completed = stub_complete_io(1); 4071 /* Trigger range unlocking */ 4072 poll_threads(); 4073 CU_ASSERT_EQUAL(num_completed, 1); 4074 CU_ASSERT(g_io_done == true); 4075 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS); 4076 CU_ASSERT(memcmp(write_buf, bb_buf, sizeof(write_buf)) == 0); 4077 4078 /* Test miscompare */ 4079 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, offset, num_blocks, 0); 4080 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 4081 4082 g_io_done = false; 4083 g_compare_read_buf = cc_buf; 4084 g_compare_read_buf_len = sizeof(cc_buf); 4085 memset(write_buf, 0, sizeof(write_buf)); 4086 g_compare_write_buf = write_buf; 4087 g_compare_write_buf_len = sizeof(write_buf); 4088 rc = spdk_bdev_comparev_and_writev_blocks(desc, ioch, &compare_iov, 1, &write_iov, 1, 4089 offset, num_blocks, io_done, NULL); 4090 /* Trigger range locking */ 4091 poll_threads(); 4092 CU_ASSERT_EQUAL(rc, 0); 4093 num_completed = stub_complete_io(1); 4094 /* Trigger range unlocking earlier because we expect error here */ 4095 poll_threads(); 4096 CU_ASSERT_EQUAL(num_completed, 1); 4097 CU_ASSERT(g_io_done == true); 4098 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_MISCOMPARE); 4099 num_completed = stub_complete_io(1); 4100 CU_ASSERT_EQUAL(num_completed, 0); 4101 4102 spdk_put_io_channel(ioch); 4103 spdk_bdev_close(desc); 4104 free_bdev(bdev); 4105 fn_table.submit_request = stub_submit_request; 4106 ut_fini_bdev(); 4107 4108 g_io_types_supported[SPDK_BDEV_IO_TYPE_COMPARE] = true; 4109 4110 g_compare_read_buf = NULL; 4111 g_compare_write_buf = NULL; 4112 } 4113 4114 static void 4115 bdev_write_zeroes(void) 4116 { 4117 struct spdk_bdev *bdev; 4118 struct spdk_bdev_desc *desc = NULL; 4119 struct spdk_io_channel *ioch; 4120 struct ut_expected_io *expected_io; 4121 uint64_t offset, num_io_blocks, num_blocks; 4122 uint32_t num_completed, num_requests; 4123 int rc; 4124 4125 ut_init_bdev(NULL); 4126 bdev = allocate_bdev("bdev"); 4127 4128 rc = spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc); 4129 CU_ASSERT_EQUAL(rc, 0); 4130 SPDK_CU_ASSERT_FATAL(desc != NULL); 4131 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 4132 ioch = spdk_bdev_get_io_channel(desc); 4133 SPDK_CU_ASSERT_FATAL(ioch != NULL); 4134 4135 fn_table.submit_request = stub_submit_request; 4136 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 4137 4138 /* First test that if the bdev supports write_zeroes, the request won't be split */ 4139 bdev->md_len = 0; 4140 bdev->blocklen = 4096; 4141 num_blocks = (ZERO_BUFFER_SIZE / bdev->blocklen) * 2; 4142 4143 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, 0, num_blocks, 0); 4144 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 4145 rc = spdk_bdev_write_zeroes_blocks(desc, ioch, 0, num_blocks, io_done, NULL); 4146 CU_ASSERT_EQUAL(rc, 0); 4147 num_completed = stub_complete_io(1); 4148 CU_ASSERT_EQUAL(num_completed, 1); 4149 4150 /* Check that if write zeroes is not supported it'll be replaced by regular writes */ 4151 ut_enable_io_type(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, false); 4152 bdev->max_write_zeroes = bdev_get_max_write(bdev, ZERO_BUFFER_SIZE); 4153 num_io_blocks = ZERO_BUFFER_SIZE / bdev->blocklen; 4154 num_requests = 2; 4155 num_blocks = (ZERO_BUFFER_SIZE / bdev->blocklen) * num_requests; 4156 4157 for (offset = 0; offset < num_requests; ++offset) { 4158 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 4159 offset * num_io_blocks, num_io_blocks, 0); 4160 
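/* Each replacement write covers ZERO_BUFFER_SIZE bytes, presumably served from the bdev layer's internal zero buffer (g_bdev_mgr.zero_buffer). */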
TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 4161 } 4162 4163 rc = spdk_bdev_write_zeroes_blocks(desc, ioch, 0, num_blocks, io_done, NULL); 4164 CU_ASSERT_EQUAL(rc, 0); 4165 num_completed = stub_complete_io(num_requests); 4166 CU_ASSERT_EQUAL(num_completed, num_requests); 4167 4168 /* Check that the splitting is correct if bdev has interleaved metadata */ 4169 bdev->md_interleave = true; 4170 bdev->md_len = 64; 4171 bdev->blocklen = 4096 + 64; 4172 bdev->max_write_zeroes = bdev_get_max_write(bdev, ZERO_BUFFER_SIZE); 4173 num_blocks = (ZERO_BUFFER_SIZE / bdev->blocklen) * 2; 4174 4175 num_requests = offset = 0; 4176 while (offset < num_blocks) { 4177 num_io_blocks = spdk_min(ZERO_BUFFER_SIZE / bdev->blocklen, num_blocks - offset); 4178 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 4179 offset, num_io_blocks, 0); 4180 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 4181 offset += num_io_blocks; 4182 num_requests++; 4183 } 4184 4185 rc = spdk_bdev_write_zeroes_blocks(desc, ioch, 0, num_blocks, io_done, NULL); 4186 CU_ASSERT_EQUAL(rc, 0); 4187 num_completed = stub_complete_io(num_requests); 4188 CU_ASSERT_EQUAL(num_completed, num_requests); 4189 num_completed = stub_complete_io(num_requests); 4190 CU_ASSERT_EQUAL(num_completed, 0); 4191 4192 /* Check the same for a separate metadata buffer */ 4193 bdev->md_interleave = false; 4194 bdev->md_len = 64; 4195 bdev->blocklen = 4096; 4196 bdev->max_write_zeroes = bdev_get_max_write(bdev, ZERO_BUFFER_SIZE); 4197 4198 num_requests = offset = 0; 4199 while (offset < num_blocks) { 4200 num_io_blocks = spdk_min(ZERO_BUFFER_SIZE / (bdev->blocklen + bdev->md_len), num_blocks); 4201 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 4202 offset, num_io_blocks, 0); 4203 expected_io->md_buf = (char *)g_bdev_mgr.zero_buffer + num_io_blocks * bdev->blocklen; 4204 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 4205 offset += num_io_blocks; 4206 num_requests++; 4207 } 4208 4209 rc = spdk_bdev_write_zeroes_blocks(desc, ioch, 0, num_blocks, io_done, NULL); 4210 CU_ASSERT_EQUAL(rc, 0); 4211 num_completed = stub_complete_io(num_requests); 4212 CU_ASSERT_EQUAL(num_completed, num_requests); 4213 4214 ut_enable_io_type(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, true); 4215 spdk_put_io_channel(ioch); 4216 spdk_bdev_close(desc); 4217 free_bdev(bdev); 4218 ut_fini_bdev(); 4219 } 4220 4221 static void 4222 bdev_zcopy_write(void) 4223 { 4224 struct spdk_bdev *bdev; 4225 struct spdk_bdev_desc *desc = NULL; 4226 struct spdk_io_channel *ioch; 4227 struct ut_expected_io *expected_io; 4228 uint64_t offset, num_blocks; 4229 uint32_t num_completed; 4230 char aa_buf[512]; 4231 struct iovec iov; 4232 int rc; 4233 const bool populate = false; 4234 const bool commit = true; 4235 4236 memset(aa_buf, 0xaa, sizeof(aa_buf)); 4237 4238 ut_init_bdev(NULL); 4239 bdev = allocate_bdev("bdev"); 4240 4241 rc = spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc); 4242 CU_ASSERT_EQUAL(rc, 0); 4243 SPDK_CU_ASSERT_FATAL(desc != NULL); 4244 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 4245 ioch = spdk_bdev_get_io_channel(desc); 4246 SPDK_CU_ASSERT_FATAL(ioch != NULL); 4247 4248 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 4249 4250 offset = 50; 4251 num_blocks = 1; 4252 iov.iov_base = NULL; 4253 iov.iov_len = 0; 4254 4255 g_zcopy_read_buf = (void *) 0x1122334455667788UL; 4256 g_zcopy_read_buf_len = (uint32_t) -1; 4257 /* Do a zcopy start for a write (populate=false) */ 4258 expected_io =
ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_ZCOPY, offset, num_blocks, 0); 4259 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 4260 g_io_done = false; 4261 g_zcopy_write_buf = aa_buf; 4262 g_zcopy_write_buf_len = sizeof(aa_buf); 4263 g_zcopy_bdev_io = NULL; 4264 rc = spdk_bdev_zcopy_start(desc, ioch, &iov, 1, offset, num_blocks, populate, io_done, NULL); 4265 CU_ASSERT_EQUAL(rc, 0); 4266 num_completed = stub_complete_io(1); 4267 CU_ASSERT_EQUAL(num_completed, 1); 4268 CU_ASSERT(g_io_done == true); 4269 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS); 4270 /* Check that the iov has been set up */ 4271 CU_ASSERT(iov.iov_base == g_zcopy_write_buf); 4272 CU_ASSERT(iov.iov_len == g_zcopy_write_buf_len); 4273 /* Check that the bdev_io has been saved */ 4274 CU_ASSERT(g_zcopy_bdev_io != NULL); 4275 /* Now do the zcopy end for a write (commit=true) */ 4276 g_io_done = false; 4277 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_ZCOPY, offset, num_blocks, 0); 4278 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 4279 rc = spdk_bdev_zcopy_end(g_zcopy_bdev_io, commit, io_done, NULL); 4280 CU_ASSERT_EQUAL(rc, 0); 4281 num_completed = stub_complete_io(1); 4282 CU_ASSERT_EQUAL(num_completed, 1); 4283 CU_ASSERT(g_io_done == true); 4284 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS); 4285 /* Check the g_zcopy are reset by io_done */ 4286 CU_ASSERT(g_zcopy_write_buf == NULL); 4287 CU_ASSERT(g_zcopy_write_buf_len == 0); 4288 /* Check that io_done has freed the g_zcopy_bdev_io */ 4289 CU_ASSERT(g_zcopy_bdev_io == NULL); 4290 4291 /* Check the zcopy read buffer has not been touched which 4292 * ensures that the correct buffers were used. 4293 */ 4294 CU_ASSERT(g_zcopy_read_buf == (void *) 0x1122334455667788UL); 4295 CU_ASSERT(g_zcopy_read_buf_len == (uint32_t) -1); 4296 4297 spdk_put_io_channel(ioch); 4298 spdk_bdev_close(desc); 4299 free_bdev(bdev); 4300 ut_fini_bdev(); 4301 } 4302 4303 static void 4304 bdev_zcopy_read(void) 4305 { 4306 struct spdk_bdev *bdev; 4307 struct spdk_bdev_desc *desc = NULL; 4308 struct spdk_io_channel *ioch; 4309 struct ut_expected_io *expected_io; 4310 uint64_t offset, num_blocks; 4311 uint32_t num_completed; 4312 char aa_buf[512]; 4313 struct iovec iov; 4314 int rc; 4315 const bool populate = true; 4316 const bool commit = false; 4317 4318 memset(aa_buf, 0xaa, sizeof(aa_buf)); 4319 4320 ut_init_bdev(NULL); 4321 bdev = allocate_bdev("bdev"); 4322 4323 rc = spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc); 4324 CU_ASSERT_EQUAL(rc, 0); 4325 SPDK_CU_ASSERT_FATAL(desc != NULL); 4326 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 4327 ioch = spdk_bdev_get_io_channel(desc); 4328 SPDK_CU_ASSERT_FATAL(ioch != NULL); 4329 4330 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 4331 4332 offset = 50; 4333 num_blocks = 1; 4334 iov.iov_base = NULL; 4335 iov.iov_len = 0; 4336 4337 g_zcopy_write_buf = (void *) 0x1122334455667788UL; 4338 g_zcopy_write_buf_len = (uint32_t) -1; 4339 4340 /* Do a zcopy start for a read (populate=true) */ 4341 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_ZCOPY, offset, num_blocks, 0); 4342 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 4343 g_io_done = false; 4344 g_zcopy_read_buf = aa_buf; 4345 g_zcopy_read_buf_len = sizeof(aa_buf); 4346 g_zcopy_bdev_io = NULL; 4347 rc = spdk_bdev_zcopy_start(desc, ioch, &iov, 1, offset, num_blocks, populate, io_done, NULL); 4348 CU_ASSERT_EQUAL(rc, 0); 4349 num_completed = stub_complete_io(1); 4350 CU_ASSERT_EQUAL(num_completed, 
1); 4351 CU_ASSERT(g_io_done == true); 4352 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS); 4353 /* Check that the iov has been set up */ 4354 CU_ASSERT(iov.iov_base == g_zcopy_read_buf); 4355 CU_ASSERT(iov.iov_len == g_zcopy_read_buf_len); 4356 /* Check that the bdev_io has been saved */ 4357 CU_ASSERT(g_zcopy_bdev_io != NULL); 4358 4359 /* Now do the zcopy end for a read (commit=false) */ 4360 g_io_done = false; 4361 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_ZCOPY, offset, num_blocks, 0); 4362 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 4363 rc = spdk_bdev_zcopy_end(g_zcopy_bdev_io, commit, io_done, NULL); 4364 CU_ASSERT_EQUAL(rc, 0); 4365 num_completed = stub_complete_io(1); 4366 CU_ASSERT_EQUAL(num_completed, 1); 4367 CU_ASSERT(g_io_done == true); 4368 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS); 4369 /* Check the g_zcopy are reset by io_done */ 4370 CU_ASSERT(g_zcopy_read_buf == NULL); 4371 CU_ASSERT(g_zcopy_read_buf_len == 0); 4372 /* Check that io_done has freed the g_zcopy_bdev_io */ 4373 CU_ASSERT(g_zcopy_bdev_io == NULL); 4374 4375 /* Check the zcopy write buffer has not been touched which 4376 * ensures that the correct buffers were used. 4377 */ 4378 CU_ASSERT(g_zcopy_write_buf == (void *) 0x1122334455667788UL); 4379 CU_ASSERT(g_zcopy_write_buf_len == (uint32_t) -1); 4380 4381 spdk_put_io_channel(ioch); 4382 spdk_bdev_close(desc); 4383 free_bdev(bdev); 4384 ut_fini_bdev(); 4385 } 4386 4387 static void 4388 bdev_open_while_hotremove(void) 4389 { 4390 struct spdk_bdev *bdev; 4391 struct spdk_bdev_desc *desc[2] = {}; 4392 int rc; 4393 4394 bdev = allocate_bdev("bdev"); 4395 4396 rc = spdk_bdev_open_ext("bdev", false, bdev_ut_event_cb, NULL, &desc[0]); 4397 CU_ASSERT(rc == 0); 4398 SPDK_CU_ASSERT_FATAL(desc[0] != NULL); 4399 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc[0])); 4400 4401 spdk_bdev_unregister(bdev, NULL, NULL); 4402 /* Bdev unregister is handled asynchronously. Poll thread to complete. 
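 * Once the unregister completes, the bdev is gone from the global bdev list, so the re-open below fails with -ENODEV even though desc[0] is still open.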
*/ 4403 poll_threads(); 4404 4405 rc = spdk_bdev_open_ext("bdev", false, bdev_ut_event_cb, NULL, &desc[1]); 4406 CU_ASSERT(rc == -ENODEV); 4407 SPDK_CU_ASSERT_FATAL(desc[1] == NULL); 4408 4409 spdk_bdev_close(desc[0]); 4410 free_bdev(bdev); 4411 } 4412 4413 static void 4414 bdev_close_while_hotremove(void) 4415 { 4416 struct spdk_bdev *bdev; 4417 struct spdk_bdev_desc *desc = NULL; 4418 int rc = 0; 4419 4420 bdev = allocate_bdev("bdev"); 4421 4422 rc = spdk_bdev_open_ext("bdev", true, bdev_open_cb1, &desc, &desc); 4423 CU_ASSERT_EQUAL(rc, 0); 4424 SPDK_CU_ASSERT_FATAL(desc != NULL); 4425 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 4426 4427 /* Simulate hot-unplug by unregistering bdev */ 4428 g_event_type1 = 0xFF; 4429 g_unregister_arg = NULL; 4430 g_unregister_rc = -1; 4431 spdk_bdev_unregister(bdev, bdev_unregister_cb, (void *)0x12345678); 4432 /* Close device while remove event is in flight */ 4433 spdk_bdev_close(desc); 4434 4435 /* Ensure that unregister callback is delayed */ 4436 CU_ASSERT_EQUAL(g_unregister_arg, NULL); 4437 CU_ASSERT_EQUAL(g_unregister_rc, -1); 4438 4439 poll_threads(); 4440 4441 /* Event callback shall not be issued because device was closed */ 4442 CU_ASSERT_EQUAL(g_event_type1, 0xFF); 4443 /* Unregister callback is issued */ 4444 CU_ASSERT_EQUAL(g_unregister_arg, (void *)0x12345678); 4445 CU_ASSERT_EQUAL(g_unregister_rc, 0); 4446 4447 free_bdev(bdev); 4448 } 4449 4450 static void 4451 bdev_open_ext_test(void) 4452 { 4453 struct spdk_bdev *bdev; 4454 struct spdk_bdev_desc *desc1 = NULL; 4455 struct spdk_bdev_desc *desc2 = NULL; 4456 int rc = 0; 4457 4458 bdev = allocate_bdev("bdev"); 4459 4460 rc = spdk_bdev_open_ext("bdev", true, NULL, NULL, &desc1); 4461 CU_ASSERT_EQUAL(rc, -EINVAL); 4462 4463 rc = spdk_bdev_open_ext("bdev", true, bdev_open_cb1, &desc1, &desc1); 4464 CU_ASSERT_EQUAL(rc, 0); 4465 4466 rc = spdk_bdev_open_ext("bdev", true, bdev_open_cb2, &desc2, &desc2); 4467 CU_ASSERT_EQUAL(rc, 0); 4468 4469 g_event_type1 = 0xFF; 4470 g_event_type2 = 0xFF; 4471 4472 /* Simulate hot-unplug by unregistering bdev */ 4473 spdk_bdev_unregister(bdev, NULL, NULL); 4474 poll_threads(); 4475 4476 /* Check if correct events have been triggered in event callback fn */ 4477 CU_ASSERT_EQUAL(g_event_type1, SPDK_BDEV_EVENT_REMOVE); 4478 CU_ASSERT_EQUAL(g_event_type2, SPDK_BDEV_EVENT_REMOVE); 4479 4480 free_bdev(bdev); 4481 poll_threads(); 4482 } 4483 4484 static void 4485 bdev_open_ext_unregister(void) 4486 { 4487 struct spdk_bdev *bdev; 4488 struct spdk_bdev_desc *desc1 = NULL; 4489 struct spdk_bdev_desc *desc2 = NULL; 4490 struct spdk_bdev_desc *desc3 = NULL; 4491 struct spdk_bdev_desc *desc4 = NULL; 4492 int rc = 0; 4493 4494 bdev = allocate_bdev("bdev"); 4495 4496 rc = spdk_bdev_open_ext("bdev", true, NULL, NULL, &desc1); 4497 CU_ASSERT_EQUAL(rc, -EINVAL); 4498 4499 rc = spdk_bdev_open_ext("bdev", true, bdev_open_cb1, &desc1, &desc1); 4500 CU_ASSERT_EQUAL(rc, 0); 4501 4502 rc = spdk_bdev_open_ext("bdev", true, bdev_open_cb2, &desc2, &desc2); 4503 CU_ASSERT_EQUAL(rc, 0); 4504 4505 rc = spdk_bdev_open_ext("bdev", true, bdev_open_cb3, &desc3, &desc3); 4506 CU_ASSERT_EQUAL(rc, 0); 4507 4508 rc = spdk_bdev_open_ext("bdev", true, bdev_open_cb4, &desc4, &desc4); 4509 CU_ASSERT_EQUAL(rc, 0); 4510 4511 g_event_type1 = 0xFF; 4512 g_event_type2 = 0xFF; 4513 g_event_type3 = 0xFF; 4514 g_event_type4 = 0xFF; 4515 4516 g_unregister_arg = NULL; 4517 g_unregister_rc = -1; 4518 4519 /* Simulate hot-unplug by unregistering bdev */ 4520 spdk_bdev_unregister(bdev, bdev_unregister_cb, 
(void *)0x12345678); 4521 4522 /* 4523 * Unregister is handled asynchronously and the event callbacks 4524 * (i.e., the bdev_open_cbN above) will be called. 4525 * bdev_open_cb3 and bdev_open_cb4 intentionally do not close 4526 * desc3 and desc4, so the bdev is not closed. 4527 */ 4528 poll_threads(); 4529 4530 /* Check if correct events have been triggered in event callback fn */ 4531 CU_ASSERT_EQUAL(g_event_type1, SPDK_BDEV_EVENT_REMOVE); 4532 CU_ASSERT_EQUAL(g_event_type2, SPDK_BDEV_EVENT_REMOVE); 4533 CU_ASSERT_EQUAL(g_event_type3, SPDK_BDEV_EVENT_REMOVE); 4534 CU_ASSERT_EQUAL(g_event_type4, SPDK_BDEV_EVENT_REMOVE); 4535 4536 /* Check that unregister callback is delayed */ 4537 CU_ASSERT(g_unregister_arg == NULL); 4538 CU_ASSERT(g_unregister_rc == -1); 4539 4540 /* 4541 * Explicitly close desc3. As desc4 is still open, the 4542 * unregister callback is still delayed. 4543 */ 4544 spdk_bdev_close(desc3); 4545 CU_ASSERT(g_unregister_arg == NULL); 4546 CU_ASSERT(g_unregister_rc == -1); 4547 4548 /* 4549 * Explicitly close desc4 to trigger the pending bdev unregister 4550 * operation once the last desc is closed. 4551 */ 4552 spdk_bdev_close(desc4); 4553 4554 /* Poll the thread for the async unregister operation */ 4555 poll_threads(); 4556 4557 /* Check that unregister callback is executed */ 4558 CU_ASSERT(g_unregister_arg == (void *)0x12345678); 4559 CU_ASSERT(g_unregister_rc == 0); 4560 4561 free_bdev(bdev); 4562 poll_threads(); 4563 } 4564 4565 struct timeout_io_cb_arg { 4566 struct iovec iov; 4567 uint8_t type; 4568 }; 4569 4570 static int 4571 bdev_channel_count_submitted_io(struct spdk_bdev_channel *ch) 4572 { 4573 struct spdk_bdev_io *bdev_io; 4574 int n = 0; 4575 4576 if (!ch) { 4577 return -1; 4578 } 4579 4580 TAILQ_FOREACH(bdev_io, &ch->io_submitted, internal.ch_link) { 4581 n++; 4582 } 4583 4584 return n; 4585 } 4586 4587 static void 4588 bdev_channel_io_timeout_cb(void *cb_arg, struct spdk_bdev_io *bdev_io) 4589 { 4590 struct timeout_io_cb_arg *ctx = cb_arg; 4591 4592 ctx->type = bdev_io->type; 4593 ctx->iov.iov_base = bdev_io->iov.iov_base; 4594 ctx->iov.iov_len = bdev_io->iov.iov_len; 4595 } 4596 4597 static void 4598 bdev_set_io_timeout(void) 4599 { 4600 struct spdk_bdev *bdev; 4601 struct spdk_bdev_desc *desc = NULL; 4602 struct spdk_io_channel *io_ch = NULL; 4603 struct spdk_bdev_channel *bdev_ch = NULL; 4604 struct timeout_io_cb_arg cb_arg; 4605 4606 ut_init_bdev(NULL); 4607 bdev = allocate_bdev("bdev"); 4608 4609 CU_ASSERT(spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc) == 0); 4610 SPDK_CU_ASSERT_FATAL(desc != NULL); 4611 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 4612 4613 io_ch = spdk_bdev_get_io_channel(desc); 4614 CU_ASSERT(io_ch != NULL); 4615 4616 bdev_ch = spdk_io_channel_get_ctx(io_ch); 4617 CU_ASSERT(TAILQ_EMPTY(&bdev_ch->io_submitted)); 4618 4619 /* This is the part1.
4620 * We will check the bdev_ch->io_submitted list 4621 * to make sure that it links the IOs, and only the user-submitted IOs 4622 */ 4623 CU_ASSERT(spdk_bdev_read(desc, io_ch, (void *)0x1000, 0, 4096, io_done, NULL) == 0); 4624 CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 1); 4625 CU_ASSERT(spdk_bdev_write(desc, io_ch, (void *)0x2000, 0, 4096, io_done, NULL) == 0); 4626 CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 2); 4627 stub_complete_io(1); 4628 CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 1); 4629 stub_complete_io(1); 4630 CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 0); 4631 4632 /* Split IO */ 4633 bdev->optimal_io_boundary = 16; 4634 bdev->split_on_optimal_io_boundary = true; 4635 4636 /* Now test that a single-vector command is split correctly. 4637 * Offset 14, length 8, payload 0xF000 4638 * Child - Offset 14, length 2, payload 0xF000 4639 * Child - Offset 16, length 6, payload 0xF000 + 2 * 512 4640 * 4641 * Set up the expected values before calling spdk_bdev_read_blocks 4642 */ 4643 CU_ASSERT(spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 8, io_done, NULL) == 0); 4644 /* We count all submitted IOs, including the IOs that are generated by splitting. */ 4645 CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 3); 4646 stub_complete_io(1); 4647 CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 2); 4648 stub_complete_io(1); 4649 CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 0); 4650 4651 /* Also include the reset IO */ 4652 CU_ASSERT(spdk_bdev_reset(desc, io_ch, io_done, NULL) == 0); 4653 CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 1); 4654 poll_threads(); 4655 stub_complete_io(1); 4656 poll_threads(); 4657 CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 0); 4658 4659 /* This is part2 4660 * Test the desc timeout poller registration 4661 */ 4662 4663 /* Successfully set the timeout */ 4664 CU_ASSERT(spdk_bdev_set_timeout(desc, 30, bdev_channel_io_timeout_cb, &cb_arg) == 0); 4665 CU_ASSERT(desc->io_timeout_poller != NULL); 4666 CU_ASSERT(desc->timeout_in_sec == 30); 4667 CU_ASSERT(desc->cb_fn == bdev_channel_io_timeout_cb); 4668 CU_ASSERT(desc->cb_arg == &cb_arg); 4669 4670 /* Change the timeout limit */ 4671 CU_ASSERT(spdk_bdev_set_timeout(desc, 20, bdev_channel_io_timeout_cb, &cb_arg) == 0); 4672 CU_ASSERT(desc->io_timeout_poller != NULL); 4673 CU_ASSERT(desc->timeout_in_sec == 20); 4674 CU_ASSERT(desc->cb_fn == bdev_channel_io_timeout_cb); 4675 CU_ASSERT(desc->cb_arg == &cb_arg); 4676 4677 /* Disable the timeout */ 4678 CU_ASSERT(spdk_bdev_set_timeout(desc, 0, NULL, NULL) == 0); 4679 CU_ASSERT(desc->io_timeout_poller == NULL); 4680 4681 /* This is the part3 4682 * We will test catching timeout IOs and check whether the timed-out IO is 4683 * the one we submitted.
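 * (The timeout poller presumably scans the bdev_ch->io_submitted list exercised in part1 to find IOs older than timeout_in_sec.)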
4684 */ 4685 memset(&cb_arg, 0, sizeof(cb_arg)); 4686 CU_ASSERT(spdk_bdev_set_timeout(desc, 30, bdev_channel_io_timeout_cb, &cb_arg) == 0); 4687 CU_ASSERT(spdk_bdev_write_blocks(desc, io_ch, (void *)0x1000, 0, 1, io_done, NULL) == 0); 4688 4689 /* Don't reach the limit */ 4690 spdk_delay_us(15 * spdk_get_ticks_hz()); 4691 poll_threads(); 4692 CU_ASSERT(cb_arg.type == 0); 4693 CU_ASSERT(cb_arg.iov.iov_base == (void *)0x0); 4694 CU_ASSERT(cb_arg.iov.iov_len == 0); 4695 4696 /* 15 + 15 = 30 reach the limit */ 4697 spdk_delay_us(15 * spdk_get_ticks_hz()); 4698 poll_threads(); 4699 CU_ASSERT(cb_arg.type == SPDK_BDEV_IO_TYPE_WRITE); 4700 CU_ASSERT(cb_arg.iov.iov_base == (void *)0x1000); 4701 CU_ASSERT(cb_arg.iov.iov_len == 1 * bdev->blocklen); 4702 stub_complete_io(1); 4703 4704 /* Use the same split IO above and check the IO */ 4705 memset(&cb_arg, 0, sizeof(cb_arg)); 4706 CU_ASSERT(spdk_bdev_write_blocks(desc, io_ch, (void *)0xF000, 14, 8, io_done, NULL) == 0); 4707 4708 /* The first child complete in time */ 4709 spdk_delay_us(15 * spdk_get_ticks_hz()); 4710 poll_threads(); 4711 stub_complete_io(1); 4712 CU_ASSERT(cb_arg.type == 0); 4713 CU_ASSERT(cb_arg.iov.iov_base == (void *)0x0); 4714 CU_ASSERT(cb_arg.iov.iov_len == 0); 4715 4716 /* The second child reach the limit */ 4717 spdk_delay_us(15 * spdk_get_ticks_hz()); 4718 poll_threads(); 4719 CU_ASSERT(cb_arg.type == SPDK_BDEV_IO_TYPE_WRITE); 4720 CU_ASSERT(cb_arg.iov.iov_base == (void *)0xF000); 4721 CU_ASSERT(cb_arg.iov.iov_len == 8 * bdev->blocklen); 4722 stub_complete_io(1); 4723 4724 /* Also include the reset IO */ 4725 memset(&cb_arg, 0, sizeof(cb_arg)); 4726 CU_ASSERT(spdk_bdev_reset(desc, io_ch, io_done, NULL) == 0); 4727 spdk_delay_us(30 * spdk_get_ticks_hz()); 4728 poll_threads(); 4729 CU_ASSERT(cb_arg.type == SPDK_BDEV_IO_TYPE_RESET); 4730 stub_complete_io(1); 4731 poll_threads(); 4732 4733 spdk_put_io_channel(io_ch); 4734 spdk_bdev_close(desc); 4735 free_bdev(bdev); 4736 ut_fini_bdev(); 4737 } 4738 4739 static void 4740 bdev_set_qd_sampling(void) 4741 { 4742 struct spdk_bdev *bdev; 4743 struct spdk_bdev_desc *desc = NULL; 4744 struct spdk_io_channel *io_ch = NULL; 4745 struct spdk_bdev_channel *bdev_ch = NULL; 4746 struct timeout_io_cb_arg cb_arg; 4747 4748 ut_init_bdev(NULL); 4749 bdev = allocate_bdev("bdev"); 4750 4751 CU_ASSERT(spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc) == 0); 4752 SPDK_CU_ASSERT_FATAL(desc != NULL); 4753 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 4754 4755 io_ch = spdk_bdev_get_io_channel(desc); 4756 CU_ASSERT(io_ch != NULL); 4757 4758 bdev_ch = spdk_io_channel_get_ctx(io_ch); 4759 CU_ASSERT(TAILQ_EMPTY(&bdev_ch->io_submitted)); 4760 4761 /* This is the part1. 4762 * We will check the bdev_ch->io_submitted list 4763 * TO make sure that it can link IOs and only the user submitted IOs 4764 */ 4765 CU_ASSERT(spdk_bdev_read(desc, io_ch, (void *)0x1000, 0, 4096, io_done, NULL) == 0); 4766 CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 1); 4767 CU_ASSERT(spdk_bdev_write(desc, io_ch, (void *)0x2000, 0, 4096, io_done, NULL) == 0); 4768 CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 2); 4769 stub_complete_io(1); 4770 CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 1); 4771 stub_complete_io(1); 4772 CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 0); 4773 4774 /* This is the part2. 
4775 * Test the bdev's qd poller register 4776 */ 4777 /* 1st Successfully set the qd sampling period */ 4778 spdk_bdev_set_qd_sampling_period(bdev, 10); 4779 CU_ASSERT(bdev->internal.new_period == 10); 4780 CU_ASSERT(bdev->internal.period == 10); 4781 CU_ASSERT(bdev->internal.qd_desc != NULL); 4782 poll_threads(); 4783 CU_ASSERT(bdev->internal.qd_poller != NULL); 4784 4785 /* 2nd Change the qd sampling period */ 4786 spdk_bdev_set_qd_sampling_period(bdev, 20); 4787 CU_ASSERT(bdev->internal.new_period == 20); 4788 CU_ASSERT(bdev->internal.period == 10); 4789 CU_ASSERT(bdev->internal.qd_desc != NULL); 4790 poll_threads(); 4791 CU_ASSERT(bdev->internal.qd_poller != NULL); 4792 CU_ASSERT(bdev->internal.period == bdev->internal.new_period); 4793 4794 /* 3rd Change the qd sampling period and verify qd_poll_in_progress */ 4795 spdk_delay_us(20); 4796 poll_thread_times(0, 1); 4797 CU_ASSERT(bdev->internal.qd_poll_in_progress == true); 4798 spdk_bdev_set_qd_sampling_period(bdev, 30); 4799 CU_ASSERT(bdev->internal.new_period == 30); 4800 CU_ASSERT(bdev->internal.period == 20); 4801 poll_threads(); 4802 CU_ASSERT(bdev->internal.qd_poll_in_progress == false); 4803 CU_ASSERT(bdev->internal.period == bdev->internal.new_period); 4804 4805 /* 4th Disable the qd sampling period */ 4806 spdk_bdev_set_qd_sampling_period(bdev, 0); 4807 CU_ASSERT(bdev->internal.new_period == 0); 4808 CU_ASSERT(bdev->internal.period == 30); 4809 poll_threads(); 4810 CU_ASSERT(bdev->internal.qd_poller == NULL); 4811 CU_ASSERT(bdev->internal.period == bdev->internal.new_period); 4812 CU_ASSERT(bdev->internal.qd_desc == NULL); 4813 4814 /* This is the part3. 4815 * We will test the submitted IO and reset works 4816 * properly with the qd sampling. 4817 */ 4818 memset(&cb_arg, 0, sizeof(cb_arg)); 4819 spdk_bdev_set_qd_sampling_period(bdev, 1); 4820 poll_threads(); 4821 4822 CU_ASSERT(spdk_bdev_write(desc, io_ch, (void *)0x2000, 0, 4096, io_done, NULL) == 0); 4823 CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 1); 4824 4825 /* Also include the reset IO */ 4826 memset(&cb_arg, 0, sizeof(cb_arg)); 4827 CU_ASSERT(spdk_bdev_reset(desc, io_ch, io_done, NULL) == 0); 4828 poll_threads(); 4829 4830 /* Close the desc */ 4831 spdk_put_io_channel(io_ch); 4832 spdk_bdev_close(desc); 4833 4834 /* Complete the submitted IO and reset */ 4835 stub_complete_io(2); 4836 poll_threads(); 4837 4838 free_bdev(bdev); 4839 ut_fini_bdev(); 4840 } 4841 4842 static void 4843 lba_range_overlap(void) 4844 { 4845 struct lba_range r1, r2; 4846 4847 r1.offset = 100; 4848 r1.length = 50; 4849 4850 r2.offset = 0; 4851 r2.length = 1; 4852 CU_ASSERT(!bdev_lba_range_overlapped(&r1, &r2)); 4853 4854 r2.offset = 0; 4855 r2.length = 100; 4856 CU_ASSERT(!bdev_lba_range_overlapped(&r1, &r2)); 4857 4858 r2.offset = 0; 4859 r2.length = 110; 4860 CU_ASSERT(bdev_lba_range_overlapped(&r1, &r2)); 4861 4862 r2.offset = 100; 4863 r2.length = 10; 4864 CU_ASSERT(bdev_lba_range_overlapped(&r1, &r2)); 4865 4866 r2.offset = 110; 4867 r2.length = 20; 4868 CU_ASSERT(bdev_lba_range_overlapped(&r1, &r2)); 4869 4870 r2.offset = 140; 4871 r2.length = 150; 4872 CU_ASSERT(bdev_lba_range_overlapped(&r1, &r2)); 4873 4874 r2.offset = 130; 4875 r2.length = 200; 4876 CU_ASSERT(bdev_lba_range_overlapped(&r1, &r2)); 4877 4878 r2.offset = 150; 4879 r2.length = 100; 4880 CU_ASSERT(!bdev_lba_range_overlapped(&r1, &r2)); 4881 4882 r2.offset = 110; 4883 r2.length = 0; 4884 CU_ASSERT(!bdev_lba_range_overlapped(&r1, &r2)); 4885 } 4886 4887 static bool g_lock_lba_range_done; 4888 static bool 
g_unlock_lba_range_done; 4889 4890 static void 4891 lock_lba_range_done(struct lba_range *range, void *ctx, int status) 4892 { 4893 g_lock_lba_range_done = true; 4894 } 4895 4896 static void 4897 unlock_lba_range_done(struct lba_range *range, void *ctx, int status) 4898 { 4899 g_unlock_lba_range_done = true; 4900 } 4901 4902 static void 4903 lock_lba_range_check_ranges(void) 4904 { 4905 struct spdk_bdev *bdev; 4906 struct spdk_bdev_desc *desc = NULL; 4907 struct spdk_io_channel *io_ch; 4908 struct spdk_bdev_channel *channel; 4909 struct lba_range *range; 4910 int ctx1; 4911 int rc; 4912 4913 ut_init_bdev(NULL); 4914 bdev = allocate_bdev("bdev0"); 4915 4916 rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc); 4917 CU_ASSERT(rc == 0); 4918 CU_ASSERT(desc != NULL); 4919 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 4920 io_ch = spdk_bdev_get_io_channel(desc); 4921 CU_ASSERT(io_ch != NULL); 4922 channel = spdk_io_channel_get_ctx(io_ch); 4923 4924 g_lock_lba_range_done = false; 4925 rc = bdev_lock_lba_range(desc, io_ch, 20, 10, lock_lba_range_done, &ctx1); 4926 CU_ASSERT(rc == 0); 4927 poll_threads(); 4928 4929 CU_ASSERT(g_lock_lba_range_done == true); 4930 range = TAILQ_FIRST(&channel->locked_ranges); 4931 SPDK_CU_ASSERT_FATAL(range != NULL); 4932 CU_ASSERT(range->offset == 20); 4933 CU_ASSERT(range->length == 10); 4934 CU_ASSERT(range->owner_ch == channel); 4935 4936 /* Unlocks must exactly match a lock. */ 4937 g_unlock_lba_range_done = false; 4938 rc = bdev_unlock_lba_range(desc, io_ch, 20, 1, unlock_lba_range_done, &ctx1); 4939 CU_ASSERT(rc == -EINVAL); 4940 CU_ASSERT(g_unlock_lba_range_done == false); 4941 4942 rc = bdev_unlock_lba_range(desc, io_ch, 20, 10, unlock_lba_range_done, &ctx1); 4943 CU_ASSERT(rc == 0); 4944 spdk_delay_us(100); 4945 poll_threads(); 4946 4947 CU_ASSERT(g_unlock_lba_range_done == true); 4948 CU_ASSERT(TAILQ_EMPTY(&channel->locked_ranges)); 4949 4950 spdk_put_io_channel(io_ch); 4951 spdk_bdev_close(desc); 4952 free_bdev(bdev); 4953 ut_fini_bdev(); 4954 } 4955 4956 static void 4957 lock_lba_range_with_io_outstanding(void) 4958 { 4959 struct spdk_bdev *bdev; 4960 struct spdk_bdev_desc *desc = NULL; 4961 struct spdk_io_channel *io_ch; 4962 struct spdk_bdev_channel *channel; 4963 struct lba_range *range; 4964 char buf[4096]; 4965 int ctx1; 4966 int rc; 4967 4968 ut_init_bdev(NULL); 4969 bdev = allocate_bdev("bdev0"); 4970 4971 rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc); 4972 CU_ASSERT(rc == 0); 4973 CU_ASSERT(desc != NULL); 4974 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 4975 io_ch = spdk_bdev_get_io_channel(desc); 4976 CU_ASSERT(io_ch != NULL); 4977 channel = spdk_io_channel_get_ctx(io_ch); 4978 4979 g_io_done = false; 4980 rc = spdk_bdev_read_blocks(desc, io_ch, buf, 20, 1, io_done, &ctx1); 4981 CU_ASSERT(rc == 0); 4982 4983 g_lock_lba_range_done = false; 4984 rc = bdev_lock_lba_range(desc, io_ch, 20, 10, lock_lba_range_done, &ctx1); 4985 CU_ASSERT(rc == 0); 4986 poll_threads(); 4987 4988 /* The lock should immediately become valid, since there are no outstanding 4989 * write I/O. 
4990 */ 4991 CU_ASSERT(g_io_done == false); 4992 CU_ASSERT(g_lock_lba_range_done == true); 4993 range = TAILQ_FIRST(&channel->locked_ranges); 4994 SPDK_CU_ASSERT_FATAL(range != NULL); 4995 CU_ASSERT(range->offset == 20); 4996 CU_ASSERT(range->length == 10); 4997 CU_ASSERT(range->owner_ch == channel); 4998 CU_ASSERT(range->locked_ctx == &ctx1); 4999 5000 rc = bdev_unlock_lba_range(desc, io_ch, 20, 10, lock_lba_range_done, &ctx1); 5001 CU_ASSERT(rc == 0); 5002 stub_complete_io(1); 5003 spdk_delay_us(100); 5004 poll_threads(); 5005 5006 CU_ASSERT(TAILQ_EMPTY(&channel->locked_ranges)); 5007 5008 /* Now try again, but with a write I/O. */ 5009 g_io_done = false; 5010 rc = spdk_bdev_write_blocks(desc, io_ch, buf, 20, 1, io_done, &ctx1); 5011 CU_ASSERT(rc == 0); 5012 5013 g_lock_lba_range_done = false; 5014 rc = bdev_lock_lba_range(desc, io_ch, 20, 10, lock_lba_range_done, &ctx1); 5015 CU_ASSERT(rc == 0); 5016 poll_threads(); 5017 5018 /* The lock should not be fully valid yet, since a write I/O is outstanding. 5019 * But note that the range should be on the channel's locked_list, to make sure no 5020 * new write I/O are started. 5021 */ 5022 CU_ASSERT(g_io_done == false); 5023 CU_ASSERT(g_lock_lba_range_done == false); 5024 range = TAILQ_FIRST(&channel->locked_ranges); 5025 SPDK_CU_ASSERT_FATAL(range != NULL); 5026 CU_ASSERT(range->offset == 20); 5027 CU_ASSERT(range->length == 10); 5028 5029 /* Complete the write I/O. This should make the lock valid (checked by confirming 5030 * our callback was invoked). 5031 */ 5032 stub_complete_io(1); 5033 spdk_delay_us(100); 5034 poll_threads(); 5035 CU_ASSERT(g_io_done == true); 5036 CU_ASSERT(g_lock_lba_range_done == true); 5037 5038 rc = bdev_unlock_lba_range(desc, io_ch, 20, 10, unlock_lba_range_done, &ctx1); 5039 CU_ASSERT(rc == 0); 5040 poll_threads(); 5041 5042 CU_ASSERT(TAILQ_EMPTY(&channel->locked_ranges)); 5043 5044 spdk_put_io_channel(io_ch); 5045 spdk_bdev_close(desc); 5046 free_bdev(bdev); 5047 ut_fini_bdev(); 5048 } 5049 5050 static void 5051 lock_lba_range_overlapped(void) 5052 { 5053 struct spdk_bdev *bdev; 5054 struct spdk_bdev_desc *desc = NULL; 5055 struct spdk_io_channel *io_ch; 5056 struct spdk_bdev_channel *channel; 5057 struct lba_range *range; 5058 int ctx1; 5059 int rc; 5060 5061 ut_init_bdev(NULL); 5062 bdev = allocate_bdev("bdev0"); 5063 5064 rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc); 5065 CU_ASSERT(rc == 0); 5066 CU_ASSERT(desc != NULL); 5067 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 5068 io_ch = spdk_bdev_get_io_channel(desc); 5069 CU_ASSERT(io_ch != NULL); 5070 channel = spdk_io_channel_get_ctx(io_ch); 5071 5072 /* Lock range 20-29. */ 5073 g_lock_lba_range_done = false; 5074 rc = bdev_lock_lba_range(desc, io_ch, 20, 10, lock_lba_range_done, &ctx1); 5075 CU_ASSERT(rc == 0); 5076 poll_threads(); 5077 5078 CU_ASSERT(g_lock_lba_range_done == true); 5079 range = TAILQ_FIRST(&channel->locked_ranges); 5080 SPDK_CU_ASSERT_FATAL(range != NULL); 5081 CU_ASSERT(range->offset == 20); 5082 CU_ASSERT(range->length == 10); 5083 5084 /* Try to lock range 25-39. It should not lock immediately, since it overlaps with 5085 * 20-29. 
5086 */ 5087 g_lock_lba_range_done = false; 5088 rc = bdev_lock_lba_range(desc, io_ch, 25, 15, lock_lba_range_done, &ctx1); 5089 CU_ASSERT(rc == 0); 5090 poll_threads(); 5091 5092 CU_ASSERT(g_lock_lba_range_done == false); 5093 range = TAILQ_FIRST(&bdev->internal.pending_locked_ranges); 5094 SPDK_CU_ASSERT_FATAL(range != NULL); 5095 CU_ASSERT(range->offset == 25); 5096 CU_ASSERT(range->length == 15); 5097 5098 /* Unlock 20-29. This should result in range 25-39 now getting locked since it 5099 * no longer overlaps with an active lock. 5100 */ 5101 g_unlock_lba_range_done = false; 5102 rc = bdev_unlock_lba_range(desc, io_ch, 20, 10, unlock_lba_range_done, &ctx1); 5103 CU_ASSERT(rc == 0); 5104 poll_threads(); 5105 5106 CU_ASSERT(g_unlock_lba_range_done == true); 5107 CU_ASSERT(TAILQ_EMPTY(&bdev->internal.pending_locked_ranges)); 5108 range = TAILQ_FIRST(&channel->locked_ranges); 5109 SPDK_CU_ASSERT_FATAL(range != NULL); 5110 CU_ASSERT(range->offset == 25); 5111 CU_ASSERT(range->length == 15); 5112 5113 /* Lock 40-59. This should immediately lock since it does not overlap with the 5114 * currently active 25-39 lock. 5115 */ 5116 g_lock_lba_range_done = false; 5117 rc = bdev_lock_lba_range(desc, io_ch, 40, 20, lock_lba_range_done, &ctx1); 5118 CU_ASSERT(rc == 0); 5119 poll_threads(); 5120 5121 CU_ASSERT(g_lock_lba_range_done == true); 5122 range = TAILQ_FIRST(&bdev->internal.locked_ranges); 5123 SPDK_CU_ASSERT_FATAL(range != NULL); 5124 range = TAILQ_NEXT(range, tailq); 5125 SPDK_CU_ASSERT_FATAL(range != NULL); 5126 CU_ASSERT(range->offset == 40); 5127 CU_ASSERT(range->length == 20); 5128 5129 /* Try to lock 35-44. Note that this overlaps with both 25-39 and 40-59. */ 5130 g_lock_lba_range_done = false; 5131 rc = bdev_lock_lba_range(desc, io_ch, 35, 10, lock_lba_range_done, &ctx1); 5132 CU_ASSERT(rc == 0); 5133 poll_threads(); 5134 5135 CU_ASSERT(g_lock_lba_range_done == false); 5136 range = TAILQ_FIRST(&bdev->internal.pending_locked_ranges); 5137 SPDK_CU_ASSERT_FATAL(range != NULL); 5138 CU_ASSERT(range->offset == 35); 5139 CU_ASSERT(range->length == 10); 5140 5141 /* Unlock 25-39. Make sure that 35-44 is still in the pending list, since 5142 * the 40-59 lock is still active. 5143 */ 5144 g_unlock_lba_range_done = false; 5145 rc = bdev_unlock_lba_range(desc, io_ch, 25, 15, unlock_lba_range_done, &ctx1); 5146 CU_ASSERT(rc == 0); 5147 poll_threads(); 5148 5149 CU_ASSERT(g_unlock_lba_range_done == true); 5150 CU_ASSERT(g_lock_lba_range_done == false); 5151 range = TAILQ_FIRST(&bdev->internal.pending_locked_ranges); 5152 SPDK_CU_ASSERT_FATAL(range != NULL); 5153 CU_ASSERT(range->offset == 35); 5154 CU_ASSERT(range->length == 10); 5155 5156 /* Unlock 40-59. This should result in 35-44 now getting locked, since there are 5157 * no longer any active overlapping locks. 5158 */ 5159 g_unlock_lba_range_done = false; 5160 rc = bdev_unlock_lba_range(desc, io_ch, 40, 20, unlock_lba_range_done, &ctx1); 5161 CU_ASSERT(rc == 0); 5162 poll_threads(); 5163 5164 CU_ASSERT(g_unlock_lba_range_done == true); 5165 CU_ASSERT(g_lock_lba_range_done == true); 5166 CU_ASSERT(TAILQ_EMPTY(&bdev->internal.pending_locked_ranges)); 5167 range = TAILQ_FIRST(&bdev->internal.locked_ranges); 5168 SPDK_CU_ASSERT_FATAL(range != NULL); 5169 CU_ASSERT(range->offset == 35); 5170 CU_ASSERT(range->length == 10); 5171 5172 /* Finally, unlock 35-44. 
*/ 5173 g_unlock_lba_range_done = false; 5174 rc = bdev_unlock_lba_range(desc, io_ch, 35, 10, unlock_lba_range_done, &ctx1); 5175 CU_ASSERT(rc == 0); 5176 poll_threads(); 5177 5178 CU_ASSERT(g_unlock_lba_range_done == true); 5179 CU_ASSERT(TAILQ_EMPTY(&bdev->internal.locked_ranges)); 5180 5181 spdk_put_io_channel(io_ch); 5182 spdk_bdev_close(desc); 5183 free_bdev(bdev); 5184 ut_fini_bdev(); 5185 } 5186 5187 static void 5188 bdev_quiesce_done(void *ctx, int status) 5189 { 5190 g_lock_lba_range_done = true; 5191 } 5192 5193 static void 5194 bdev_unquiesce_done(void *ctx, int status) 5195 { 5196 g_unlock_lba_range_done = true; 5197 } 5198 5199 static void 5200 bdev_quiesce_done_unquiesce(void *ctx, int status) 5201 { 5202 struct spdk_bdev *bdev = ctx; 5203 int rc; 5204 5205 g_lock_lba_range_done = true; 5206 5207 rc = spdk_bdev_unquiesce(bdev, &bdev_ut_if, bdev_unquiesce_done, NULL); 5208 CU_ASSERT(rc == 0); 5209 } 5210 5211 static void 5212 bdev_quiesce(void) 5213 { 5214 struct spdk_bdev *bdev; 5215 struct spdk_bdev_desc *desc = NULL; 5216 struct spdk_io_channel *io_ch; 5217 struct spdk_bdev_channel *channel; 5218 struct lba_range *range; 5219 struct spdk_bdev_io *bdev_io; 5220 int ctx1; 5221 int rc; 5222 5223 ut_init_bdev(NULL); 5224 bdev = allocate_bdev("bdev0"); 5225 5226 rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc); 5227 CU_ASSERT(rc == 0); 5228 CU_ASSERT(desc != NULL); 5229 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 5230 io_ch = spdk_bdev_get_io_channel(desc); 5231 CU_ASSERT(io_ch != NULL); 5232 channel = spdk_io_channel_get_ctx(io_ch); 5233 5234 g_lock_lba_range_done = false; 5235 rc = spdk_bdev_quiesce(bdev, &bdev_ut_if, bdev_quiesce_done, &ctx1); 5236 CU_ASSERT(rc == 0); 5237 poll_threads(); 5238 5239 CU_ASSERT(g_lock_lba_range_done == true); 5240 range = TAILQ_FIRST(&channel->locked_ranges); 5241 SPDK_CU_ASSERT_FATAL(range != NULL); 5242 CU_ASSERT(range->offset == 0); 5243 CU_ASSERT(range->length == bdev->blockcnt); 5244 CU_ASSERT(range->owner_ch == NULL); 5245 range = TAILQ_FIRST(&bdev_ut_if.internal.quiesced_ranges); 5246 SPDK_CU_ASSERT_FATAL(range != NULL); 5247 CU_ASSERT(range->offset == 0); 5248 CU_ASSERT(range->length == bdev->blockcnt); 5249 CU_ASSERT(range->owner_ch == NULL); 5250 5251 g_unlock_lba_range_done = false; 5252 rc = spdk_bdev_unquiesce(bdev, &bdev_ut_if, bdev_unquiesce_done, &ctx1); 5253 CU_ASSERT(rc == 0); 5254 spdk_delay_us(100); 5255 poll_threads(); 5256 5257 CU_ASSERT(g_unlock_lba_range_done == true); 5258 CU_ASSERT(TAILQ_EMPTY(&channel->locked_ranges)); 5259 CU_ASSERT(TAILQ_EMPTY(&bdev_ut_if.internal.quiesced_ranges)); 5260 5261 g_lock_lba_range_done = false; 5262 rc = spdk_bdev_quiesce_range(bdev, &bdev_ut_if, 20, 10, bdev_quiesce_done, &ctx1); 5263 CU_ASSERT(rc == 0); 5264 poll_threads(); 5265 5266 CU_ASSERT(g_lock_lba_range_done == true); 5267 range = TAILQ_FIRST(&channel->locked_ranges); 5268 SPDK_CU_ASSERT_FATAL(range != NULL); 5269 CU_ASSERT(range->offset == 20); 5270 CU_ASSERT(range->length == 10); 5271 CU_ASSERT(range->owner_ch == NULL); 5272 range = TAILQ_FIRST(&bdev_ut_if.internal.quiesced_ranges); 5273 SPDK_CU_ASSERT_FATAL(range != NULL); 5274 CU_ASSERT(range->offset == 20); 5275 CU_ASSERT(range->length == 10); 5276 CU_ASSERT(range->owner_ch == NULL); 5277 5278 /* Unlocks must exactly match a lock. 
*/ 5279 g_unlock_lba_range_done = false; 5280 rc = spdk_bdev_unquiesce_range(bdev, &bdev_ut_if, 20, 1, bdev_unquiesce_done, &ctx1); 5281 CU_ASSERT(rc == -EINVAL); 5282 CU_ASSERT(g_unlock_lba_range_done == false); 5283 5284 rc = spdk_bdev_unquiesce_range(bdev, &bdev_ut_if, 20, 10, bdev_unquiesce_done, &ctx1); 5285 CU_ASSERT(rc == 0); 5286 spdk_delay_us(100); 5287 poll_threads(); 5288 5289 CU_ASSERT(g_unlock_lba_range_done == true); 5290 CU_ASSERT(TAILQ_EMPTY(&channel->locked_ranges)); 5291 CU_ASSERT(TAILQ_EMPTY(&bdev_ut_if.internal.quiesced_ranges)); 5292 5293 /* Test unquiesce from quiesce cb */ 5294 g_lock_lba_range_done = false; 5295 g_unlock_lba_range_done = false; 5296 rc = spdk_bdev_quiesce(bdev, &bdev_ut_if, bdev_quiesce_done_unquiesce, bdev); 5297 CU_ASSERT(rc == 0); 5298 poll_threads(); 5299 5300 CU_ASSERT(g_lock_lba_range_done == true); 5301 CU_ASSERT(g_unlock_lba_range_done == true); 5302 5303 /* Test quiesce with read I/O */ 5304 g_lock_lba_range_done = false; 5305 g_unlock_lba_range_done = false; 5306 g_io_done = false; 5307 rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 20, 1, io_done, &ctx1); 5308 CU_ASSERT(rc == 0); 5309 5310 rc = spdk_bdev_quiesce(bdev, &bdev_ut_if, bdev_quiesce_done, &ctx1); 5311 CU_ASSERT(rc == 0); 5312 poll_threads(); 5313 5314 CU_ASSERT(g_io_done == false); 5315 CU_ASSERT(g_lock_lba_range_done == false); 5316 range = TAILQ_FIRST(&channel->locked_ranges); 5317 SPDK_CU_ASSERT_FATAL(range != NULL); 5318 5319 stub_complete_io(1); 5320 spdk_delay_us(100); 5321 poll_threads(); 5322 CU_ASSERT(g_io_done == true); 5323 CU_ASSERT(g_lock_lba_range_done == true); 5324 CU_ASSERT(TAILQ_EMPTY(&channel->io_locked)); 5325 5326 g_io_done = false; 5327 rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 20, 1, io_done, &ctx1); 5328 CU_ASSERT(rc == 0); 5329 5330 bdev_io = TAILQ_FIRST(&channel->io_locked); 5331 SPDK_CU_ASSERT_FATAL(bdev_io != NULL); 5332 CU_ASSERT(bdev_io->u.bdev.offset_blocks == 20); 5333 CU_ASSERT(bdev_io->u.bdev.num_blocks == 1); 5334 5335 rc = spdk_bdev_unquiesce(bdev, &bdev_ut_if, bdev_unquiesce_done, &ctx1); 5336 CU_ASSERT(rc == 0); 5337 spdk_delay_us(100); 5338 poll_threads(); 5339 5340 CU_ASSERT(g_unlock_lba_range_done == true); 5341 CU_ASSERT(TAILQ_EMPTY(&channel->locked_ranges)); 5342 CU_ASSERT(TAILQ_EMPTY(&bdev_ut_if.internal.quiesced_ranges)); 5343 5344 CU_ASSERT(TAILQ_EMPTY(&channel->io_locked)); 5345 spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_SUCCESS); 5346 poll_threads(); 5347 CU_ASSERT(g_io_done == true); 5348 5349 spdk_put_io_channel(io_ch); 5350 spdk_bdev_close(desc); 5351 free_bdev(bdev); 5352 ut_fini_bdev(); 5353 } 5354 5355 static void 5356 abort_done(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg) 5357 { 5358 g_abort_done = true; 5359 g_abort_status = bdev_io->internal.status; 5360 spdk_bdev_free_io(bdev_io); 5361 } 5362 5363 static void 5364 bdev_io_abort(void) 5365 { 5366 struct spdk_bdev *bdev; 5367 struct spdk_bdev_desc *desc = NULL; 5368 struct spdk_io_channel *io_ch; 5369 struct spdk_bdev_channel *channel; 5370 struct spdk_bdev_mgmt_channel *mgmt_ch; 5371 struct spdk_bdev_opts bdev_opts = {}; 5372 struct iovec iov[SPDK_BDEV_IO_NUM_CHILD_IOV * 2]; 5373 uint64_t io_ctx1 = 0, io_ctx2 = 0, i; 5374 int rc; 5375 5376 spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts)); 5377 bdev_opts.bdev_io_pool_size = 7; 5378 bdev_opts.bdev_io_cache_size = 2; 5379 ut_init_bdev(&bdev_opts); 5380 5381 bdev = allocate_bdev("bdev0"); 5382 5383 rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc); 5384 
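	/* The small bdev_io pool configured above (pool size 7, cache size 2) is
	 * deliberate: the last case in this test exhausts the pool, so the child
	 * abort requests have to wait in mgmt_ch->io_wait_queue and are submitted
	 * sequentially (checked further below).
	 */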
	CU_ASSERT(rc == 0);
	CU_ASSERT(desc != NULL);
	CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc));
	io_ch = spdk_bdev_get_io_channel(desc);
	CU_ASSERT(io_ch != NULL);
	channel = spdk_io_channel_get_ctx(io_ch);
	mgmt_ch = channel->shared_resource->mgmt_ch;

	g_abort_done = false;

	ut_enable_io_type(SPDK_BDEV_IO_TYPE_ABORT, false);

	rc = spdk_bdev_abort(desc, io_ch, &io_ctx1, abort_done, NULL);
	CU_ASSERT(rc == -ENOTSUP);

	ut_enable_io_type(SPDK_BDEV_IO_TYPE_ABORT, true);

	rc = spdk_bdev_abort(desc, io_ch, &io_ctx2, abort_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_abort_done == true);
	CU_ASSERT(g_abort_status == SPDK_BDEV_IO_STATUS_FAILED);

	/* Test the case that the target I/O was successfully aborted. */
	g_io_done = false;

	rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, &io_ctx1);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	g_abort_done = false;
	g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS;

	rc = spdk_bdev_abort(desc, io_ch, &io_ctx1, abort_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED);
	stub_complete_io(1);
	CU_ASSERT(g_abort_done == true);
	CU_ASSERT(g_abort_status == SPDK_BDEV_IO_STATUS_SUCCESS);

	/* Test the case that the target I/O was not aborted, because it completed
	 * in the middle of execution of the abort.
	 */
	g_io_done = false;

	rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, &io_ctx1);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	g_abort_done = false;
	g_io_exp_status = SPDK_BDEV_IO_STATUS_FAILED;

	rc = spdk_bdev_abort(desc, io_ch, &io_ctx1, abort_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS;
	stub_complete_io(1);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS);

	g_io_exp_status = SPDK_BDEV_IO_STATUS_FAILED;
	stub_complete_io(1);
	CU_ASSERT(g_abort_done == true);
	CU_ASSERT(g_abort_status == SPDK_BDEV_IO_STATUS_SUCCESS);

	g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS;

	bdev->optimal_io_boundary = 16;
	bdev->split_on_optimal_io_boundary = true;

	/* Test that a single-vector command which is split is aborted correctly.
	 * Offset 14, length 8, payload 0xF000
	 *  Child - Offset 14, length 2, payload 0xF000
	 *  Child - Offset 16, length 6, payload 0xF000 + 2 * 512
	 */
	g_io_done = false;

	rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 8, io_done, &io_ctx1);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2);

	g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS;

	rc = spdk_bdev_abort(desc, io_ch, &io_ctx1, abort_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED);
	stub_complete_io(2);
	CU_ASSERT(g_abort_done == true);
	CU_ASSERT(g_abort_status == SPDK_BDEV_IO_STATUS_SUCCESS);

	/* Test that a multi-vector command that needs to be split on the optimal
	 * I/O boundary and then split further is aborted correctly. Abort is
	 * requested before the second child I/O is submitted. The parent I/O
	 * should complete with failure without submitting the second child I/O.
	 */
	for (i = 0; i < SPDK_BDEV_IO_NUM_CHILD_IOV * 2; i++) {
		iov[i].iov_base = (void *)((i + 1) * 0x10000);
		iov[i].iov_len = 512;
	}

	bdev->optimal_io_boundary = SPDK_BDEV_IO_NUM_CHILD_IOV;
	g_io_done = false;
	rc = spdk_bdev_readv_blocks(desc, io_ch, iov, SPDK_BDEV_IO_NUM_CHILD_IOV * 2, 0,
				    SPDK_BDEV_IO_NUM_CHILD_IOV * 2, io_done, &io_ctx1);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);

	g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS;

	rc = spdk_bdev_abort(desc, io_ch, &io_ctx1, abort_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED);
	stub_complete_io(1);
	CU_ASSERT(g_abort_done == true);
	CU_ASSERT(g_abort_status == SPDK_BDEV_IO_STATUS_SUCCESS);

	g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS;

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);

	bdev->optimal_io_boundary = 16;
	g_io_done = false;

	/* Test that a single-vector command which is split is aborted correctly.
	 * Unlike the cases above, the child abort requests are submitted
	 * sequentially, due to the limited capacity of the spdk_bdev_io pool.
	 */
	rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 50, io_done, &io_ctx1);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 4);

	g_abort_done = false;
	g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS;

	rc = spdk_bdev_abort(desc, io_ch, &io_ctx1, abort_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(!TAILQ_EMPTY(&mgmt_ch->io_wait_queue));
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 4);

	stub_complete_io(1);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED);
	stub_complete_io(3);
	CU_ASSERT(g_abort_done == true);
	CU_ASSERT(g_abort_status == SPDK_BDEV_IO_STATUS_SUCCESS);

	g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS;

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);

	spdk_put_io_channel(io_ch);
	spdk_bdev_close(desc);
	free_bdev(bdev);
	ut_fini_bdev();
}

static void
bdev_unmap(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc = NULL;
	struct spdk_io_channel *ioch;
	struct spdk_bdev_channel *bdev_ch;
	struct ut_expected_io *expected_io;
	struct spdk_bdev_opts bdev_opts = {};
	uint32_t i, num_outstanding;
	uint64_t offset, num_blocks, max_unmap_blocks, num_children;
	int rc;

	spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts));
	bdev_opts.bdev_io_pool_size = 512;
	bdev_opts.bdev_io_cache_size = 64;
	ut_init_bdev(&bdev_opts);

	bdev = allocate_bdev("bdev");

	rc = spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc);
	CU_ASSERT_EQUAL(rc, 0);
	SPDK_CU_ASSERT_FATAL(desc != NULL);
	CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc));
	ioch = spdk_bdev_get_io_channel(desc);
	SPDK_CU_ASSERT_FATAL(ioch != NULL);
	bdev_ch = spdk_io_channel_get_ctx(ioch);
	CU_ASSERT(TAILQ_EMPTY(&bdev_ch->io_submitted));

	fn_table.submit_request = stub_submit_request;
	g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS;

	/* Case 1: First test the request
won't be split */ 5582 num_blocks = 32; 5583 5584 g_io_done = false; 5585 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_UNMAP, 0, num_blocks, 0); 5586 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 5587 rc = spdk_bdev_unmap_blocks(desc, ioch, 0, num_blocks, io_done, NULL); 5588 CU_ASSERT_EQUAL(rc, 0); 5589 CU_ASSERT(g_io_done == false); 5590 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 5591 stub_complete_io(1); 5592 CU_ASSERT(g_io_done == true); 5593 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 5594 5595 /* Case 2: Test the split with 2 children requests */ 5596 bdev->max_unmap = 8; 5597 bdev->max_unmap_segments = 2; 5598 max_unmap_blocks = bdev->max_unmap * bdev->max_unmap_segments; 5599 num_blocks = max_unmap_blocks * 2; 5600 offset = 0; 5601 5602 g_io_done = false; 5603 for (i = 0; i < 2; i++) { 5604 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_UNMAP, offset, max_unmap_blocks, 0); 5605 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 5606 offset += max_unmap_blocks; 5607 } 5608 5609 rc = spdk_bdev_unmap_blocks(desc, ioch, 0, num_blocks, io_done, NULL); 5610 CU_ASSERT_EQUAL(rc, 0); 5611 CU_ASSERT(g_io_done == false); 5612 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 5613 stub_complete_io(2); 5614 CU_ASSERT(g_io_done == true); 5615 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 5616 5617 /* Case 3: Test the split with 15 children requests, will finish 8 requests first */ 5618 num_children = 15; 5619 num_blocks = max_unmap_blocks * num_children; 5620 g_io_done = false; 5621 offset = 0; 5622 for (i = 0; i < num_children; i++) { 5623 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_UNMAP, offset, max_unmap_blocks, 0); 5624 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 5625 offset += max_unmap_blocks; 5626 } 5627 5628 rc = spdk_bdev_unmap_blocks(desc, ioch, 0, num_blocks, io_done, NULL); 5629 CU_ASSERT_EQUAL(rc, 0); 5630 CU_ASSERT(g_io_done == false); 5631 5632 while (num_children > 0) { 5633 num_outstanding = spdk_min(num_children, SPDK_BDEV_MAX_CHILDREN_UNMAP_WRITE_ZEROES_REQS); 5634 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == num_outstanding); 5635 stub_complete_io(num_outstanding); 5636 num_children -= num_outstanding; 5637 } 5638 CU_ASSERT(g_io_done == true); 5639 5640 spdk_put_io_channel(ioch); 5641 spdk_bdev_close(desc); 5642 free_bdev(bdev); 5643 ut_fini_bdev(); 5644 } 5645 5646 static void 5647 bdev_write_zeroes_split_test(void) 5648 { 5649 struct spdk_bdev *bdev; 5650 struct spdk_bdev_desc *desc = NULL; 5651 struct spdk_io_channel *ioch; 5652 struct spdk_bdev_channel *bdev_ch; 5653 struct ut_expected_io *expected_io; 5654 struct spdk_bdev_opts bdev_opts = {}; 5655 uint32_t i, num_outstanding; 5656 uint64_t offset, num_blocks, max_write_zeroes_blocks, num_children; 5657 int rc; 5658 5659 spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts)); 5660 bdev_opts.bdev_io_pool_size = 512; 5661 bdev_opts.bdev_io_cache_size = 64; 5662 ut_init_bdev(&bdev_opts); 5663 5664 bdev = allocate_bdev("bdev"); 5665 5666 rc = spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc); 5667 CU_ASSERT_EQUAL(rc, 0); 5668 SPDK_CU_ASSERT_FATAL(desc != NULL); 5669 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 5670 ioch = spdk_bdev_get_io_channel(desc); 5671 SPDK_CU_ASSERT_FATAL(ioch != NULL); 5672 bdev_ch = spdk_io_channel_get_ctx(ioch); 5673 CU_ASSERT(TAILQ_EMPTY(&bdev_ch->io_submitted)); 5674 5675 fn_table.submit_request = stub_submit_request; 5676 
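	/* Split math used throughout this test: with bdev->max_write_zeroes set to
	 * max_write_zeroes_blocks, a request of N blocks (an exact multiple here)
	 * is split into N / max_write_zeroes_blocks children, and at most
	 * SPDK_BDEV_MAX_CHILDREN_UNMAP_WRITE_ZEROES_REQS of them are outstanding
	 * at a time, as Case 3 below exercises by completing them in batches of
	 * that size.
	 */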
g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 5677 5678 /* Case 1: First test the request won't be split */ 5679 num_blocks = 32; 5680 5681 g_io_done = false; 5682 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, 0, num_blocks, 0); 5683 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 5684 rc = spdk_bdev_write_zeroes_blocks(desc, ioch, 0, num_blocks, io_done, NULL); 5685 CU_ASSERT_EQUAL(rc, 0); 5686 CU_ASSERT(g_io_done == false); 5687 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 5688 stub_complete_io(1); 5689 CU_ASSERT(g_io_done == true); 5690 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 5691 5692 /* Case 2: Test the split with 2 children requests */ 5693 max_write_zeroes_blocks = 8; 5694 bdev->max_write_zeroes = max_write_zeroes_blocks; 5695 num_blocks = max_write_zeroes_blocks * 2; 5696 offset = 0; 5697 5698 g_io_done = false; 5699 for (i = 0; i < 2; i++) { 5700 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, offset, max_write_zeroes_blocks, 5701 0); 5702 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 5703 offset += max_write_zeroes_blocks; 5704 } 5705 5706 rc = spdk_bdev_write_zeroes_blocks(desc, ioch, 0, num_blocks, io_done, NULL); 5707 CU_ASSERT_EQUAL(rc, 0); 5708 CU_ASSERT(g_io_done == false); 5709 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 5710 stub_complete_io(2); 5711 CU_ASSERT(g_io_done == true); 5712 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 5713 5714 /* Case 3: Test the split with 15 children requests, will finish 8 requests first */ 5715 num_children = 15; 5716 num_blocks = max_write_zeroes_blocks * num_children; 5717 g_io_done = false; 5718 offset = 0; 5719 for (i = 0; i < num_children; i++) { 5720 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, offset, max_write_zeroes_blocks, 5721 0); 5722 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 5723 offset += max_write_zeroes_blocks; 5724 } 5725 5726 rc = spdk_bdev_write_zeroes_blocks(desc, ioch, 0, num_blocks, io_done, NULL); 5727 CU_ASSERT_EQUAL(rc, 0); 5728 CU_ASSERT(g_io_done == false); 5729 5730 while (num_children > 0) { 5731 num_outstanding = spdk_min(num_children, SPDK_BDEV_MAX_CHILDREN_UNMAP_WRITE_ZEROES_REQS); 5732 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == num_outstanding); 5733 stub_complete_io(num_outstanding); 5734 num_children -= num_outstanding; 5735 } 5736 CU_ASSERT(g_io_done == true); 5737 5738 spdk_put_io_channel(ioch); 5739 spdk_bdev_close(desc); 5740 free_bdev(bdev); 5741 ut_fini_bdev(); 5742 } 5743 5744 static void 5745 bdev_set_options_test(void) 5746 { 5747 struct spdk_bdev_opts bdev_opts = {}; 5748 int rc; 5749 5750 /* Case1: Do not set opts_size */ 5751 rc = spdk_bdev_set_opts(&bdev_opts); 5752 CU_ASSERT(rc == -1); 5753 } 5754 5755 static struct spdk_memory_domain *g_bdev_memory_domain = (struct spdk_memory_domain *) 0xf00df00d; 5756 5757 static int 5758 test_bdev_get_supported_dma_device_types_op(void *ctx, struct spdk_memory_domain **domains, 5759 int array_size) 5760 { 5761 if (array_size > 0 && domains) { 5762 domains[0] = g_bdev_memory_domain; 5763 } 5764 5765 return 1; 5766 } 5767 5768 static void 5769 bdev_get_memory_domains(void) 5770 { 5771 struct spdk_bdev_fn_table fn_table = { 5772 .get_memory_domains = test_bdev_get_supported_dma_device_types_op 5773 }; 5774 struct spdk_bdev bdev = { .fn_table = &fn_table }; 5775 struct spdk_memory_domain *domains[2] = {}; 5776 int rc; 5777 5778 /* bdev is NULL */ 5779 rc = 
spdk_bdev_get_memory_domains(NULL, domains, 2); 5780 CU_ASSERT(rc == -EINVAL); 5781 5782 /* domains is NULL */ 5783 rc = spdk_bdev_get_memory_domains(&bdev, NULL, 2); 5784 CU_ASSERT(rc == 1); 5785 5786 /* array size is 0 */ 5787 rc = spdk_bdev_get_memory_domains(&bdev, domains, 0); 5788 CU_ASSERT(rc == 1); 5789 5790 /* get_supported_dma_device_types op is set */ 5791 rc = spdk_bdev_get_memory_domains(&bdev, domains, 2); 5792 CU_ASSERT(rc == 1); 5793 CU_ASSERT(domains[0] == g_bdev_memory_domain); 5794 5795 /* get_supported_dma_device_types op is not set */ 5796 fn_table.get_memory_domains = NULL; 5797 rc = spdk_bdev_get_memory_domains(&bdev, domains, 2); 5798 CU_ASSERT(rc == 0); 5799 } 5800 5801 static void 5802 _bdev_io_ext(struct spdk_bdev_ext_io_opts *ext_io_opts) 5803 { 5804 struct spdk_bdev *bdev; 5805 struct spdk_bdev_desc *desc = NULL; 5806 struct spdk_io_channel *io_ch; 5807 char io_buf[512]; 5808 struct iovec iov = { .iov_base = io_buf, .iov_len = 512 }; 5809 struct ut_expected_io *expected_io; 5810 int rc; 5811 5812 ut_init_bdev(NULL); 5813 5814 bdev = allocate_bdev("bdev0"); 5815 bdev->md_interleave = false; 5816 bdev->md_len = 8; 5817 5818 rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc); 5819 CU_ASSERT(rc == 0); 5820 SPDK_CU_ASSERT_FATAL(desc != NULL); 5821 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 5822 io_ch = spdk_bdev_get_io_channel(desc); 5823 CU_ASSERT(io_ch != NULL); 5824 5825 /* read */ 5826 g_io_done = false; 5827 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 32, 14, 1); 5828 if (ext_io_opts) { 5829 expected_io->md_buf = ext_io_opts->metadata; 5830 } 5831 ut_expected_io_set_iov(expected_io, 0, iov.iov_base, iov.iov_len); 5832 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 5833 5834 rc = spdk_bdev_readv_blocks_ext(desc, io_ch, &iov, 1, 32, 14, io_done, NULL, ext_io_opts); 5835 5836 CU_ASSERT(rc == 0); 5837 CU_ASSERT(g_io_done == false); 5838 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 5839 stub_complete_io(1); 5840 CU_ASSERT(g_io_done == true); 5841 5842 /* write */ 5843 g_io_done = false; 5844 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 32, 14, 1); 5845 if (ext_io_opts) { 5846 expected_io->md_buf = ext_io_opts->metadata; 5847 } 5848 ut_expected_io_set_iov(expected_io, 0, iov.iov_base, iov.iov_len); 5849 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 5850 5851 rc = spdk_bdev_writev_blocks_ext(desc, io_ch, &iov, 1, 32, 14, io_done, NULL, ext_io_opts); 5852 5853 CU_ASSERT(rc == 0); 5854 CU_ASSERT(g_io_done == false); 5855 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 5856 stub_complete_io(1); 5857 CU_ASSERT(g_io_done == true); 5858 5859 spdk_put_io_channel(io_ch); 5860 spdk_bdev_close(desc); 5861 free_bdev(bdev); 5862 ut_fini_bdev(); 5863 5864 } 5865 5866 static void 5867 bdev_io_ext(void) 5868 { 5869 struct spdk_bdev_ext_io_opts ext_io_opts = { 5870 .metadata = (void *)0xFF000000, 5871 .size = sizeof(ext_io_opts), 5872 .dif_check_flags_exclude_mask = 0 5873 }; 5874 5875 _bdev_io_ext(&ext_io_opts); 5876 } 5877 5878 static void 5879 bdev_io_ext_no_opts(void) 5880 { 5881 _bdev_io_ext(NULL); 5882 } 5883 5884 static void 5885 bdev_io_ext_invalid_opts(void) 5886 { 5887 struct spdk_bdev *bdev; 5888 struct spdk_bdev_desc *desc = NULL; 5889 struct spdk_io_channel *io_ch; 5890 char io_buf[512]; 5891 struct iovec iov = { .iov_base = io_buf, .iov_len = 512 }; 5892 struct spdk_bdev_ext_io_opts ext_io_opts = { 5893 .metadata = (void *)0xFF000000, 5894 .size 
= sizeof(ext_io_opts), 5895 .dif_check_flags_exclude_mask = 0 5896 }; 5897 int rc; 5898 5899 ut_init_bdev(NULL); 5900 5901 bdev = allocate_bdev("bdev0"); 5902 bdev->md_interleave = false; 5903 bdev->md_len = 8; 5904 5905 rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc); 5906 CU_ASSERT(rc == 0); 5907 SPDK_CU_ASSERT_FATAL(desc != NULL); 5908 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 5909 io_ch = spdk_bdev_get_io_channel(desc); 5910 CU_ASSERT(io_ch != NULL); 5911 5912 /* Test invalid ext_opts size */ 5913 ext_io_opts.size = 0; 5914 rc = spdk_bdev_readv_blocks_ext(desc, io_ch, &iov, 1, 32, 14, io_done, NULL, &ext_io_opts); 5915 CU_ASSERT(rc == -EINVAL); 5916 rc = spdk_bdev_writev_blocks_ext(desc, io_ch, &iov, 1, 32, 14, io_done, NULL, &ext_io_opts); 5917 CU_ASSERT(rc == -EINVAL); 5918 5919 ext_io_opts.size = sizeof(ext_io_opts) * 2; 5920 rc = spdk_bdev_readv_blocks_ext(desc, io_ch, &iov, 1, 32, 14, io_done, NULL, &ext_io_opts); 5921 CU_ASSERT(rc == -EINVAL); 5922 rc = spdk_bdev_writev_blocks_ext(desc, io_ch, &iov, 1, 32, 14, io_done, NULL, &ext_io_opts); 5923 CU_ASSERT(rc == -EINVAL); 5924 5925 ext_io_opts.size = offsetof(struct spdk_bdev_ext_io_opts, metadata) + 5926 sizeof(ext_io_opts.metadata) - 1; 5927 rc = spdk_bdev_readv_blocks_ext(desc, io_ch, &iov, 1, 32, 14, io_done, NULL, &ext_io_opts); 5928 CU_ASSERT(rc == -EINVAL); 5929 rc = spdk_bdev_writev_blocks_ext(desc, io_ch, &iov, 1, 32, 14, io_done, NULL, &ext_io_opts); 5930 CU_ASSERT(rc == -EINVAL); 5931 5932 spdk_put_io_channel(io_ch); 5933 spdk_bdev_close(desc); 5934 free_bdev(bdev); 5935 ut_fini_bdev(); 5936 } 5937 5938 static void 5939 bdev_io_ext_split(void) 5940 { 5941 struct spdk_bdev *bdev; 5942 struct spdk_bdev_desc *desc = NULL; 5943 struct spdk_io_channel *io_ch; 5944 char io_buf[512]; 5945 struct iovec iov = { .iov_base = io_buf, .iov_len = 512 }; 5946 struct ut_expected_io *expected_io; 5947 struct spdk_bdev_ext_io_opts ext_io_opts = { 5948 .metadata = (void *)0xFF000000, 5949 .size = sizeof(ext_io_opts), 5950 .dif_check_flags_exclude_mask = 0 5951 }; 5952 int rc; 5953 5954 ut_init_bdev(NULL); 5955 5956 bdev = allocate_bdev("bdev0"); 5957 bdev->md_interleave = false; 5958 bdev->md_len = 8; 5959 5960 rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc); 5961 CU_ASSERT(rc == 0); 5962 SPDK_CU_ASSERT_FATAL(desc != NULL); 5963 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 5964 io_ch = spdk_bdev_get_io_channel(desc); 5965 CU_ASSERT(io_ch != NULL); 5966 5967 /* Check that IO request with ext_opts and metadata is split correctly 5968 * Offset 14, length 8, payload 0xF000 5969 * Child - Offset 14, length 2, payload 0xF000 5970 * Child - Offset 16, length 6, payload 0xF000 + 2 * 512 5971 */ 5972 bdev->optimal_io_boundary = 16; 5973 bdev->split_on_optimal_io_boundary = true; 5974 bdev->md_interleave = false; 5975 bdev->md_len = 8; 5976 5977 iov.iov_base = (void *)0xF000; 5978 iov.iov_len = 4096; 5979 memset(&ext_io_opts, 0, sizeof(ext_io_opts)); 5980 ext_io_opts.metadata = (void *)0xFF000000; 5981 ext_io_opts.size = sizeof(ext_io_opts); 5982 g_io_done = false; 5983 5984 /* read */ 5985 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 14, 2, 1); 5986 expected_io->md_buf = ext_io_opts.metadata; 5987 ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 2 * 512); 5988 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 5989 5990 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 16, 6, 1); 5991 expected_io->md_buf = ext_io_opts.metadata + 2 * 8; 5992 
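	/* For separate (non-interleaved) metadata, the second child's md pointer
	 * is advanced past the parent's metadata buffer by
	 * (blocks in the first child) * md_len = 2 * 8 bytes.
	 */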
ut_expected_io_set_iov(expected_io, 0, (void *)(0xF000 + 2 * 512), 6 * 512); 5993 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 5994 5995 rc = spdk_bdev_readv_blocks_ext(desc, io_ch, &iov, 1, 14, 8, io_done, NULL, &ext_io_opts); 5996 CU_ASSERT(rc == 0); 5997 CU_ASSERT(g_io_done == false); 5998 5999 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 6000 stub_complete_io(2); 6001 CU_ASSERT(g_io_done == true); 6002 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 6003 6004 /* write */ 6005 g_io_done = false; 6006 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 14, 2, 1); 6007 expected_io->md_buf = ext_io_opts.metadata; 6008 ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 2 * 512); 6009 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 6010 6011 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 16, 6, 1); 6012 expected_io->md_buf = ext_io_opts.metadata + 2 * 8; 6013 ut_expected_io_set_iov(expected_io, 0, (void *)(0xF000 + 2 * 512), 6 * 512); 6014 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 6015 6016 rc = spdk_bdev_writev_blocks_ext(desc, io_ch, &iov, 1, 14, 8, io_done, NULL, &ext_io_opts); 6017 CU_ASSERT(rc == 0); 6018 CU_ASSERT(g_io_done == false); 6019 6020 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 6021 stub_complete_io(2); 6022 CU_ASSERT(g_io_done == true); 6023 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 6024 6025 spdk_put_io_channel(io_ch); 6026 spdk_bdev_close(desc); 6027 free_bdev(bdev); 6028 ut_fini_bdev(); 6029 } 6030 6031 static void 6032 bdev_io_ext_bounce_buffer(void) 6033 { 6034 struct spdk_bdev *bdev; 6035 struct spdk_bdev_desc *desc = NULL; 6036 struct spdk_io_channel *io_ch; 6037 char io_buf[512]; 6038 struct iovec iov = { .iov_base = io_buf, .iov_len = 512 }; 6039 struct ut_expected_io *expected_io, *aux_io; 6040 struct spdk_bdev_ext_io_opts ext_io_opts = { 6041 .metadata = (void *)0xFF000000, 6042 .size = sizeof(ext_io_opts), 6043 .dif_check_flags_exclude_mask = 0 6044 }; 6045 int rc; 6046 6047 ut_init_bdev(NULL); 6048 6049 bdev = allocate_bdev("bdev0"); 6050 bdev->md_interleave = false; 6051 bdev->md_len = 8; 6052 6053 rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc); 6054 CU_ASSERT(rc == 0); 6055 SPDK_CU_ASSERT_FATAL(desc != NULL); 6056 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 6057 io_ch = spdk_bdev_get_io_channel(desc); 6058 CU_ASSERT(io_ch != NULL); 6059 6060 /* Verify data pull/push 6061 * bdev doesn't support memory domains, so buffers from bdev memory pool will be used */ 6062 ext_io_opts.memory_domain = (struct spdk_memory_domain *)0xdeadbeef; 6063 6064 /* read */ 6065 g_io_done = false; 6066 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 32, 14, 1); 6067 ut_expected_io_set_iov(expected_io, 0, iov.iov_base, iov.iov_len); 6068 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 6069 6070 rc = spdk_bdev_readv_blocks_ext(desc, io_ch, &iov, 1, 32, 14, io_done, NULL, &ext_io_opts); 6071 6072 CU_ASSERT(rc == 0); 6073 CU_ASSERT(g_io_done == false); 6074 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 6075 stub_complete_io(1); 6076 CU_ASSERT(g_memory_domain_push_data_called == true); 6077 CU_ASSERT(g_io_done == true); 6078 6079 /* write */ 6080 g_io_done = false; 6081 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 32, 14, 1); 6082 ut_expected_io_set_iov(expected_io, 0, iov.iov_base, iov.iov_len); 6083 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, 
expected_io, link); 6084 6085 rc = spdk_bdev_writev_blocks_ext(desc, io_ch, &iov, 1, 32, 14, io_done, NULL, &ext_io_opts); 6086 6087 CU_ASSERT(rc == 0); 6088 CU_ASSERT(g_memory_domain_pull_data_called == true); 6089 CU_ASSERT(g_io_done == false); 6090 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 6091 stub_complete_io(1); 6092 CU_ASSERT(g_io_done == true); 6093 6094 /* Verify the request is queued after receiving ENOMEM from pull */ 6095 g_io_done = false; 6096 aux_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 32, 14, 1); 6097 ut_expected_io_set_iov(aux_io, 0, iov.iov_base, iov.iov_len); 6098 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, aux_io, link); 6099 rc = spdk_bdev_writev_blocks(desc, io_ch, &iov, 1, 32, 14, io_done, NULL); 6100 CU_ASSERT(rc == 0); 6101 CU_ASSERT(g_io_done == false); 6102 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 6103 6104 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 32, 14, 1); 6105 ut_expected_io_set_iov(expected_io, 0, iov.iov_base, iov.iov_len); 6106 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 6107 6108 MOCK_SET(spdk_memory_domain_pull_data, -ENOMEM); 6109 rc = spdk_bdev_writev_blocks_ext(desc, io_ch, &iov, 1, 32, 14, io_done, NULL, &ext_io_opts); 6110 CU_ASSERT(rc == 0); 6111 CU_ASSERT(g_io_done == false); 6112 /* The second IO has been queued */ 6113 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 6114 6115 MOCK_CLEAR(spdk_memory_domain_pull_data); 6116 g_memory_domain_pull_data_called = false; 6117 stub_complete_io(1); 6118 CU_ASSERT(g_io_done == true); 6119 CU_ASSERT(g_memory_domain_pull_data_called == true); 6120 /* The second IO should be submitted now */ 6121 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 6122 g_io_done = false; 6123 stub_complete_io(1); 6124 CU_ASSERT(g_io_done == true); 6125 6126 /* Verify the request is queued after receiving ENOMEM from push */ 6127 g_io_done = false; 6128 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 32, 14, 1); 6129 ut_expected_io_set_iov(expected_io, 0, iov.iov_base, iov.iov_len); 6130 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 6131 6132 MOCK_SET(spdk_memory_domain_push_data, -ENOMEM); 6133 rc = spdk_bdev_readv_blocks_ext(desc, io_ch, &iov, 1, 32, 14, io_done, NULL, &ext_io_opts); 6134 CU_ASSERT(rc == 0); 6135 CU_ASSERT(g_io_done == false); 6136 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 6137 6138 aux_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 32, 14, 1); 6139 ut_expected_io_set_iov(aux_io, 0, iov.iov_base, iov.iov_len); 6140 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, aux_io, link); 6141 rc = spdk_bdev_writev_blocks(desc, io_ch, &iov, 1, 32, 14, io_done, NULL); 6142 CU_ASSERT(rc == 0); 6143 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 6144 6145 stub_complete_io(1); 6146 /* The IO isn't done yet, it's still waiting on push */ 6147 CU_ASSERT(g_io_done == false); 6148 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 6149 MOCK_CLEAR(spdk_memory_domain_push_data); 6150 g_memory_domain_push_data_called = false; 6151 /* Completing the second IO should also trigger push on the first one */ 6152 stub_complete_io(1); 6153 CU_ASSERT(g_io_done == true); 6154 CU_ASSERT(g_memory_domain_push_data_called == true); 6155 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 6156 6157 spdk_put_io_channel(io_ch); 6158 spdk_bdev_close(desc); 6159 free_bdev(bdev); 6160 ut_fini_bdev(); 6161 } 6162 6163 static void 6164 bdev_register_uuid_alias(void) 
{
	struct spdk_bdev *bdev, *second;
	char uuid[SPDK_UUID_STRING_LEN];
	int rc;

	ut_init_bdev(NULL);
	bdev = allocate_bdev("bdev0");

	/* Make sure a UUID was generated */
	CU_ASSERT_FALSE(spdk_uuid_is_null(&bdev->uuid));

	/* Check that a UUID alias was registered */
	spdk_uuid_fmt_lower(uuid, sizeof(uuid), &bdev->uuid);
	CU_ASSERT_EQUAL(spdk_bdev_get_by_name(uuid), bdev);

	/* Unregister the bdev */
	spdk_bdev_unregister(bdev, NULL, NULL);
	poll_threads();
	CU_ASSERT_PTR_NULL(spdk_bdev_get_by_name(uuid));

	/* Check the same, but this time register the bdev with a non-zero UUID */
	rc = spdk_bdev_register(bdev);
	CU_ASSERT_EQUAL(rc, 0);
	CU_ASSERT_EQUAL(spdk_bdev_get_by_name(uuid), bdev);

	/* Unregister the bdev */
	spdk_bdev_unregister(bdev, NULL, NULL);
	poll_threads();
	CU_ASSERT_PTR_NULL(spdk_bdev_get_by_name(uuid));

	/* Register the bdev using the UUID as the name */
	bdev->name = uuid;
	rc = spdk_bdev_register(bdev);
	CU_ASSERT_EQUAL(rc, 0);
	CU_ASSERT_EQUAL(spdk_bdev_get_by_name(uuid), bdev);

	/* Unregister the bdev */
	spdk_bdev_unregister(bdev, NULL, NULL);
	poll_threads();
	CU_ASSERT_PTR_NULL(spdk_bdev_get_by_name(uuid));

	/* Check that it's not possible to register two bdevs with the same UUID */
	bdev->name = "bdev0";
	second = allocate_bdev("bdev1");
	spdk_uuid_copy(&bdev->uuid, &second->uuid);
	rc = spdk_bdev_register(bdev);
	CU_ASSERT_EQUAL(rc, -EEXIST);

	/* Regenerate the UUID and re-check */
	spdk_uuid_generate(&bdev->uuid);
	rc = spdk_bdev_register(bdev);
	CU_ASSERT_EQUAL(rc, 0);

	/* And check that both bdevs can be retrieved through their UUIDs */
	spdk_uuid_fmt_lower(uuid, sizeof(uuid), &bdev->uuid);
	CU_ASSERT_EQUAL(spdk_bdev_get_by_name(uuid), bdev);
	spdk_uuid_fmt_lower(uuid, sizeof(uuid), &second->uuid);
	CU_ASSERT_EQUAL(spdk_bdev_get_by_name(uuid), second);

	free_bdev(second);
	free_bdev(bdev);
	ut_fini_bdev();
}

static void
bdev_unregister_by_name(void)
{
	struct spdk_bdev *bdev;
	int rc;

	bdev = allocate_bdev("bdev");

	g_event_type1 = 0xFF;
	g_unregister_arg = NULL;
	g_unregister_rc = -1;

	rc = spdk_bdev_unregister_by_name("bdev1", &bdev_ut_if, bdev_unregister_cb, (void *)0x12345678);
	CU_ASSERT(rc == -ENODEV);

	rc = spdk_bdev_unregister_by_name("bdev", &vbdev_ut_if, bdev_unregister_cb, (void *)0x12345678);
	CU_ASSERT(rc == -ENODEV);

	rc = spdk_bdev_unregister_by_name("bdev", &bdev_ut_if, bdev_unregister_cb, (void *)0x12345678);
	CU_ASSERT(rc == 0);

	/* Check that the unregister callback is delayed */
	CU_ASSERT(g_unregister_arg == NULL);
	CU_ASSERT(g_unregister_rc == -1);

	poll_threads();

	/* The event callback shall not be issued because the device was closed */
	CU_ASSERT(g_event_type1 == 0xFF);
	/* The unregister callback is issued */
	CU_ASSERT(g_unregister_arg == (void *)0x12345678);
	CU_ASSERT(g_unregister_rc == 0);

	free_bdev(bdev);
}

static int
count_bdevs(void *ctx, struct spdk_bdev *bdev)
{
	int *count = ctx;

	(*count)++;

	return 0;
}

static void
for_each_bdev_test(void)
{
	struct spdk_bdev *bdev[8];
	int rc, count;

	bdev[0] = allocate_bdev("bdev0");
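	/* bdev0 is put into SPDK_BDEV_STATUS_REMOVING below, so spdk_for_each_bdev()
	 * only visits the remaining 7 bdevs; spdk_for_each_bdev_leaf() additionally
	 * skips the claimed bdev1/bdev3/bdev5 and visits only the 4 unclaimed
	 * leaves.
	 */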
bdev[0]->internal.status = SPDK_BDEV_STATUS_REMOVING; 6283 6284 bdev[1] = allocate_bdev("bdev1"); 6285 rc = spdk_bdev_module_claim_bdev(bdev[1], NULL, &bdev_ut_if); 6286 CU_ASSERT(rc == 0); 6287 6288 bdev[2] = allocate_bdev("bdev2"); 6289 6290 bdev[3] = allocate_bdev("bdev3"); 6291 rc = spdk_bdev_module_claim_bdev(bdev[3], NULL, &bdev_ut_if); 6292 CU_ASSERT(rc == 0); 6293 6294 bdev[4] = allocate_bdev("bdev4"); 6295 6296 bdev[5] = allocate_bdev("bdev5"); 6297 rc = spdk_bdev_module_claim_bdev(bdev[5], NULL, &bdev_ut_if); 6298 CU_ASSERT(rc == 0); 6299 6300 bdev[6] = allocate_bdev("bdev6"); 6301 6302 bdev[7] = allocate_bdev("bdev7"); 6303 6304 count = 0; 6305 rc = spdk_for_each_bdev(&count, count_bdevs); 6306 CU_ASSERT(rc == 0); 6307 CU_ASSERT(count == 7); 6308 6309 count = 0; 6310 rc = spdk_for_each_bdev_leaf(&count, count_bdevs); 6311 CU_ASSERT(rc == 0); 6312 CU_ASSERT(count == 4); 6313 6314 bdev[0]->internal.status = SPDK_BDEV_STATUS_READY; 6315 free_bdev(bdev[0]); 6316 free_bdev(bdev[1]); 6317 free_bdev(bdev[2]); 6318 free_bdev(bdev[3]); 6319 free_bdev(bdev[4]); 6320 free_bdev(bdev[5]); 6321 free_bdev(bdev[6]); 6322 free_bdev(bdev[7]); 6323 } 6324 6325 static void 6326 bdev_seek_test(void) 6327 { 6328 struct spdk_bdev *bdev; 6329 struct spdk_bdev_desc *desc = NULL; 6330 struct spdk_io_channel *io_ch; 6331 int rc; 6332 6333 ut_init_bdev(NULL); 6334 poll_threads(); 6335 6336 bdev = allocate_bdev("bdev0"); 6337 6338 rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc); 6339 CU_ASSERT(rc == 0); 6340 poll_threads(); 6341 SPDK_CU_ASSERT_FATAL(desc != NULL); 6342 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 6343 io_ch = spdk_bdev_get_io_channel(desc); 6344 CU_ASSERT(io_ch != NULL); 6345 6346 /* Seek data not supported */ 6347 ut_enable_io_type(SPDK_BDEV_IO_TYPE_SEEK_DATA, false); 6348 rc = spdk_bdev_seek_data(desc, io_ch, 0, bdev_seek_cb, NULL); 6349 CU_ASSERT(rc == 0); 6350 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 6351 poll_threads(); 6352 CU_ASSERT(g_seek_offset == 0); 6353 6354 /* Seek hole not supported */ 6355 ut_enable_io_type(SPDK_BDEV_IO_TYPE_SEEK_HOLE, false); 6356 rc = spdk_bdev_seek_hole(desc, io_ch, 0, bdev_seek_cb, NULL); 6357 CU_ASSERT(rc == 0); 6358 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 6359 poll_threads(); 6360 CU_ASSERT(g_seek_offset == UINT64_MAX); 6361 6362 /* Seek data supported */ 6363 g_seek_data_offset = 12345; 6364 ut_enable_io_type(SPDK_BDEV_IO_TYPE_SEEK_DATA, true); 6365 rc = spdk_bdev_seek_data(desc, io_ch, 0, bdev_seek_cb, NULL); 6366 CU_ASSERT(rc == 0); 6367 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 6368 stub_complete_io(1); 6369 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 6370 CU_ASSERT(g_seek_offset == 12345); 6371 6372 /* Seek hole supported */ 6373 g_seek_hole_offset = 67890; 6374 ut_enable_io_type(SPDK_BDEV_IO_TYPE_SEEK_HOLE, true); 6375 rc = spdk_bdev_seek_hole(desc, io_ch, 0, bdev_seek_cb, NULL); 6376 CU_ASSERT(rc == 0); 6377 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 6378 stub_complete_io(1); 6379 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 6380 CU_ASSERT(g_seek_offset == 67890); 6381 6382 spdk_put_io_channel(io_ch); 6383 spdk_bdev_close(desc); 6384 free_bdev(bdev); 6385 ut_fini_bdev(); 6386 } 6387 6388 static void 6389 bdev_copy(void) 6390 { 6391 struct spdk_bdev *bdev; 6392 struct spdk_bdev_desc *desc = NULL; 6393 struct spdk_io_channel *ioch; 6394 struct ut_expected_io *expected_io; 6395 uint64_t src_offset, num_blocks; 6396 uint32_t 
static void
bdev_copy(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc = NULL;
	struct spdk_io_channel *ioch;
	struct ut_expected_io *expected_io;
	uint64_t src_offset, num_blocks;
	uint32_t num_completed;
	int rc;

	ut_init_bdev(NULL);
	bdev = allocate_bdev("bdev");

	rc = spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc);
	CU_ASSERT_EQUAL(rc, 0);
	SPDK_CU_ASSERT_FATAL(desc != NULL);
	CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc));
	ioch = spdk_bdev_get_io_channel(desc);
	SPDK_CU_ASSERT_FATAL(ioch != NULL);

	fn_table.submit_request = stub_submit_request;
	g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS;

	/* First test that if the bdev supports copy, the request won't be split */
	bdev->md_len = 0;
	bdev->blocklen = 512;
	num_blocks = 128;
	src_offset = bdev->blockcnt - num_blocks;

	expected_io = ut_alloc_expected_copy_io(SPDK_BDEV_IO_TYPE_COPY, 0, src_offset, num_blocks);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	rc = spdk_bdev_copy_blocks(desc, ioch, 0, src_offset, num_blocks, io_done, NULL);
	CU_ASSERT_EQUAL(rc, 0);
	num_completed = stub_complete_io(1);
	CU_ASSERT_EQUAL(num_completed, 1);

	/* Check that the copy still works when the bdev does not support it;
	 * the request is emulated with a read followed by a write. */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, src_offset, num_blocks, 0);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 0, num_blocks, 0);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	ut_enable_io_type(SPDK_BDEV_IO_TYPE_COPY, false);

	rc = spdk_bdev_copy_blocks(desc, ioch, 0, src_offset, num_blocks, io_done, NULL);
	CU_ASSERT_EQUAL(rc, 0);
	num_completed = stub_complete_io(1);
	CU_ASSERT_EQUAL(num_completed, 1);
	num_completed = stub_complete_io(1);
	CU_ASSERT_EQUAL(num_completed, 1);

	ut_enable_io_type(SPDK_BDEV_IO_TYPE_COPY, true);
	spdk_put_io_channel(ioch);
	spdk_bdev_close(desc);
	free_bdev(bdev);
	ut_fini_bdev();
}
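/*
 * From the caller's point of view the two scenarios above are identical:
 * spdk_bdev_copy_blocks() is submitted the same way whether the backing bdev
 * implements SPDK_BDEV_IO_TYPE_COPY natively or the bdev layer emulates it.
 * A minimal sketch of such a caller (callback and helper names are
 * hypothetical):
 */
#if 0
static void
example_copy_done(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
{
	bool *done = cb_arg;

	*done = success;
	spdk_bdev_free_io(bdev_io);
}

static int
example_copy(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch, bool *done)
{
	/* Copy 128 blocks starting at block 1024 to block 0; completion is async */
	return spdk_bdev_copy_blocks(desc, ch, 0, 1024, 128, example_copy_done, done);
}
#endif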
static void
bdev_copy_split_test(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc = NULL;
	struct spdk_io_channel *ioch;
	struct spdk_bdev_channel *bdev_ch;
	struct ut_expected_io *expected_io;
	struct spdk_bdev_opts bdev_opts = {};
	uint32_t i, num_outstanding;
	uint64_t offset, src_offset, num_blocks, max_copy_blocks, num_children;
	int rc;

	spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts));
	bdev_opts.bdev_io_pool_size = 512;
	bdev_opts.bdev_io_cache_size = 64;
	rc = spdk_bdev_set_opts(&bdev_opts);
	CU_ASSERT(rc == 0);

	ut_init_bdev(NULL);
	bdev = allocate_bdev("bdev");

	rc = spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc);
	CU_ASSERT_EQUAL(rc, 0);
	SPDK_CU_ASSERT_FATAL(desc != NULL);
	CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc));
	ioch = spdk_bdev_get_io_channel(desc);
	SPDK_CU_ASSERT_FATAL(ioch != NULL);
	bdev_ch = spdk_io_channel_get_ctx(ioch);
	CU_ASSERT(TAILQ_EMPTY(&bdev_ch->io_submitted));

	fn_table.submit_request = stub_submit_request;
	g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS;

	/* Case 1: First test that the request won't be split */
	num_blocks = 32;
	src_offset = bdev->blockcnt - num_blocks;

	g_io_done = false;
	expected_io = ut_alloc_expected_copy_io(SPDK_BDEV_IO_TYPE_COPY, 0, src_offset, num_blocks);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
	rc = spdk_bdev_copy_blocks(desc, ioch, 0, src_offset, num_blocks, io_done, NULL);
	CU_ASSERT_EQUAL(rc, 0);
	CU_ASSERT(g_io_done == false);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);

	/* Case 2: Test the split with 2 child requests */
	max_copy_blocks = 8;
	bdev->max_copy = max_copy_blocks;
	num_children = 2;
	num_blocks = max_copy_blocks * num_children;
	offset = 0;
	src_offset = bdev->blockcnt - num_blocks;

	g_io_done = false;
	for (i = 0; i < num_children; i++) {
		expected_io = ut_alloc_expected_copy_io(SPDK_BDEV_IO_TYPE_COPY, offset,
							src_offset + offset, max_copy_blocks);
		TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
		offset += max_copy_blocks;
	}

	rc = spdk_bdev_copy_blocks(desc, ioch, 0, src_offset, num_blocks, io_done, NULL);
	CU_ASSERT_EQUAL(rc, 0);
	CU_ASSERT(g_io_done == false);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == num_children);
	stub_complete_io(num_children);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);

	/* Case 3: Test the split with 15 child requests; only
	 * SPDK_BDEV_MAX_CHILDREN_COPY_REQS (8) of them are outstanding at a time. */
	num_children = 15;
	num_blocks = max_copy_blocks * num_children;
	offset = 0;
	src_offset = bdev->blockcnt - num_blocks;

	g_io_done = false;
	for (i = 0; i < num_children; i++) {
		expected_io = ut_alloc_expected_copy_io(SPDK_BDEV_IO_TYPE_COPY, offset,
							src_offset + offset, max_copy_blocks);
		TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
		offset += max_copy_blocks;
	}

	rc = spdk_bdev_copy_blocks(desc, ioch, 0, src_offset, num_blocks, io_done, NULL);
	CU_ASSERT_EQUAL(rc, 0);
	CU_ASSERT(g_io_done == false);

	while (num_children > 0) {
		num_outstanding = spdk_min(num_children, SPDK_BDEV_MAX_CHILDREN_COPY_REQS);
		CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == num_outstanding);
		stub_complete_io(num_outstanding);
		num_children -= num_outstanding;
	}
	CU_ASSERT(g_io_done == true);

	/* Case 4: Same test scenario as case 2, but the bdev does not support copy. */
	ut_enable_io_type(SPDK_BDEV_IO_TYPE_COPY, false);

	num_children = 2;
	max_copy_blocks = spdk_bdev_get_max_copy(bdev);
	num_blocks = max_copy_blocks * num_children;
	src_offset = bdev->blockcnt - num_blocks;
	offset = 0;

	g_io_done = false;
	for (i = 0; i < num_children; i++) {
		expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, src_offset,
						   max_copy_blocks, 0);
		TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
		src_offset += max_copy_blocks;
	}
	for (i = 0; i < num_children; i++) {
		expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, offset,
						   max_copy_blocks, 0);
		TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
		offset += max_copy_blocks;
	}

	src_offset = bdev->blockcnt - num_blocks;
	offset = 0;

	rc = spdk_bdev_copy_blocks(desc, ioch, offset, src_offset, num_blocks, io_done, NULL);
	CU_ASSERT_EQUAL(rc, 0);
	CU_ASSERT(g_io_done == false);

	while (num_children > 0) {
		num_outstanding = spdk_min(num_children, SPDK_BDEV_MAX_CHILDREN_COPY_REQS);

		/* One copy request is split into one read and one write request. */
		CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == num_outstanding);
		stub_complete_io(num_outstanding);
		CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == num_outstanding);
		stub_complete_io(num_outstanding);

		num_children -= num_outstanding;
	}
	CU_ASSERT(g_io_done == true);

	ut_enable_io_type(SPDK_BDEV_IO_TYPE_COPY, true);

	spdk_put_io_channel(ioch);
	spdk_bdev_close(desc);
	free_bdev(bdev);
	ut_fini_bdev();
}
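/*
 * The child counts above follow directly from bdev->max_copy. For case 3,
 * num_blocks = 15 * 8 = 120 blocks with max_copy = 8 fans out into
 * ceil(120 / 8) = 15 children, completed in windows of
 * SPDK_BDEV_MAX_CHILDREN_COPY_REQS (8, then the remaining 7). A sketch of
 * the arithmetic (helper name hypothetical):
 */
#if 0
static uint64_t
expected_copy_children(uint64_t num_blocks, uint64_t max_copy_blocks)
{
	/* ceil(num_blocks / max_copy_blocks) */
	return (num_blocks + max_copy_blocks - 1) / max_copy_blocks;
}
#endif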
static void
examine_claim_v1(struct spdk_bdev *bdev)
{
	int rc;

	rc = spdk_bdev_module_claim_bdev(bdev, NULL, &vbdev_ut_if);
	CU_ASSERT(rc == 0);
}

static void
examine_no_lock_held(struct spdk_bdev *bdev)
{
	CU_ASSERT(!spdk_spin_held(&g_bdev_mgr.spinlock));
	CU_ASSERT(!spdk_spin_held(&bdev->internal.spinlock));
}

struct examine_claim_v2_ctx {
	struct ut_examine_ctx examine_ctx;
	enum spdk_bdev_claim_type claim_type;
	struct spdk_bdev_desc *desc;
};

static void
examine_claim_v2(struct spdk_bdev *bdev)
{
	struct examine_claim_v2_ctx *ctx = bdev->ctxt;
	int rc;

	rc = spdk_bdev_open_ext(bdev->name, false, bdev_ut_event_cb, NULL, &ctx->desc);
	CU_ASSERT(rc == 0);

	rc = spdk_bdev_module_claim_bdev_desc(ctx->desc, ctx->claim_type, NULL, &vbdev_ut_if);
	CU_ASSERT(rc == 0);
}

static void
examine_locks(void)
{
	struct spdk_bdev *bdev;
	struct ut_examine_ctx ctx = { 0 };
	struct examine_claim_v2_ctx v2_ctx;

	/* Without any claims, one code path is taken */
	ctx.examine_config = examine_no_lock_held;
	ctx.examine_disk = examine_no_lock_held;
	bdev = allocate_bdev_ctx("bdev0", &ctx);
	CU_ASSERT(ctx.examine_config_count == 1);
	CU_ASSERT(ctx.examine_disk_count == 1);
	CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_NONE);
	CU_ASSERT(bdev->internal.claim.v1.module == NULL);
	free_bdev(bdev);

	/* Exercise another path that is taken when examine_config() takes a v1 claim. */
	memset(&ctx, 0, sizeof(ctx));
	ctx.examine_config = examine_claim_v1;
	ctx.examine_disk = examine_no_lock_held;
	bdev = allocate_bdev_ctx("bdev0", &ctx);
	CU_ASSERT(ctx.examine_config_count == 1);
	CU_ASSERT(ctx.examine_disk_count == 1);
	CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_EXCL_WRITE);
	CU_ASSERT(bdev->internal.claim.v1.module == &vbdev_ut_if);
	spdk_bdev_module_release_bdev(bdev);
	CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_NONE);
	CU_ASSERT(bdev->internal.claim.v1.module == NULL);
	free_bdev(bdev);

	/* Exercise the final path that comes with v2 claims. */
	memset(&v2_ctx, 0, sizeof(v2_ctx));
	v2_ctx.examine_ctx.examine_config = examine_claim_v2;
	v2_ctx.examine_ctx.examine_disk = examine_no_lock_held;
	v2_ctx.claim_type = SPDK_BDEV_CLAIM_READ_MANY_WRITE_NONE;
	bdev = allocate_bdev_ctx("bdev0", &v2_ctx);
	CU_ASSERT(v2_ctx.examine_ctx.examine_config_count == 1);
	CU_ASSERT(v2_ctx.examine_ctx.examine_disk_count == 1);
	CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_READ_MANY_WRITE_NONE);
	spdk_bdev_close(v2_ctx.desc);
	CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_NONE);
	free_bdev(bdev);
}

#define UT_ASSERT_CLAIM_V2_COUNT(bdev, expect) \
	do { \
		uint32_t len = 0; \
		struct spdk_bdev_module_claim *claim; \
		TAILQ_FOREACH(claim, &bdev->internal.claim.v2.claims, link) { \
			len++; \
		} \
		CU_ASSERT(len == expect); \
	} while (0)
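/*
 * The claim_v2_* tests below exercise the three descriptor-based (v2) claim
 * types as this file uses them: READ_MANY_WRITE_ONE (RWO) admits a single
 * writer, READ_MANY_WRITE_NONE (ROM) admits only read-only claimants, and
 * READ_MANY_WRITE_SHARED (RWM) admits multiple writers that present the same
 * shared_claim_key. In each case the claim is tied to a descriptor and is
 * released when that descriptor is closed.
 */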
static void
claim_v2_rwo(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc;
	struct spdk_bdev_desc *desc2;
	struct spdk_bdev_claim_opts opts;
	int rc;

	bdev = allocate_bdev("bdev0");

	/* Claim without options */
	desc = NULL;
	rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(desc != NULL);
	rc = spdk_bdev_module_claim_bdev_desc(desc, SPDK_BDEV_CLAIM_READ_MANY_WRITE_ONE, NULL,
					      &bdev_ut_if);
	CU_ASSERT(rc == 0);
	CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_READ_MANY_WRITE_ONE);
	CU_ASSERT(desc->claim != NULL);
	CU_ASSERT(desc->claim->module == &bdev_ut_if);
	CU_ASSERT(strcmp(desc->claim->name, "") == 0);
	CU_ASSERT(TAILQ_FIRST(&bdev->internal.claim.v2.claims) == desc->claim);
	UT_ASSERT_CLAIM_V2_COUNT(bdev, 1);

	/* Release the claim by closing the descriptor */
	spdk_bdev_close(desc);
	CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_NONE);
	CU_ASSERT(TAILQ_EMPTY(&bdev->internal.open_descs));
	UT_ASSERT_CLAIM_V2_COUNT(bdev, 0);

	/* Claim with options; the name is copied, so clearing opts must not affect it */
	spdk_bdev_claim_opts_init(&opts, sizeof(opts));
	snprintf(opts.name, sizeof(opts.name), "%s", "claim with options");
	desc = NULL;
	rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(desc != NULL);
	rc = spdk_bdev_module_claim_bdev_desc(desc, SPDK_BDEV_CLAIM_READ_MANY_WRITE_ONE, &opts,
					      &bdev_ut_if);
	CU_ASSERT(rc == 0);
	CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_READ_MANY_WRITE_ONE);
	CU_ASSERT(desc->claim != NULL);
	CU_ASSERT(desc->claim->module == &bdev_ut_if);
	CU_ASSERT(strcmp(desc->claim->name, "claim with options") == 0);
	memset(&opts, 0, sizeof(opts));
	CU_ASSERT(strcmp(desc->claim->name, "claim with options") == 0);
	CU_ASSERT(TAILQ_FIRST(&bdev->internal.claim.v2.claims) == desc->claim);
	UT_ASSERT_CLAIM_V2_COUNT(bdev, 1);

	/* The claim blocks new writers. */
	desc2 = NULL;
	rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc2);
	CU_ASSERT(rc == -EPERM);
	CU_ASSERT(desc2 == NULL);

	/* New readers are allowed */
	desc2 = NULL;
	rc = spdk_bdev_open_ext("bdev0", false, bdev_ut_event_cb, NULL, &desc2);
	CU_ASSERT(rc == 0);
	CU_ASSERT(desc2 != NULL);
	CU_ASSERT(!desc2->write);

	/* No new v2 RWO claims are allowed */
	rc = spdk_bdev_module_claim_bdev_desc(desc2, SPDK_BDEV_CLAIM_READ_MANY_WRITE_ONE, NULL,
					      &bdev_ut_if);
	CU_ASSERT(rc == -EPERM);

	/* No new v2 ROM claims are allowed */
	CU_ASSERT(!desc2->write);
	rc = spdk_bdev_module_claim_bdev_desc(desc2, SPDK_BDEV_CLAIM_READ_MANY_WRITE_NONE, NULL,
					      &bdev_ut_if);
	CU_ASSERT(rc == -EPERM);
	CU_ASSERT(!desc2->write);

	/* No new v2 RWM claims are allowed */
	spdk_bdev_claim_opts_init(&opts, sizeof(opts));
	opts.shared_claim_key = (uint64_t)&opts;
	rc = spdk_bdev_module_claim_bdev_desc(desc2, SPDK_BDEV_CLAIM_READ_MANY_WRITE_SHARED, &opts,
					      &bdev_ut_if);
	CU_ASSERT(rc == -EPERM);
	CU_ASSERT(!desc2->write);

	/* No new v1 claims are allowed */
	rc = spdk_bdev_module_claim_bdev(bdev, NULL, &bdev_ut_if);
	CU_ASSERT(rc == -EPERM);

	/* None of the above changed the existing claim */
	CU_ASSERT(TAILQ_FIRST(&bdev->internal.claim.v2.claims) == desc->claim);
	UT_ASSERT_CLAIM_V2_COUNT(bdev, 1);

	/* Closing the first descriptor releases its claim; a new RWO claim on the
	 * read-only descriptor then succeeds and promotes it to read-write. */
	spdk_bdev_close(desc);
	CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_NONE);
	UT_ASSERT_CLAIM_V2_COUNT(bdev, 0);
	CU_ASSERT(!desc2->write);
	rc = spdk_bdev_module_claim_bdev_desc(desc2, SPDK_BDEV_CLAIM_READ_MANY_WRITE_ONE, NULL,
					      &bdev_ut_if);
	CU_ASSERT(rc == 0);
	CU_ASSERT(desc2->claim != NULL);
	CU_ASSERT(desc2->write);
	CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_READ_MANY_WRITE_ONE);
	CU_ASSERT(TAILQ_FIRST(&bdev->internal.claim.v2.claims) == desc2->claim);
	UT_ASSERT_CLAIM_V2_COUNT(bdev, 1);
	spdk_bdev_close(desc2);
	CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_NONE);
	UT_ASSERT_CLAIM_V2_COUNT(bdev, 0);

	/* Cannot claim with a key */
	desc = NULL;
	rc = spdk_bdev_open_ext("bdev0", false, bdev_ut_event_cb, NULL, &desc);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(desc != NULL);
	spdk_bdev_claim_opts_init(&opts, sizeof(opts));
	opts.shared_claim_key = (uint64_t)&opts;
	rc = spdk_bdev_module_claim_bdev_desc(desc, SPDK_BDEV_CLAIM_READ_MANY_WRITE_ONE, &opts,
					      &bdev_ut_if);
	CU_ASSERT(rc == -EINVAL);
	CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_NONE);
	UT_ASSERT_CLAIM_V2_COUNT(bdev, 0);
	spdk_bdev_close(desc);

	/* Clean up */
	free_bdev(bdev);
}
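/*
 * Distilled from the promotion step above: a module can open read-only and
 * still become the single writer once its RWO claim succeeds. A sketch of
 * that pattern (the function name is hypothetical):
 */
#if 0
static int
example_become_writer(const char *name, struct spdk_bdev_desc **out)
{
	struct spdk_bdev_desc *desc = NULL;
	int rc;

	/* Read-only opens succeed even while another descriptor holds a claim */
	rc = spdk_bdev_open_ext(name, false, bdev_ut_event_cb, NULL, &desc);
	if (rc != 0) {
		return rc;
	}

	/* Taking the RWO claim promotes this descriptor to read-write; it
	 * fails with -EPERM if a conflicting claim already exists. */
	rc = spdk_bdev_module_claim_bdev_desc(desc, SPDK_BDEV_CLAIM_READ_MANY_WRITE_ONE,
					      NULL, &bdev_ut_if);
	if (rc != 0) {
		spdk_bdev_close(desc);
		return rc;
	}

	*out = desc;
	return 0;
}
#endif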
static void
claim_v2_rom(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc;
	struct spdk_bdev_desc *desc2;
	struct spdk_bdev_claim_opts opts;
	int rc;

	bdev = allocate_bdev("bdev0");

	/* Claim without options */
	desc = NULL;
	rc = spdk_bdev_open_ext("bdev0", false, bdev_ut_event_cb, NULL, &desc);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(desc != NULL);
	rc = spdk_bdev_module_claim_bdev_desc(desc, SPDK_BDEV_CLAIM_READ_MANY_WRITE_NONE, NULL,
					      &bdev_ut_if);
	CU_ASSERT(rc == 0);
	CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_READ_MANY_WRITE_NONE);
	CU_ASSERT(desc->claim != NULL);
	CU_ASSERT(desc->claim->module == &bdev_ut_if);
	CU_ASSERT(strcmp(desc->claim->name, "") == 0);
	CU_ASSERT(TAILQ_FIRST(&bdev->internal.claim.v2.claims) == desc->claim);
	UT_ASSERT_CLAIM_V2_COUNT(bdev, 1);

	/* Release the claim by closing the descriptor */
	spdk_bdev_close(desc);
	CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_NONE);
	CU_ASSERT(TAILQ_EMPTY(&bdev->internal.open_descs));
	UT_ASSERT_CLAIM_V2_COUNT(bdev, 0);

	/* Claim with options; the name is copied, so clearing opts must not affect it */
	spdk_bdev_claim_opts_init(&opts, sizeof(opts));
	snprintf(opts.name, sizeof(opts.name), "%s", "claim with options");
	desc = NULL;
	rc = spdk_bdev_open_ext("bdev0", false, bdev_ut_event_cb, NULL, &desc);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(desc != NULL);
	rc = spdk_bdev_module_claim_bdev_desc(desc, SPDK_BDEV_CLAIM_READ_MANY_WRITE_NONE, &opts,
					      &bdev_ut_if);
	CU_ASSERT(rc == 0);
	CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_READ_MANY_WRITE_NONE);
	SPDK_CU_ASSERT_FATAL(desc->claim != NULL);
	CU_ASSERT(desc->claim->module == &bdev_ut_if);
	CU_ASSERT(strcmp(desc->claim->name, "claim with options") == 0);
	memset(&opts, 0, sizeof(opts));
	CU_ASSERT(strcmp(desc->claim->name, "claim with options") == 0);
	CU_ASSERT(TAILQ_FIRST(&bdev->internal.claim.v2.claims) == desc->claim);
	UT_ASSERT_CLAIM_V2_COUNT(bdev, 1);

	/* The claim blocks new writers. */
	desc2 = NULL;
	rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc2);
	CU_ASSERT(rc == -EPERM);
	CU_ASSERT(desc2 == NULL);

	/* New readers are allowed */
	desc2 = NULL;
	rc = spdk_bdev_open_ext("bdev0", false, bdev_ut_event_cb, NULL, &desc2);
	CU_ASSERT(rc == 0);
	CU_ASSERT(desc2 != NULL);
	CU_ASSERT(!desc2->write);

	/* No new v2 RWO claims are allowed */
	rc = spdk_bdev_module_claim_bdev_desc(desc2, SPDK_BDEV_CLAIM_READ_MANY_WRITE_ONE, NULL,
					      &bdev_ut_if);
	CU_ASSERT(rc == -EPERM);

	/* No new v2 RWM claims are allowed */
	spdk_bdev_claim_opts_init(&opts, sizeof(opts));
	opts.shared_claim_key = (uint64_t)&opts;
	rc = spdk_bdev_module_claim_bdev_desc(desc2, SPDK_BDEV_CLAIM_READ_MANY_WRITE_SHARED, &opts,
					      &bdev_ut_if);
	CU_ASSERT(rc == -EPERM);
	CU_ASSERT(!desc2->write);

	/* No new v1 claims are allowed */
	rc = spdk_bdev_module_claim_bdev(bdev, NULL, &bdev_ut_if);
	CU_ASSERT(rc == -EPERM);

	/* None of the above changed the existing claim */
	CU_ASSERT(TAILQ_FIRST(&bdev->internal.claim.v2.claims) == desc->claim);
	UT_ASSERT_CLAIM_V2_COUNT(bdev, 1);

	/* New v2 ROM claims are allowed and the descriptor stays read-only. */
	CU_ASSERT(!desc2->write);
	rc = spdk_bdev_module_claim_bdev_desc(desc2, SPDK_BDEV_CLAIM_READ_MANY_WRITE_NONE, NULL,
					      &bdev_ut_if);
	CU_ASSERT(rc == 0);
	CU_ASSERT(!desc2->write);
	CU_ASSERT(TAILQ_FIRST(&bdev->internal.claim.v2.claims) == desc->claim);
	CU_ASSERT(TAILQ_NEXT(desc->claim, link) == desc2->claim);
	UT_ASSERT_CLAIM_V2_COUNT(bdev, 2);

	/* Claim remains when closing the first descriptor */
	spdk_bdev_close(desc);
	CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_READ_MANY_WRITE_NONE);
	CU_ASSERT(!TAILQ_EMPTY(&bdev->internal.open_descs));
	CU_ASSERT(TAILQ_FIRST(&bdev->internal.claim.v2.claims) == desc2->claim);
	UT_ASSERT_CLAIM_V2_COUNT(bdev, 1);

	/* Claim removed when closing the other descriptor */
	spdk_bdev_close(desc2);
	CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_NONE);
	UT_ASSERT_CLAIM_V2_COUNT(bdev, 0);
	CU_ASSERT(TAILQ_EMPTY(&bdev->internal.open_descs));

	/* Cannot claim with a key */
	desc = NULL;
	rc = spdk_bdev_open_ext("bdev0", false, bdev_ut_event_cb, NULL, &desc);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(desc != NULL);
	spdk_bdev_claim_opts_init(&opts, sizeof(opts));
	opts.shared_claim_key = (uint64_t)&opts;
	rc = spdk_bdev_module_claim_bdev_desc(desc, SPDK_BDEV_CLAIM_READ_MANY_WRITE_NONE, &opts,
					      &bdev_ut_if);
	CU_ASSERT(rc == -EINVAL);
	CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_NONE);
	UT_ASSERT_CLAIM_V2_COUNT(bdev, 0);
	spdk_bdev_close(desc);

	/* Cannot claim with a read-write descriptor */
	desc = NULL;
	rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(desc != NULL);
	rc = spdk_bdev_module_claim_bdev_desc(desc, SPDK_BDEV_CLAIM_READ_MANY_WRITE_NONE, NULL,
					      &bdev_ut_if);
	CU_ASSERT(rc == -EINVAL);
	CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_NONE);
	UT_ASSERT_CLAIM_V2_COUNT(bdev, 0);
	spdk_bdev_close(desc);
	CU_ASSERT(TAILQ_EMPTY(&bdev->internal.open_descs));

	/* Clean up */
	free_bdev(bdev);
}
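/*
 * The two-claim window above shows that ROM claims can coexist; each one is
 * dropped independently by closing its descriptor. A sketch of a single
 * read-only claimant (helper name hypothetical):
 */
#if 0
static int
example_rom_claim(const char *name, struct spdk_bdev_desc **out)
{
	int rc;

	/* Must be opened read-only; a ROM claim on a read-write descriptor
	 * fails with -EINVAL, as tested above. */
	rc = spdk_bdev_open_ext(name, false, bdev_ut_event_cb, NULL, out);
	if (rc != 0) {
		return rc;
	}

	rc = spdk_bdev_module_claim_bdev_desc(*out, SPDK_BDEV_CLAIM_READ_MANY_WRITE_NONE,
					      NULL, &bdev_ut_if);
	if (rc != 0) {
		spdk_bdev_close(*out);
		*out = NULL;
	}

	return rc;
}
#endif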
static void
claim_v2_rwm(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc;
	struct spdk_bdev_desc *desc2;
	struct spdk_bdev_claim_opts opts;
	char good_key, bad_key;
	int rc;

	bdev = allocate_bdev("bdev0");

	/* Claim without options should fail */
	desc = NULL;
	rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(desc != NULL);
	rc = spdk_bdev_module_claim_bdev_desc(desc, SPDK_BDEV_CLAIM_READ_MANY_WRITE_SHARED, NULL,
					      &bdev_ut_if);
	CU_ASSERT(rc == -EINVAL);
	CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_NONE);
	UT_ASSERT_CLAIM_V2_COUNT(bdev, 0);
	CU_ASSERT(desc->claim == NULL);

	/* Claim with options */
	spdk_bdev_claim_opts_init(&opts, sizeof(opts));
	snprintf(opts.name, sizeof(opts.name), "%s", "claim with options");
	opts.shared_claim_key = (uint64_t)&good_key;
	rc = spdk_bdev_module_claim_bdev_desc(desc, SPDK_BDEV_CLAIM_READ_MANY_WRITE_SHARED, &opts,
					      &bdev_ut_if);
	CU_ASSERT(rc == 0);
	CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_READ_MANY_WRITE_SHARED);
	SPDK_CU_ASSERT_FATAL(desc->claim != NULL);
	CU_ASSERT(desc->claim->module == &bdev_ut_if);
	CU_ASSERT(strcmp(desc->claim->name, "claim with options") == 0);
	memset(&opts, 0, sizeof(opts));
	CU_ASSERT(strcmp(desc->claim->name, "claim with options") == 0);
	CU_ASSERT(TAILQ_FIRST(&bdev->internal.claim.v2.claims) == desc->claim);
	UT_ASSERT_CLAIM_V2_COUNT(bdev, 1);

	/* The claim blocks new writers. */
	desc2 = NULL;
	rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc2);
	CU_ASSERT(rc == -EPERM);
	CU_ASSERT(desc2 == NULL);

	/* New readers are allowed */
	desc2 = NULL;
	rc = spdk_bdev_open_ext("bdev0", false, bdev_ut_event_cb, NULL, &desc2);
	CU_ASSERT(rc == 0);
	CU_ASSERT(desc2 != NULL);
	CU_ASSERT(!desc2->write);

	/* No new v2 RWO claims are allowed */
	rc = spdk_bdev_module_claim_bdev_desc(desc2, SPDK_BDEV_CLAIM_READ_MANY_WRITE_ONE, NULL,
					      &bdev_ut_if);
	CU_ASSERT(rc == -EPERM);

	/* No new v2 ROM claims are allowed and the descriptor stays read-only. */
	CU_ASSERT(!desc2->write);
	rc = spdk_bdev_module_claim_bdev_desc(desc2, SPDK_BDEV_CLAIM_READ_MANY_WRITE_NONE, NULL,
					      &bdev_ut_if);
	CU_ASSERT(rc == -EPERM);
	CU_ASSERT(!desc2->write);

	/* No new v1 claims are allowed */
	rc = spdk_bdev_module_claim_bdev(bdev, NULL, &bdev_ut_if);
	CU_ASSERT(rc == -EPERM);

	/* No new v2 RWM claims are allowed if the key does not match */
	spdk_bdev_claim_opts_init(&opts, sizeof(opts));
	opts.shared_claim_key = (uint64_t)&bad_key;
	rc = spdk_bdev_module_claim_bdev_desc(desc2, SPDK_BDEV_CLAIM_READ_MANY_WRITE_SHARED, &opts,
					      &bdev_ut_if);
	CU_ASSERT(rc == -EPERM);
	CU_ASSERT(!desc2->write);

	/* None of the above changed the existing claim */
	CU_ASSERT(TAILQ_FIRST(&bdev->internal.claim.v2.claims) == desc->claim);
	UT_ASSERT_CLAIM_V2_COUNT(bdev, 1);

	/* New v2 RWM claims are allowed and the descriptor is promoted if the key matches. */
	spdk_bdev_claim_opts_init(&opts, sizeof(opts));
	opts.shared_claim_key = (uint64_t)&good_key;
	CU_ASSERT(!desc2->write);
	rc = spdk_bdev_module_claim_bdev_desc(desc2, SPDK_BDEV_CLAIM_READ_MANY_WRITE_SHARED, &opts,
					      &bdev_ut_if);
	CU_ASSERT(rc == 0);
	CU_ASSERT(desc2->write);
	CU_ASSERT(TAILQ_NEXT(desc->claim, link) == desc2->claim);
	UT_ASSERT_CLAIM_V2_COUNT(bdev, 2);

	/* Claim remains when closing the first descriptor */
	spdk_bdev_close(desc);
	CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_READ_MANY_WRITE_SHARED);
	CU_ASSERT(!TAILQ_EMPTY(&bdev->internal.open_descs));
	CU_ASSERT(TAILQ_FIRST(&bdev->internal.claim.v2.claims) == desc2->claim);
	UT_ASSERT_CLAIM_V2_COUNT(bdev, 1);

	/* Claim removed when closing the other descriptor */
	spdk_bdev_close(desc2);
	CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_NONE);
	CU_ASSERT(TAILQ_EMPTY(&bdev->internal.open_descs));

	/* Cannot claim without a key */
	desc = NULL;
	rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(desc != NULL);
	spdk_bdev_claim_opts_init(&opts, sizeof(opts));
	rc = spdk_bdev_module_claim_bdev_desc(desc, SPDK_BDEV_CLAIM_READ_MANY_WRITE_SHARED, &opts,
					      &bdev_ut_if);
	CU_ASSERT(rc == -EINVAL);
	spdk_bdev_close(desc);
	CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_NONE);
	CU_ASSERT(TAILQ_EMPTY(&bdev->internal.open_descs));

	/* Clean up */
	free_bdev(bdev);
}

static void
claim_v2_existing_writer(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc;
	struct spdk_bdev_desc *desc2;
	struct spdk_bdev_claim_opts opts;
	enum spdk_bdev_claim_type type;
	enum spdk_bdev_claim_type types[] = {
		SPDK_BDEV_CLAIM_READ_MANY_WRITE_ONE,
		SPDK_BDEV_CLAIM_READ_MANY_WRITE_SHARED,
		SPDK_BDEV_CLAIM_READ_MANY_WRITE_NONE
	};
	size_t i;
	int rc;

	bdev = allocate_bdev("bdev0");

	desc = NULL;
	rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(desc != NULL);
	desc2 = NULL;
	rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc2);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(desc2 != NULL);

	for (i = 0; i < SPDK_COUNTOF(types); i++) {
		type = types[i];
		spdk_bdev_claim_opts_init(&opts, sizeof(opts));
		if (type == SPDK_BDEV_CLAIM_READ_MANY_WRITE_SHARED) {
			opts.shared_claim_key = (uint64_t)&opts;
		}
		rc = spdk_bdev_module_claim_bdev_desc(desc, type, &opts, &bdev_ut_if);
		if (type == SPDK_BDEV_CLAIM_READ_MANY_WRITE_NONE) {
			/* A ROM claim requires a read-only descriptor */
			CU_ASSERT(rc == -EINVAL);
		} else {
			CU_ASSERT(rc == -EPERM);
		}
		CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_NONE);
		rc = spdk_bdev_module_claim_bdev_desc(desc2, type, &opts, &bdev_ut_if);
		if (type == SPDK_BDEV_CLAIM_READ_MANY_WRITE_NONE) {
			CU_ASSERT(rc == -EINVAL);
		} else {
			CU_ASSERT(rc == -EPERM);
		}
		CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_NONE);
	}

	spdk_bdev_close(desc);
	spdk_bdev_close(desc2);

	/* Clean up */
	free_bdev(bdev);
}
static void
claim_v2_existing_v1(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc;
	struct spdk_bdev_claim_opts opts;
	enum spdk_bdev_claim_type type;
	enum spdk_bdev_claim_type types[] = {
		SPDK_BDEV_CLAIM_READ_MANY_WRITE_ONE,
		SPDK_BDEV_CLAIM_READ_MANY_WRITE_SHARED,
		SPDK_BDEV_CLAIM_READ_MANY_WRITE_NONE
	};
	size_t i;
	int rc;

	bdev = allocate_bdev("bdev0");

	rc = spdk_bdev_module_claim_bdev(bdev, NULL, &bdev_ut_if);
	CU_ASSERT(rc == 0);
	CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_EXCL_WRITE);

	desc = NULL;
	rc = spdk_bdev_open_ext("bdev0", false, bdev_ut_event_cb, NULL, &desc);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(desc != NULL);

	for (i = 0; i < SPDK_COUNTOF(types); i++) {
		type = types[i];
		spdk_bdev_claim_opts_init(&opts, sizeof(opts));
		if (type == SPDK_BDEV_CLAIM_READ_MANY_WRITE_SHARED) {
			opts.shared_claim_key = (uint64_t)&opts;
		}
		rc = spdk_bdev_module_claim_bdev_desc(desc, type, &opts, &bdev_ut_if);
		CU_ASSERT(rc == -EPERM);
		CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_EXCL_WRITE);
	}

	spdk_bdev_module_release_bdev(bdev);
	spdk_bdev_close(desc);

	/* Clean up */
	free_bdev(bdev);
}

static void
claim_v1_existing_v2(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc;
	struct spdk_bdev_claim_opts opts;
	enum spdk_bdev_claim_type type;
	enum spdk_bdev_claim_type types[] = {
		SPDK_BDEV_CLAIM_READ_MANY_WRITE_ONE,
		SPDK_BDEV_CLAIM_READ_MANY_WRITE_SHARED,
		SPDK_BDEV_CLAIM_READ_MANY_WRITE_NONE
	};
	size_t i;
	int rc;

	bdev = allocate_bdev("bdev0");

	for (i = 0; i < SPDK_COUNTOF(types); i++) {
		type = types[i];

		desc = NULL;
		rc = spdk_bdev_open_ext("bdev0", false, bdev_ut_event_cb, NULL, &desc);
		CU_ASSERT(rc == 0);
		SPDK_CU_ASSERT_FATAL(desc != NULL);

		/* Get a v2 claim */
		spdk_bdev_claim_opts_init(&opts, sizeof(opts));
		if (type == SPDK_BDEV_CLAIM_READ_MANY_WRITE_SHARED) {
			opts.shared_claim_key = (uint64_t)&opts;
		}
		rc = spdk_bdev_module_claim_bdev_desc(desc, type, &opts, &bdev_ut_if);
		CU_ASSERT(rc == 0);

		/* Fail to get a v1 claim */
		rc = spdk_bdev_module_claim_bdev(bdev, NULL, &bdev_ut_if);
		CU_ASSERT(rc == -EPERM);

		spdk_bdev_close(desc);

		/* Now v1 succeeds */
		rc = spdk_bdev_module_claim_bdev(bdev, NULL, &bdev_ut_if);
		CU_ASSERT(rc == 0);
		spdk_bdev_module_release_bdev(bdev);
	}

	/* Clean up */
	free_bdev(bdev);
}
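/*
 * For contrast with the descriptor-based v2 API exercised above: a legacy v1
 * claim is taken on the bdev itself and must be released explicitly rather
 * than by closing a descriptor. A minimal sketch (helper name hypothetical):
 */
#if 0
static int
example_v1_claim(struct spdk_bdev *bdev)
{
	int rc;

	rc = spdk_bdev_module_claim_bdev(bdev, NULL, &bdev_ut_if);
	if (rc != 0) {
		return rc; /* -EPERM when any v1 or v2 claim already exists */
	}

	/* ... the module has exclusive write ownership here ... */

	spdk_bdev_module_release_bdev(bdev);
	return 0;
}
#endif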
static int ut_examine_claimed_init0(void);
static int ut_examine_claimed_init1(void);
static void ut_examine_claimed_config0(struct spdk_bdev *bdev);
static void ut_examine_claimed_disk0(struct spdk_bdev *bdev);
static void ut_examine_claimed_config1(struct spdk_bdev *bdev);
static void ut_examine_claimed_disk1(struct spdk_bdev *bdev);

#define UT_MAX_EXAMINE_MODS 2
struct spdk_bdev_module examine_claimed_mods[UT_MAX_EXAMINE_MODS] = {
	{
		.name = "vbdev_ut_examine0",
		.module_init = ut_examine_claimed_init0,
		.module_fini = vbdev_ut_module_fini,
		.examine_config = ut_examine_claimed_config0,
		.examine_disk = ut_examine_claimed_disk0,
	},
	{
		.name = "vbdev_ut_examine1",
		.module_init = ut_examine_claimed_init1,
		.module_fini = vbdev_ut_module_fini,
		.examine_config = ut_examine_claimed_config1,
		.examine_disk = ut_examine_claimed_disk1,
	}
};

SPDK_BDEV_MODULE_REGISTER(bdev_ut_claimed0, &examine_claimed_mods[0])
SPDK_BDEV_MODULE_REGISTER(bdev_ut_claimed1, &examine_claimed_mods[1])

struct ut_examine_claimed_ctx {
	uint32_t examine_config_count;
	uint32_t examine_disk_count;

	/* Claim type to take, with these options */
	enum spdk_bdev_claim_type claim_type;
	struct spdk_bdev_claim_opts claim_opts;

	/* Expected return value from spdk_bdev_module_claim_bdev_desc() */
	int expect_claim_err;

	/* Descriptor used for a claim */
	struct spdk_bdev_desc *desc;
} examine_claimed_ctx[UT_MAX_EXAMINE_MODS];

bool ut_testing_examine_claimed;

/*
 * Store the order in which the modules were initialized, since there is no
 * guarantee on the order of execution of the constructors. Modules are
 * examined in reverse order of their initialization.
 */
static int g_ut_examine_claimed_order[UT_MAX_EXAMINE_MODS];
static int
ut_examine_claimed_init(uint32_t modnum)
{
	static int current = UT_MAX_EXAMINE_MODS;

	/* Only do this for the first initialization of the bdev framework */
	if (current == 0) {
		return 0;
	}
	g_ut_examine_claimed_order[modnum] = --current;

	return 0;
}

static int
ut_examine_claimed_init0(void)
{
	return ut_examine_claimed_init(0);
}

static int
ut_examine_claimed_init1(void)
{
	return ut_examine_claimed_init(1);
}

static void
reset_examine_claimed_ctx(void)
{
	struct ut_examine_claimed_ctx *ctx;
	uint32_t i;

	for (i = 0; i < SPDK_COUNTOF(examine_claimed_ctx); i++) {
		ctx = &examine_claimed_ctx[i];
		if (ctx->desc != NULL) {
			spdk_bdev_close(ctx->desc);
		}
		memset(ctx, 0, sizeof(*ctx));
		spdk_bdev_claim_opts_init(&ctx->claim_opts, sizeof(ctx->claim_opts));
	}
}

static void
examine_claimed_config(struct spdk_bdev *bdev, uint32_t modnum)
{
	SPDK_CU_ASSERT_FATAL(modnum < UT_MAX_EXAMINE_MODS);
	struct spdk_bdev_module *module = &examine_claimed_mods[modnum];
	struct ut_examine_claimed_ctx *ctx = &examine_claimed_ctx[modnum];
	int rc;

	if (!ut_testing_examine_claimed) {
		spdk_bdev_module_examine_done(module);
		return;
	}

	ctx->examine_config_count++;

	if (ctx->claim_type != SPDK_BDEV_CLAIM_NONE) {
		rc = spdk_bdev_open_ext(bdev->name, false, bdev_ut_event_cb, NULL, &ctx->desc);
		CU_ASSERT(rc == 0);

		/* The claim options belong to the claim call, not the open */
		rc = spdk_bdev_module_claim_bdev_desc(ctx->desc, ctx->claim_type,
						      &ctx->claim_opts, module);
		CU_ASSERT(rc == ctx->expect_claim_err);
	}
	spdk_bdev_module_examine_done(module);
}

static void
ut_examine_claimed_config0(struct spdk_bdev *bdev)
{
	examine_claimed_config(bdev, g_ut_examine_claimed_order[0]);
}

static void
ut_examine_claimed_config1(struct spdk_bdev *bdev)
{
	examine_claimed_config(bdev, g_ut_examine_claimed_order[1]);
}

static void
examine_claimed_disk(struct spdk_bdev *bdev, uint32_t modnum)
{
	SPDK_CU_ASSERT_FATAL(modnum < UT_MAX_EXAMINE_MODS);
	struct spdk_bdev_module *module = &examine_claimed_mods[modnum];
	struct ut_examine_claimed_ctx *ctx = &examine_claimed_ctx[modnum];

	if (!ut_testing_examine_claimed) {
		spdk_bdev_module_examine_done(module);
		return;
	}

	ctx->examine_disk_count++;

	spdk_bdev_module_examine_done(module);
}
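/*
 * Note the invariant in both helpers above: every path, including the early
 * return taken when the test is inactive, ends with
 * spdk_bdev_module_examine_done(). The bdev layer waits for each registered
 * module to report completion, so a module that skipped the call would leave
 * the bdev stuck mid-examine.
 */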
static void
ut_examine_claimed_disk0(struct spdk_bdev *bdev)
{
	examine_claimed_disk(bdev, 0);
}

static void
ut_examine_claimed_disk1(struct spdk_bdev *bdev)
{
	examine_claimed_disk(bdev, 1);
}

static void
examine_claimed(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_module *mod = examine_claimed_mods;
	struct ut_examine_claimed_ctx *ctx = examine_claimed_ctx;

	ut_testing_examine_claimed = true;
	reset_examine_claimed_ctx();

	/*
	 * With one module claiming, both modules' examine_config should be called, but only the
	 * claiming module's examine_disk should be called.
	 */
	ctx[0].claim_type = SPDK_BDEV_CLAIM_READ_MANY_WRITE_NONE;
	bdev = allocate_bdev("bdev0");
	CU_ASSERT(ctx[0].examine_config_count == 1);
	CU_ASSERT(ctx[0].examine_disk_count == 1);
	SPDK_CU_ASSERT_FATAL(ctx[0].desc != NULL);
	CU_ASSERT(ctx[0].desc->claim->module == &mod[0]);
	CU_ASSERT(ctx[1].examine_config_count == 1);
	CU_ASSERT(ctx[1].examine_disk_count == 0);
	CU_ASSERT(ctx[1].desc == NULL);
	reset_examine_claimed_ctx();
	free_bdev(bdev);

	/*
	 * With two modules claiming, both modules' examine_config and examine_disk should be
	 * called.
	 */
	ctx[0].claim_type = SPDK_BDEV_CLAIM_READ_MANY_WRITE_NONE;
	ctx[1].claim_type = SPDK_BDEV_CLAIM_READ_MANY_WRITE_NONE;
	bdev = allocate_bdev("bdev0");
	CU_ASSERT(ctx[0].examine_config_count == 1);
	CU_ASSERT(ctx[0].examine_disk_count == 1);
	SPDK_CU_ASSERT_FATAL(ctx[0].desc != NULL);
	CU_ASSERT(ctx[0].desc->claim->module == &mod[0]);
	CU_ASSERT(ctx[1].examine_config_count == 1);
	CU_ASSERT(ctx[1].examine_disk_count == 1);
	SPDK_CU_ASSERT_FATAL(ctx[1].desc != NULL);
	CU_ASSERT(ctx[1].desc->claim->module == &mod[1]);
	reset_examine_claimed_ctx();
	free_bdev(bdev);

	/*
	 * If two vbdev modules try to claim with conflicting claim types, the module that was
	 * added last wins. The winner gets the claim and is the only one that has its
	 * examine_disk callback invoked.
	 */
	ctx[0].claim_type = SPDK_BDEV_CLAIM_READ_MANY_WRITE_NONE;
	ctx[0].expect_claim_err = -EPERM;
	ctx[1].claim_type = SPDK_BDEV_CLAIM_READ_MANY_WRITE_ONE;
	bdev = allocate_bdev("bdev0");
	CU_ASSERT(ctx[0].examine_config_count == 1);
	CU_ASSERT(ctx[0].examine_disk_count == 0);
	CU_ASSERT(ctx[1].examine_config_count == 1);
	CU_ASSERT(ctx[1].examine_disk_count == 1);
	SPDK_CU_ASSERT_FATAL(ctx[1].desc != NULL);
	CU_ASSERT(ctx[1].desc->claim->module == &mod[1]);
	CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_READ_MANY_WRITE_ONE);
	reset_examine_claimed_ctx();
	free_bdev(bdev);

	ut_testing_examine_claimed = false;
}
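/*
 * The suite below runs on the single-threaded harness from
 * common/lib/ut_multithread.c: allocate_cores()/allocate_threads() create one
 * SPDK thread, set_thread(0) makes it current, and the tests above drive all
 * asynchronous completions explicitly with poll_threads().
 */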
int
main(int argc, char **argv)
{
	CU_pSuite suite = NULL;
	unsigned int num_failures;

	CU_initialize_registry();

	suite = CU_add_suite("bdev", ut_bdev_setup, ut_bdev_teardown);

	CU_ADD_TEST(suite, bytes_to_blocks_test);
	CU_ADD_TEST(suite, num_blocks_test);
	CU_ADD_TEST(suite, io_valid_test);
	CU_ADD_TEST(suite, open_write_test);
	CU_ADD_TEST(suite, claim_test);
	CU_ADD_TEST(suite, alias_add_del_test);
	CU_ADD_TEST(suite, get_device_stat_test);
	CU_ADD_TEST(suite, bdev_io_types_test);
	CU_ADD_TEST(suite, bdev_io_wait_test);
	CU_ADD_TEST(suite, bdev_io_spans_split_test);
	CU_ADD_TEST(suite, bdev_io_boundary_split_test);
	CU_ADD_TEST(suite, bdev_io_max_size_and_segment_split_test);
	CU_ADD_TEST(suite, bdev_io_mix_split_test);
	CU_ADD_TEST(suite, bdev_io_split_with_io_wait);
	CU_ADD_TEST(suite, bdev_io_write_unit_split_test);
	CU_ADD_TEST(suite, bdev_io_alignment_with_boundary);
	CU_ADD_TEST(suite, bdev_io_alignment);
	CU_ADD_TEST(suite, bdev_histograms);
	CU_ADD_TEST(suite, bdev_write_zeroes);
	CU_ADD_TEST(suite, bdev_compare_and_write);
	CU_ADD_TEST(suite, bdev_compare);
	CU_ADD_TEST(suite, bdev_compare_emulated);
	CU_ADD_TEST(suite, bdev_zcopy_write);
	CU_ADD_TEST(suite, bdev_zcopy_read);
	CU_ADD_TEST(suite, bdev_open_while_hotremove);
	CU_ADD_TEST(suite, bdev_close_while_hotremove);
	CU_ADD_TEST(suite, bdev_open_ext_test);
	CU_ADD_TEST(suite, bdev_open_ext_unregister);
	CU_ADD_TEST(suite, bdev_set_io_timeout);
	CU_ADD_TEST(suite, bdev_set_qd_sampling);
	CU_ADD_TEST(suite, lba_range_overlap);
	CU_ADD_TEST(suite, lock_lba_range_check_ranges);
	CU_ADD_TEST(suite, lock_lba_range_with_io_outstanding);
	CU_ADD_TEST(suite, lock_lba_range_overlapped);
	CU_ADD_TEST(suite, bdev_quiesce);
	CU_ADD_TEST(suite, bdev_io_abort);
	CU_ADD_TEST(suite, bdev_unmap);
	CU_ADD_TEST(suite, bdev_write_zeroes_split_test);
	CU_ADD_TEST(suite, bdev_set_options_test);
	CU_ADD_TEST(suite, bdev_get_memory_domains);
	CU_ADD_TEST(suite, bdev_io_ext);
	CU_ADD_TEST(suite, bdev_io_ext_no_opts);
	CU_ADD_TEST(suite, bdev_io_ext_invalid_opts);
	CU_ADD_TEST(suite, bdev_io_ext_split);
	CU_ADD_TEST(suite, bdev_io_ext_bounce_buffer);
	CU_ADD_TEST(suite, bdev_register_uuid_alias);
	CU_ADD_TEST(suite, bdev_unregister_by_name);
	CU_ADD_TEST(suite, for_each_bdev_test);
	CU_ADD_TEST(suite, bdev_seek_test);
	CU_ADD_TEST(suite, bdev_copy);
	CU_ADD_TEST(suite, bdev_copy_split_test);
	CU_ADD_TEST(suite, examine_locks);
	CU_ADD_TEST(suite, claim_v2_rwo);
	CU_ADD_TEST(suite, claim_v2_rom);
	CU_ADD_TEST(suite, claim_v2_rwm);
	CU_ADD_TEST(suite, claim_v2_existing_writer);
	CU_ADD_TEST(suite, claim_v2_existing_v1);
	CU_ADD_TEST(suite, claim_v1_existing_v2);
	CU_ADD_TEST(suite, examine_claimed);

	allocate_cores(1);
	allocate_threads(1);
	set_thread(0);

	num_failures = spdk_ut_run_tests(argc, argv, NULL);
	CU_cleanup_registry();

	free_threads();
	free_cores();

	return num_failures;
}